Diffstat (limited to 'src/core')
-rw-r--r--  src/core/census/context.h | 8
-rw-r--r--  src/core/census/grpc_filter.c | 89
-rw-r--r--  src/core/channel/channel_stack.c | 59
-rw-r--r--  src/core/channel/channel_stack.h | 83
-rw-r--r--  src/core/channel/client_channel.c | 612
-rw-r--r--  src/core/channel/client_channel.h | 12
-rw-r--r--  src/core/channel/client_uchannel.c | 423
-rw-r--r--  src/core/channel/client_uchannel.h | 16
-rw-r--r--  src/core/channel/compress_filter.c | 344
-rw-r--r--  src/core/channel/connected_channel.c | 33
-rw-r--r--  src/core/channel/connected_channel.h | 2
-rw-r--r--  src/core/channel/http_client_filter.c | 177
-rw-r--r--  src/core/channel/http_server_filter.c | 208
-rw-r--r--  src/core/channel/noop_filter.c | 122
-rw-r--r--  src/core/channel/subchannel_call_holder.c | 259
-rw-r--r--  src/core/channel/subchannel_call_holder.h | 98
-rw-r--r--  src/core/client_config/connector.h | 2
-rw-r--r--  src/core/client_config/default_initial_connect_string.c (renamed from src/core/client_config/subchannel_factory_decorators/add_channel_arg.c) | 14
-rw-r--r--  src/core/client_config/initial_connect_string.c (renamed from src/core/client_config/subchannel_factory_decorators/merge_channel_args.h) | 27
-rw-r--r--  src/core/client_config/initial_connect_string.h (renamed from src/core/channel/noop_filter.h) | 22
-rw-r--r--  src/core/client_config/lb_policies/pick_first.c | 186
-rw-r--r--  src/core/client_config/lb_policies/round_robin.c | 276
-rw-r--r--  src/core/client_config/lb_policy.c | 92
-rw-r--r--  src/core/client_config/lb_policy.h | 50
-rw-r--r--  src/core/client_config/resolver.c | 7
-rw-r--r--  src/core/client_config/resolver.h | 10
-rw-r--r--  src/core/client_config/resolvers/dns_resolver.c | 8
-rw-r--r--  src/core/client_config/resolvers/sockaddr_resolver.c | 16
-rw-r--r--  src/core/client_config/resolvers/zookeeper_resolver.c | 7
-rw-r--r--  src/core/client_config/subchannel.c | 743
-rw-r--r--  src/core/client_config/subchannel.h | 107
-rw-r--r--  src/core/client_config/subchannel_factory_decorators/merge_channel_args.c | 86
-rw-r--r--  src/core/compression/algorithm.c | 70
-rw-r--r--  src/core/compression/algorithm_metadata.h (renamed from src/core/client_config/subchannel_factory_decorators/add_channel_arg.h) | 27
-rw-r--r--  src/core/compression/message_compress.c | 28
-rw-r--r--  src/core/httpcli/httpcli.c | 9
-rw-r--r--  src/core/httpcli/httpcli.h | 2
-rw-r--r--  src/core/httpcli/httpcli_security_connector.c | 5
-rw-r--r--  src/core/iomgr/closure.c | 23
-rw-r--r--  src/core/iomgr/closure.h | 19
-rw-r--r--  src/core/iomgr/endpoint_pair_posix.c | 3
-rw-r--r--  src/core/iomgr/exec_ctx.c | 5
-rw-r--r--  src/core/iomgr/executor.c | 13
-rw-r--r--  src/core/iomgr/fd_posix.c | 23
-rw-r--r--  src/core/iomgr/fd_posix.h | 5
-rw-r--r--  src/core/iomgr/pollset.h | 5
-rw-r--r--  src/core/iomgr/pollset_multipoller_with_epoll.c | 36
-rw-r--r--  src/core/iomgr/pollset_multipoller_with_poll_posix.c | 23
-rw-r--r--  src/core/iomgr/pollset_posix.c | 127
-rw-r--r--  src/core/iomgr/pollset_posix.h | 15
-rw-r--r--  src/core/iomgr/pollset_set.h | 22
-rw-r--r--  src/core/iomgr/pollset_set_posix.c | 51
-rw-r--r--  src/core/iomgr/pollset_set_posix.h | 4
-rw-r--r--  src/core/iomgr/pollset_set_windows.c | 8
-rw-r--r--  src/core/iomgr/pollset_windows.c | 13
-rw-r--r--  src/core/iomgr/tcp_client_posix.c | 4
-rw-r--r--  src/core/iomgr/tcp_posix.c | 16
-rw-r--r--  src/core/iomgr/tcp_posix.h | 6
-rw-r--r--  src/core/iomgr/tcp_server.h | 19
-rw-r--r--  src/core/iomgr/tcp_server_posix.c | 153
-rw-r--r--  src/core/iomgr/tcp_server_windows.c | 125
-rw-r--r--  src/core/iomgr/tcp_windows.c | 14
-rw-r--r--  src/core/iomgr/timer.c | 12
-rw-r--r--  src/core/iomgr/timer_internal.h | 2
-rw-r--r--  src/core/iomgr/udp_server.c | 4
-rw-r--r--  src/core/iomgr/wakeup_fd_posix.c | 8
-rw-r--r--  src/core/iomgr/wakeup_fd_posix.h | 2
-rw-r--r--  src/core/iomgr/workqueue_posix.c | 7
-rw-r--r--  src/core/json/json_reader.c | 15
-rw-r--r--  src/core/json/json_string.c | 2
-rw-r--r--  src/core/profiling/basic_timers.c | 190
-rw-r--r--  src/core/profiling/timers.h | 2
-rw-r--r--  src/core/security/client_auth_filter.c | 140
-rw-r--r--  src/core/security/credentials.c | 21
-rw-r--r--  src/core/security/credentials.h | 11
-rw-r--r--  src/core/security/credentials_posix.c | 2
-rw-r--r--  src/core/security/credentials_win32.c | 2
-rw-r--r--  src/core/security/google_default_credentials.c | 15
-rw-r--r--  src/core/security/handshake.c | 49
-rw-r--r--  src/core/security/handshake.h | 2
-rw-r--r--  src/core/security/json_token.c | 4
-rw-r--r--  src/core/security/security_connector.c | 27
-rw-r--r--  src/core/security/security_connector.h | 14
-rw-r--r--  src/core/security/server_auth_filter.c | 90
-rw-r--r--  src/core/security/server_secure_chttp2.c | 88
-rw-r--r--  src/core/statistics/window_stats.c | 2
-rw-r--r--  src/core/support/alloc.c | 20
-rw-r--r--  src/core/support/avl.c | 288
-rw-r--r--  src/core/support/cmdline.c | 62
-rw-r--r--  src/core/support/log.c | 3
-rw-r--r--  src/core/support/log_linux.c | 4
-rw-r--r--  src/core/support/log_posix.c | 4
-rw-r--r--  src/core/support/log_win32.c | 4
-rw-r--r--  src/core/support/slice.c | 22
-rw-r--r--  src/core/support/slice_buffer.c | 49
-rw-r--r--  src/core/support/string.c | 29
-rw-r--r--  src/core/support/string.h | 10
-rw-r--r--  src/core/support/sync_posix.c | 5
-rw-r--r--  src/core/support/thd_posix.c | 9
-rw-r--r--  src/core/support/time.c | 64
-rw-r--r--  src/core/support/time_posix.c | 17
-rw-r--r--  src/core/support/time_precise.c | 4
-rw-r--r--  src/core/support/time_win32.c | 6
-rw-r--r--  src/core/surface/byte_buffer_queue.c | 97
-rw-r--r--  src/core/surface/call.c | 1936
-rw-r--r--  src/core/surface/call.h | 81
-rw-r--r--  src/core/surface/call_log_batch.c | 27
-rw-r--r--  src/core/surface/channel.c | 181
-rw-r--r--  src/core/surface/channel.h | 13
-rw-r--r--  src/core/surface/channel_connectivity.c | 41
-rw-r--r--  src/core/surface/channel_create.c | 43
-rw-r--r--  src/core/surface/channel_ping.c (renamed from src/core/surface/byte_buffer_queue.h) | 67
-rw-r--r--  src/core/surface/completion_queue.c | 132
-rw-r--r--  src/core/surface/completion_queue.h | 11
-rw-r--r--  src/core/surface/init.c | 14
-rw-r--r--  src/core/surface/lame_client.c | 82
-rw-r--r--  src/core/surface/secure_channel_create.c | 64
-rw-r--r--  src/core/surface/server.c | 251
-rw-r--r--  src/core/surface/server.h | 2
-rw-r--r--  src/core/surface/server_chttp2.c | 19
-rw-r--r--  src/core/surface/server_create.c | 14
-rw-r--r--  src/core/transport/byte_stream.c | 76
-rw-r--r--  src/core/transport/byte_stream.h | 88
-rw-r--r--  src/core/transport/chttp2/frame_data.c | 99
-rw-r--r--  src/core/transport/chttp2/frame_data.h | 27
-rw-r--r--  src/core/transport/chttp2/frame_ping.c | 11
-rw-r--r--  src/core/transport/chttp2/frame_settings.c | 34
-rw-r--r--  src/core/transport/chttp2/frame_settings.h | 1
-rw-r--r--  src/core/transport/chttp2/frame_window_update.c | 15
-rw-r--r--  src/core/transport/chttp2/hpack_encoder.c (renamed from src/core/transport/chttp2/stream_encoder.c) | 408
-rw-r--r--  src/core/transport/chttp2/hpack_encoder.h (renamed from src/core/transport/chttp2/stream_encoder.h) | 54
-rw-r--r--  src/core/transport/chttp2/hpack_parser.c | 176
-rw-r--r--  src/core/transport/chttp2/hpack_parser.h | 5
-rw-r--r--  src/core/transport/chttp2/hpack_table.c | 122
-rw-r--r--  src/core/transport/chttp2/hpack_table.h | 35
-rw-r--r--  src/core/transport/chttp2/incoming_metadata.c | 132
-rw-r--r--  src/core/transport/chttp2/incoming_metadata.h | 26
-rw-r--r--  src/core/transport/chttp2/internal.h | 371
-rw-r--r--  src/core/transport/chttp2/parsing.c | 387
-rw-r--r--  src/core/transport/chttp2/stream_lists.c | 128
-rw-r--r--  src/core/transport/chttp2/timeout_encoding.c | 17
-rw-r--r--  src/core/transport/chttp2/varint.h | 6
-rw-r--r--  src/core/transport/chttp2/writing.c | 336
-rw-r--r--  src/core/transport/chttp2_transport.c | 953
-rw-r--r--  src/core/transport/chttp2_transport.h | 2
-rw-r--r--  src/core/transport/connectivity_state.c | 76
-rw-r--r--  src/core/transport/connectivity_state.h | 13
-rw-r--r--  src/core/transport/metadata.c | 615
-rw-r--r--  src/core/transport/metadata.h | 42
-rw-r--r--  src/core/transport/metadata_batch.c (renamed from src/core/transport/stream_op.c) | 179
-rw-r--r--  src/core/transport/metadata_batch.h (renamed from src/core/transport/stream_op.h) | 91
-rw-r--r--  src/core/transport/static_metadata.c | 88
-rw-r--r--  src/core/transport/static_metadata.h | 406
-rw-r--r--  src/core/transport/transport.c | 63
-rw-r--r--  src/core/transport/transport.h | 75
-rw-r--r--  src/core/transport/transport_impl.h | 8
-rw-r--r--  src/core/transport/transport_op_string.c | 84
-rw-r--r--  src/core/tsi/transport_security.c | 8
158 files changed, 7662 insertions, 7052 deletions
diff --git a/src/core/census/context.h b/src/core/census/context.h
index d9907d4da7..700bcf86cf 100644
--- a/src/core/census/context.h
+++ b/src/core/census/context.h
@@ -36,14 +36,12 @@
#include <grpc/census.h>
+#define GRPC_CENSUS_MAX_ON_THE_WIRE_TAG_BYTES 2048
+
/* census_context is the in-memory representation of information needed to
* maintain tracing, RPC statistics and resource usage information. */
struct census_context {
- gpr_uint64 op_id; /* Operation identifier - unique per-context */
- gpr_uint64 trace_id; /* Globally unique trace identifier */
- /* TODO(aveitch) Add census tags:
- const census_tag_set *tags;
- */
+ census_tag_set *tags; /* Opaque data structure for census tags. */
};
#endif /* GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H */
diff --git a/src/core/census/grpc_filter.c b/src/core/census/grpc_filter.c
index 872543057e..4529ae9bd7 100644
--- a/src/core/census/grpc_filter.c
+++ b/src/core/census/grpc_filter.c
@@ -36,16 +36,17 @@
#include <stdio.h>
#include <string.h>
-#include "src/core/channel/channel_stack.h"
-#include "src/core/channel/noop_filter.h"
-#include "src/core/statistics/census_interface.h"
-#include "src/core/statistics/census_rpc_stats.h"
#include <grpc/census.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/time.h>
+#include "src/core/channel/channel_stack.h"
+#include "src/core/statistics/census_interface.h"
+#include "src/core/statistics/census_rpc_stats.h"
+#include "src/core/transport/static_metadata.h"
+
typedef struct call_data {
census_op_id op_id;
census_context *ctxt;
@@ -53,28 +54,22 @@ typedef struct call_data {
int error;
/* recv callback */
- grpc_stream_op_buffer *recv_ops;
+ grpc_metadata_batch *recv_initial_metadata;
grpc_closure *on_done_recv;
+ grpc_closure finish_recv;
} call_data;
-typedef struct channel_data {
- grpc_mdstr *path_str; /* pointer to meta data str with key == ":path" */
-} channel_data;
+typedef struct channel_data { gpr_uint8 unused; } channel_data;
-static void extract_and_annotate_method_tag(grpc_stream_op_buffer *sopb,
+static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
call_data *calld,
channel_data *chand) {
grpc_linked_mdelem *m;
- size_t i;
- for (i = 0; i < sopb->nops; i++) {
- grpc_stream_op *op = &sopb->ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
- if (m->md->key == chand->path_str) {
- gpr_log(GPR_DEBUG, "%s",
- (const char *)GPR_SLICE_START_PTR(m->md->value->slice));
- /* Add method tag here */
- }
+ for (m = md->list.head; m != NULL; m = m->next) {
+ if (m->md->key == GRPC_MDSTR_PATH) {
+ gpr_log(GPR_DEBUG, "%s",
+ (const char *)GPR_SLICE_START_PTR(m->md->value->slice));
+ /* Add method tag here */
}
}
}
@@ -83,8 +78,8 @@ static void client_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- if (op->send_ops) {
- extract_and_annotate_method_tag(op->send_ops, calld, chand);
+ if (op->send_initial_metadata) {
+ extract_and_annotate_method_tag(op->send_initial_metadata, calld, chand);
}
}
@@ -101,7 +96,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success) {
- extract_and_annotate_method_tag(calld->recv_ops, calld, chand);
+ extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
@@ -109,32 +104,33 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
- if (op->recv_ops) {
+ if (op->recv_initial_metadata) {
/* substitute our callback for the op callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = calld->on_done_recv;
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->on_complete;
+ op->on_complete = &calld->finish_recv;
}
}
static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
+ /* TODO(ctiller): this code fails. I don't know why. I expect it's
+ incomplete, and someone should look at it soon.
+
call_data *calld = elem->call_data;
- GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
+ GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
static void client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
- if (initial_op) client_mutate_op(elem, initial_op);
}
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
@@ -146,15 +142,13 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
/* TODO(hongyu): call census_tracing_start_op here. */
- grpc_closure_init(d->on_done_recv, server_on_done_recv, elem);
- if (initial_op) server_mutate_op(elem, initial_op);
+ grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
}
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
@@ -165,33 +159,26 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
- chand->path_str = grpc_mdstr_from_string(mdctx, ":path");
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
- if (chand->path_str != NULL) {
- GRPC_MDSTR_UNREF(chand->path_str);
- }
}
const grpc_channel_filter grpc_client_census_filter = {
- client_start_transport_op, grpc_channel_next_op,
- sizeof(call_data), client_init_call_elem,
- client_destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem,
- grpc_call_next_get_peer, "census-client"};
+ client_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ client_init_call_elem, grpc_call_stack_ignore_set_pollset,
+ client_destroy_call_elem, sizeof(channel_data), init_channel_elem,
+ destroy_channel_elem, grpc_call_next_get_peer, "census-client"};
const grpc_channel_filter grpc_server_census_filter = {
- server_start_transport_op, grpc_channel_next_op,
- sizeof(call_data), server_init_call_elem,
- server_destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem,
- grpc_call_next_get_peer, "census-server"};
+ server_start_transport_op, grpc_channel_next_op, sizeof(call_data),
+ server_init_call_elem, grpc_call_stack_ignore_set_pollset,
+ server_destroy_call_elem, sizeof(channel_data), init_channel_elem,
+ destroy_channel_elem, grpc_call_next_get_peer, "census-server"};
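
The census filter rewrite above targets the new grpc_channel_filter vtable layout: a set_pollset entry now sits between init_call_elem and destroy_call_elem, and init_call_elem/init_channel_elem take args structs instead of the old server_transport_data/initial_op and master/mdctx parameters. A minimal pass-through filter written against that layout might look as follows; this is a sketch only, and the "example-filter" name, the element types, and the do-nothing callbacks are illustrative, not part of this commit:

/* Sketch only: a pass-through filter laid out to match the vtable order
   used by the census filters above. */
typedef struct { gpr_uint8 unused; } example_call_data;
typedef struct { gpr_uint8 unused; } example_channel_data;

static void example_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                              grpc_call_element *elem,
                                              grpc_transport_stream_op *op) {
  grpc_call_next_op(exec_ctx, elem, op); /* pure pass-through */
}

static void example_init_call_elem(grpc_exec_ctx *exec_ctx,
                                   grpc_call_element *elem,
                                   grpc_call_element_args *args) {}

static void example_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem) {}

static void example_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_channel_element *elem,
                                      grpc_channel_element_args *args) {}

static void example_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                         grpc_channel_element *elem) {}

static const grpc_channel_filter example_filter = {
    example_start_transport_stream_op, grpc_channel_next_op,
    sizeof(example_call_data),          example_init_call_elem,
    grpc_call_stack_ignore_set_pollset, example_destroy_call_elem,
    sizeof(example_channel_data),       example_init_channel_elem,
    example_destroy_channel_elem,       grpc_call_next_get_peer,
    "example-filter"};

A filter that does not care about pollsets can reuse grpc_call_stack_ignore_set_pollset, exactly as the census filters do above.
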
diff --git a/src/core/channel/channel_stack.c b/src/core/channel/channel_stack.c
index abd7f719e7..5e09a050ee 100644
--- a/src/core/channel/channel_stack.c
+++ b/src/core/channel/channel_stack.c
@@ -101,20 +101,23 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
-void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
+void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
- size_t filter_count, grpc_channel *master,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context,
- grpc_channel_stack *stack) {
+ size_t filter_count,
+ const grpc_channel_args *channel_args,
+ const char *name, grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
grpc_channel_element *elems;
+ grpc_channel_element_args args;
char *user_data;
size_t i;
stack->count = filter_count;
+ GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
+ name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
user_data =
((char *)elems) +
@@ -122,11 +125,13 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
/* init per-filter data */
for (i = 0; i < filter_count; i++) {
+ args.channel_stack = stack;
+ args.channel_args = channel_args;
+ args.is_first = i == 0;
+ args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
- elems[i].filter->init_channel_elem(exec_ctx, &elems[i], master, args,
- metadata_context, i == 0,
- i == (filter_count - 1));
+ elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
@@ -151,33 +156,63 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
}
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack *channel_stack,
+ grpc_channel_stack *channel_stack, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
+ grpc_call_context_element *context,
const void *transport_server_data,
- grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
+ grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
call_stack->count = count;
+ GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
+ destroy_arg, "CALL_STACK");
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
+ args.call_stack = call_stack;
+ args.server_transport_data = transport_server_data;
+ args.context = context;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
- call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i],
- transport_server_data, initial_op);
+ call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
+void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_stack *call_stack,
+ grpc_pollset *pollset) {
+ size_t count = call_stack->count;
+ grpc_call_element *call_elems;
+ char *user_data;
+ size_t i;
+
+ call_elems = CALL_ELEMS_FROM_STACK(call_stack);
+ user_data = ((char *)call_elems) +
+ ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
+
+ /* init per-filter data */
+ for (i = 0; i < count; i++) {
+ call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset);
+ user_data +=
+ ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
+ }
+}
+
+void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_pollset *pollset) {}
+
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
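
With this change, grpc_channel_stack_init and grpc_call_stack_init take an initial reference count plus a destruction callback up front, and the pollset is attached separately via grpc_call_stack_set_pollset. A hedged sketch of how a caller might drive the new signatures follows; the allocation strategy, the destroy callbacks, and the NULL context/transport data are assumptions for illustration, not the code of this commit:

/* Sketch only: building a refcounted channel stack and call stack. */
static void example_destroy_channel_stack(grpc_exec_ctx *exec_ctx, void *arg,
                                          int success) {
  grpc_channel_stack_destroy(exec_ctx, arg);
  gpr_free(arg);
}

static void example_destroy_call_stack(grpc_exec_ctx *exec_ctx, void *arg,
                                       int success) {
  grpc_call_stack_destroy(exec_ctx, arg);
  gpr_free(arg);
}

static void example_build_stacks(grpc_exec_ctx *exec_ctx,
                                 const grpc_channel_filter **filters,
                                 size_t num_filters,
                                 const grpc_channel_args *args) {
  grpc_channel_stack *channel_stack =
      gpr_malloc(grpc_channel_stack_size(filters, num_filters));
  grpc_call_stack *call_stack;

  grpc_channel_stack_init(exec_ctx, 1 /* initial_refs */,
                          example_destroy_channel_stack, channel_stack,
                          filters, num_filters, args, "EXAMPLE_CHANNEL",
                          channel_stack);

  call_stack = gpr_malloc(channel_stack->call_stack_size);
  grpc_call_stack_init(exec_ctx, channel_stack, 1 /* initial_refs */,
                       example_destroy_call_stack, call_stack,
                       NULL /* context */, NULL /* server_transport_data */,
                       call_stack);

  /* the pollset must be attached before the first op is started:
     grpc_call_stack_set_pollset(exec_ctx, call_stack, pollset); */
}
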
diff --git a/src/core/channel/channel_stack.h b/src/core/channel/channel_stack.h
index 6732cc3018..c01050e717 100644
--- a/src/core/channel/channel_stack.h
+++ b/src/core/channel/channel_stack.h
@@ -51,6 +51,22 @@
typedef struct grpc_channel_element grpc_channel_element;
typedef struct grpc_call_element grpc_call_element;
+typedef struct grpc_channel_stack grpc_channel_stack;
+typedef struct grpc_call_stack grpc_call_stack;
+
+typedef struct {
+ grpc_channel_stack *channel_stack;
+ const grpc_channel_args *channel_args;
+ int is_first;
+ int is_last;
+} grpc_channel_element_args;
+
+typedef struct {
+ grpc_call_stack *call_stack;
+ const void *server_transport_data;
+ grpc_call_context_element *context;
+} grpc_call_element_args;
+
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
members)
@@ -84,8 +100,9 @@ typedef struct {
transport and is on the server. Most filters want to ignore this
argument. */
void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op);
+ grpc_call_element_args *args);
+ void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset);
/* Destroy per call data.
The filter does not need to do any chaining */
void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
@@ -99,9 +116,7 @@ typedef struct {
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining */
void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_channel *master, const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last);
+ grpc_channel_element_args *args);
/* Destroy per channel data.
The filter does not need to do any chaining */
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
@@ -132,16 +147,24 @@ struct grpc_call_element {
/* A channel stack tracks a set of related filters for one channel, and
guarantees they live within a single malloc() allocation */
-typedef struct {
+struct grpc_channel_stack {
+ grpc_stream_refcount refcount;
size_t count;
/* Memory required for a call stack (computed at channel stack
initialization) */
size_t call_stack_size;
-} grpc_channel_stack;
+};
/* A call stack tracks a set of related filters for one call, and guarantees
they live within a single malloc() allocation */
-typedef struct { size_t count; } grpc_call_stack;
+struct grpc_call_stack {
+ /* shared refcount for this channel stack.
+ MUST be the first element: the underlying code calls destroy
+ with the address of the refcount, but higher layers prefer to think
+ about the address of the call stack itself. */
+ grpc_stream_refcount refcount;
+ size_t count;
+};
/* Get a channel element given a channel stack and its index */
grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
@@ -156,12 +179,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
-void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
+void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
- size_t filter_count, grpc_channel *master,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context,
- grpc_channel_stack *stack);
+ size_t filter_count, const grpc_channel_args *args,
+ const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *stack);
@@ -170,13 +192,44 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack *channel_stack,
+ grpc_channel_stack *channel_stack, int initial_refs,
+ grpc_iomgr_cb_func destroy, void *destroy_arg,
+ grpc_call_context_element *context,
const void *transport_server_data,
- grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack);
+/* Set a pollset for a call stack: must occur before the first op is started */
+void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_stack *call_stack,
+ grpc_pollset *pollset);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define GRPC_CALL_STACK_REF(call_stack, reason) \
+ grpc_stream_ref(&(call_stack)->refcount, reason)
+#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason)
+#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
+ grpc_stream_ref(&(channel_stack)->refcount, reason)
+#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason)
+#else
+#define GRPC_CALL_STACK_REF(call_stack, reason) \
+ grpc_stream_ref(&(call_stack)->refcount)
+#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(call_stack)->refcount)
+#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
+ grpc_stream_ref(&(channel_stack)->refcount)
+#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
+ grpc_stream_unref(exec_ctx, &(channel_stack)->refcount)
+#endif
+
/* Destroy a call stack */
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack);
+/* Ignore set pollset - used by filters to implement the set_pollset method
+ if they don't care about pollsets at all. Does nothing. */
+void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_pollset *pollset);
/* Call the next operation in a call stack */
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op);
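
The GRPC_CALL_STACK_REF/UNREF and GRPC_CHANNEL_STACK_REF/UNREF macros above replace the old internal ref on the master grpc_channel: a named ref is taken before handing a closure to an asynchronous operation and dropped when the closure runs (client_channel.c later in this diff follows exactly this pattern for its "resolver" and "watch_lb_policy" refs). A minimal sketch, with the watcher struct and callback names invented for illustration:

/* Sketch only: keeping a channel stack alive across an async callback. */
typedef struct {
  grpc_channel_stack *stack;
  grpc_closure closure;
} example_watcher;

static void example_on_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  example_watcher *w = arg;
  /* ... react to the event ... */
  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->stack, "example_watch");
  gpr_free(w);
}

static void example_start_watch(grpc_channel_stack *stack) {
  example_watcher *w = gpr_malloc(sizeof(*w));
  w->stack = stack;
  grpc_closure_init(&w->closure, example_on_done, w);
  /* keep the stack alive until example_on_done runs */
  GRPC_CHANNEL_STACK_REF(stack, "example_watch");
  /* hand &w->closure to some asynchronous operation here */
}
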
diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c
index 9f85557ea1..385ae3be9b 100644
--- a/src/core/channel/client_channel.c
+++ b/src/core/channel/client_channel.c
@@ -43,6 +43,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
+#include "src/core/channel/subchannel_call_holder.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
@@ -51,20 +52,13 @@
/* Client channel implementation */
-typedef struct call_data call_data;
+typedef grpc_subchannel_call_holder call_data;
typedef struct client_channel_channel_data {
- /** metadata context for this channel */
- grpc_mdctx *mdctx;
/** resolver for this channel */
grpc_resolver *resolver;
/** have we started resolving this channel */
int started_resolving;
- /** master channel - the grpc_channel instance that ultimately owns
- this channel_data via its channel stack.
- We occasionally use this to bump the refcount on the master channel
- to keep ourselves alive through an asynchronous operation. */
- grpc_channel *master;
/** mutex protecting client configuration, including all
variables below in this data structure */
@@ -82,8 +76,10 @@ typedef struct client_channel_channel_data {
grpc_connectivity_state_tracker state_tracker;
/** when an lb_policy arrives, should we try to exit idle */
int exit_idle_when_lb_policy_arrives;
- /** pollset_set of interested parties in a new connection */
- grpc_pollset_set pollset_set;
+ /** owning stack */
+ grpc_channel_stack *owning_stack;
+ /** interested parties */
+ grpc_pollset_set interested_parties;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
@@ -98,360 +94,20 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
-typedef enum {
- CALL_CREATED,
- CALL_WAITING_FOR_SEND,
- CALL_WAITING_FOR_CONFIG,
- CALL_WAITING_FOR_PICK,
- CALL_WAITING_FOR_CALL,
- CALL_ACTIVE,
- CALL_CANCELLED
-} call_state;
-
-struct call_data {
- /* owning element */
- grpc_call_element *elem;
-
- gpr_mu mu_state;
-
- call_state state;
- gpr_timespec deadline;
- grpc_subchannel *picked_channel;
- grpc_closure async_setup_task;
- grpc_transport_stream_op waiting_op;
- /* our child call stack */
- grpc_subchannel_call *subchannel_call;
- grpc_linked_mdelem status;
- grpc_linked_mdelem details;
-};
-
-static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
- grpc_transport_stream_op *new_op)
- GRPC_MUST_USE_RESULT;
-
-static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (op->send_ops) {
- grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
- }
- if (op->recv_ops) {
- char status[GPR_LTOA_MIN_BUFSIZE];
- grpc_metadata_batch mdb;
- gpr_ltoa(GRPC_STATUS_CANCELLED, status);
- calld->status.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
- calld->details.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
- calld->status.prev = calld->details.next = NULL;
- calld->status.next = &calld->details;
- calld->details.prev = &calld->status;
- mdb.list.head = &calld->status;
- mdb.list.tail = &calld->details;
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- grpc_sopb_add_metadata(op->recv_ops, mdb);
- *op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
- }
- if (op->on_consumed) {
- op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
- }
-}
-
typedef struct {
grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
-static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op,
- int continuation);
-
-static void continue_with_pick(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- waiting_call *wc = arg;
- call_data *calld = wc->elem->call_data;
- perform_transport_stream_op(exec_ctx, wc->elem, &calld->waiting_op, 1);
- gpr_free(wc);
-}
-
-static void add_to_lb_policy_wait_queue_locked_state_config(
- grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- waiting_call *wc = gpr_malloc(sizeof(*wc));
- grpc_closure_init(&wc->closure, continue_with_pick, wc);
- wc->elem = elem;
- grpc_closure_list_add(&chand->waiting_for_config_closures, &wc->closure, 1);
-}
-
-static int is_empty(void *p, int len) {
- char *ptr = p;
- int i;
- for (i = 0; i < len; i++) {
- if (ptr[i] != 0) return 0;
- }
- return 1;
-}
-
-static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- call_data *calld = arg;
- grpc_transport_stream_op op;
- int have_waiting;
-
- if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
- memset(&op, 0, sizeof(op));
- op.cancel_with_status = GRPC_STATUS_CANCELLED;
- gpr_mu_unlock(&calld->mu_state);
- grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
- } else if (calld->state == CALL_WAITING_FOR_CALL) {
- have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
- if (calld->subchannel_call != NULL) {
- calld->state = CALL_ACTIVE;
- gpr_mu_unlock(&calld->mu_state);
- if (have_waiting) {
- grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
- &calld->waiting_op);
- }
- } else {
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&calld->mu_state);
- if (have_waiting) {
- handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
- }
- }
- } else {
- GPR_ASSERT(calld->state == CALL_CANCELLED);
- gpr_mu_unlock(&calld->mu_state);
- }
-}
-
-static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- call_data *calld = arg;
- gpr_mu_lock(&calld->mu_state);
- started_call_locked(exec_ctx, arg, iomgr_success);
-}
-
-static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- call_data *calld = arg;
- grpc_pollset *pollset;
- grpc_subchannel_call_create_status call_creation_status;
-
- GPR_TIMER_BEGIN("picked_target", 0);
-
- if (calld->picked_channel == NULL) {
- /* treat this like a cancellation */
- calld->waiting_op.cancel_with_status = GRPC_STATUS_UNAVAILABLE;
- perform_transport_stream_op(exec_ctx, calld->elem, &calld->waiting_op, 1);
- } else {
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == CALL_CANCELLED) {
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
- } else {
- GPR_ASSERT(calld->state == CALL_WAITING_FOR_PICK);
- calld->state = CALL_WAITING_FOR_CALL;
- pollset = calld->waiting_op.bind_pollset;
- grpc_closure_init(&calld->async_setup_task, started_call, calld);
- call_creation_status = grpc_subchannel_create_call(
- exec_ctx, calld->picked_channel, pollset, &calld->subchannel_call,
- &calld->async_setup_task);
- if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
- started_call_locked(exec_ctx, calld, iomgr_success);
- } else {
- gpr_mu_unlock(&calld->mu_state);
- }
- }
- }
-
- GPR_TIMER_END("picked_target", 0);
-}
-
-static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
- grpc_transport_stream_op *new_op) {
- call_data *calld = elem->call_data;
- grpc_closure *consumed_op = NULL;
- grpc_transport_stream_op *waiting_op = &calld->waiting_op;
- GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
- GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
- if (new_op->send_ops != NULL) {
- waiting_op->send_ops = new_op->send_ops;
- waiting_op->is_last_send = new_op->is_last_send;
- waiting_op->on_done_send = new_op->on_done_send;
- }
- if (new_op->recv_ops != NULL) {
- waiting_op->recv_ops = new_op->recv_ops;
- waiting_op->recv_state = new_op->recv_state;
- waiting_op->on_done_recv = new_op->on_done_recv;
- }
- if (new_op->on_consumed != NULL) {
- if (waiting_op->on_consumed != NULL) {
- consumed_op = waiting_op->on_consumed;
- }
- waiting_op->on_consumed = new_op->on_consumed;
- }
- if (new_op->cancel_with_status != GRPC_STATUS_OK) {
- waiting_op->cancel_with_status = new_op->cancel_with_status;
- }
- return consumed_op;
-}
-
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_subchannel_call *subchannel_call;
- char *result;
-
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == CALL_ACTIVE) {
- subchannel_call = calld->subchannel_call;
- GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
- gpr_mu_unlock(&calld->mu_state);
- result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
- return result;
- } else {
- gpr_mu_unlock(&calld->mu_state);
- return grpc_channel_get_target(chand->master);
- }
-}
-
-static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op,
- int continuation) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_subchannel_call *subchannel_call;
- grpc_lb_policy *lb_policy;
- grpc_transport_stream_op op2;
- GPR_TIMER_BEGIN("perform_transport_stream_op", 0);
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-
- gpr_mu_lock(&calld->mu_state);
- switch (calld->state) {
- case CALL_ACTIVE:
- GPR_ASSERT(!continuation);
- subchannel_call = calld->subchannel_call;
- gpr_mu_unlock(&calld->mu_state);
- grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
- break;
- case CALL_CANCELLED:
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- break;
- case CALL_WAITING_FOR_SEND:
- GPR_ASSERT(!continuation);
- grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
- if (!calld->waiting_op.send_ops &&
- calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
- gpr_mu_unlock(&calld->mu_state);
- break;
- }
- *op = calld->waiting_op;
- memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
- continuation = 1;
- /* fall through */
- case CALL_WAITING_FOR_CONFIG:
- case CALL_WAITING_FOR_PICK:
- case CALL_WAITING_FOR_CALL:
- if (!continuation) {
- if (op->cancel_with_status != GRPC_STATUS_OK) {
- calld->state = CALL_CANCELLED;
- op2 = calld->waiting_op;
- memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
- if (op->on_consumed) {
- calld->waiting_op.on_consumed = op->on_consumed;
- op->on_consumed = NULL;
- } else if (op2.on_consumed) {
- calld->waiting_op.on_consumed = op2.on_consumed;
- op2.on_consumed = NULL;
- }
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- handle_op_after_cancellation(exec_ctx, elem, &op2);
- } else {
- grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
- gpr_mu_unlock(&calld->mu_state);
- }
- break;
- }
- /* fall through */
- case CALL_CREATED:
- if (op->cancel_with_status != GRPC_STATUS_OK) {
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- } else {
- calld->waiting_op = *op;
-
- if (op->send_ops == NULL) {
- /* need to have some send ops before we can select the
- lb target */
- calld->state = CALL_WAITING_FOR_SEND;
- gpr_mu_unlock(&calld->mu_state);
- } else {
- gpr_mu_lock(&chand->mu_config);
- lb_policy = chand->lb_policy;
- if (lb_policy) {
- grpc_transport_stream_op *waiting_op = &calld->waiting_op;
- grpc_pollset *bind_pollset = waiting_op->bind_pollset;
- grpc_metadata_batch *initial_metadata =
- &waiting_op->send_ops->ops[0].data.metadata;
- GRPC_LB_POLICY_REF(lb_policy, "pick");
- gpr_mu_unlock(&chand->mu_config);
- calld->state = CALL_WAITING_FOR_PICK;
-
- GPR_ASSERT(waiting_op->bind_pollset);
- GPR_ASSERT(waiting_op->send_ops);
- GPR_ASSERT(waiting_op->send_ops->nops >= 1);
- GPR_ASSERT(waiting_op->send_ops->ops[0].type == GRPC_OP_METADATA);
- gpr_mu_unlock(&calld->mu_state);
-
- grpc_closure_init(&calld->async_setup_task, picked_target, calld);
- grpc_lb_policy_pick(exec_ctx, lb_policy, bind_pollset,
- initial_metadata, &calld->picked_channel,
- &calld->async_setup_task);
-
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick");
- } else if (chand->resolver != NULL) {
- calld->state = CALL_WAITING_FOR_CONFIG;
- add_to_lb_policy_wait_queue_locked_state_config(elem);
- if (!chand->started_resolving && chand->resolver != NULL) {
- GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
- chand->started_resolving = 1;
- grpc_resolver_next(exec_ctx, chand->resolver,
- &chand->incoming_configuration,
- &chand->on_config_changed);
- }
- gpr_mu_unlock(&chand->mu_config);
- gpr_mu_unlock(&calld->mu_state);
- } else {
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&chand->mu_config);
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- }
- }
- }
- break;
- }
-
- GPR_TIMER_END("perform_transport_stream_op", 0);
+ return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
- perform_transport_stream_op(exec_ctx, elem, op, 0);
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
@@ -460,10 +116,18 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void on_lb_policy_state_changed_locked(
grpc_exec_ctx *exec_ctx, lb_policy_connectivity_watcher *w) {
+ grpc_connectivity_state publish_state = w->state;
/* check if the notification is for a stale policy */
if (w->lb_policy != w->chand->lb_policy) return;
- grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, w->state,
+ if (publish_state == GRPC_CHANNEL_FATAL_FAILURE &&
+ w->chand->resolver != NULL) {
+ publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+ grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
+ GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
+ w->chand->lb_policy = NULL;
+ }
+ grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, publish_state,
"lb_changed");
if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
@@ -478,7 +142,7 @@ static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
on_lb_policy_state_changed_locked(exec_ctx, w);
gpr_mu_unlock(&w->chand->mu_config);
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->chand->master, "watch_lb_policy");
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
gpr_free(w);
}
@@ -486,7 +150,7 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
- GRPC_CHANNEL_INTERNAL_REF(chand->master, "watch_lb_policy");
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
@@ -518,6 +182,11 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
chand->incoming_configuration = NULL;
+ if (lb_policy != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, &lb_policy->interested_parties,
+ &chand->interested_parties);
+ }
+
gpr_mu_lock(&chand->mu_config);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
@@ -539,7 +208,7 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
watch_lb_policy(exec_ctx, chand, lb_policy, state);
}
gpr_mu_unlock(&chand->mu_config);
- GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel-next");
@@ -561,7 +230,9 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
if (old_lb_policy != NULL) {
- grpc_lb_policy_shutdown(exec_ctx, old_lb_policy);
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ &old_lb_policy->interested_parties,
+ &chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
}
@@ -569,20 +240,22 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
}
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->master, "resolver");
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- grpc_lb_policy *lb_policy = NULL;
channel_data *chand = elem->channel_data;
grpc_resolver *destroy_resolver = NULL;
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
GPR_ASSERT(op->set_accept_stream == NULL);
- GPR_ASSERT(op->bind_pollset == NULL);
+ if (op->bind_pollset != NULL) {
+ grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties,
+ op->bind_pollset);
+ }
gpr_mu_lock(&chand->mu_config);
if (op->on_connectivity_state_change != NULL) {
@@ -593,11 +266,14 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
op->connectivity_state = NULL;
}
- if (!is_empty(op, sizeof(*op))) {
- lb_policy = chand->lb_policy;
- if (lb_policy) {
- GRPC_LB_POLICY_REF(lb_policy, "broadcast");
+ if (op->send_ping != NULL) {
+ if (chand->lb_policy == NULL) {
+ grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, 0);
+ } else {
+ grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
+ op->bind_pollset = NULL;
}
+ op->send_ping = NULL;
}
if (op->disconnect && chand->resolver != NULL) {
@@ -606,7 +282,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
destroy_resolver = chand->resolver;
chand->resolver = NULL;
if (chand->lb_policy != NULL) {
- grpc_lb_policy_shutdown(exec_ctx, chand->lb_policy);
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ &chand->lb_policy->interested_parties,
+ &chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
chand->lb_policy = NULL;
}
@@ -617,79 +295,119 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_resolver_shutdown(exec_ctx, destroy_resolver);
GRPC_RESOLVER_UNREF(exec_ctx, destroy_resolver, "channel");
}
+}
- if (lb_policy) {
- grpc_lb_policy_broadcast(exec_ctx, lb_policy, op);
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "broadcast");
+typedef struct {
+ grpc_metadata_batch *initial_metadata;
+ grpc_connected_subchannel **connected_subchannel;
+ grpc_closure *on_ready;
+ grpc_call_element *elem;
+ grpc_closure closure;
+} continue_picking_args;
+
+static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready);
+
+static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, int success) {
+ continue_picking_args *cpa = arg;
+ if (!success) {
+ grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
+ } else if (cpa->connected_subchannel == NULL) {
+ /* cancelled, do nothing */
+ } else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
+ cpa->connected_subchannel, cpa->on_ready)) {
+ grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 1);
}
+ gpr_free(cpa);
}
-/* Constructor for call_data */
-static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready) {
+ grpc_call_element *elem = elemp;
+ channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
+ continue_picking_args *cpa;
+ grpc_closure *closure;
- /* TODO(ctiller): is there something useful we can do here? */
- GPR_ASSERT(initial_op == NULL);
+ GPR_ASSERT(connected_subchannel);
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
- GPR_ASSERT(server_transport_data == NULL);
- gpr_mu_init(&calld->mu_state);
- calld->elem = elem;
- calld->state = CALL_CREATED;
- calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ gpr_mu_lock(&chand->mu_config);
+ if (initial_metadata == NULL) {
+ if (chand->lb_policy != NULL) {
+ grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
+ connected_subchannel);
+ }
+ for (closure = chand->waiting_for_config_closures.head; closure != NULL;
+ closure = grpc_closure_next(closure)) {
+ cpa = closure->cb_arg;
+ if (cpa->connected_subchannel == connected_subchannel) {
+ cpa->connected_subchannel = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
+ }
+ }
+ gpr_mu_unlock(&chand->mu_config);
+ return 1;
+ }
+ if (chand->lb_policy != NULL) {
+ int r =
+ grpc_lb_policy_pick(exec_ctx, chand->lb_policy, calld->pollset,
+ initial_metadata, connected_subchannel, on_ready);
+ gpr_mu_unlock(&chand->mu_config);
+ return r;
+ }
+ if (chand->resolver != NULL && !chand->started_resolving) {
+ chand->started_resolving = 1;
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ grpc_resolver_next(exec_ctx, chand->resolver,
+ &chand->incoming_configuration,
+ &chand->on_config_changed);
+ }
+ cpa = gpr_malloc(sizeof(*cpa));
+ cpa->initial_metadata = initial_metadata;
+ cpa->connected_subchannel = connected_subchannel;
+ cpa->on_ready = on_ready;
+ cpa->elem = elem;
+ grpc_closure_init(&cpa->closure, continue_picking, cpa);
+ grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure, 1);
+ gpr_mu_unlock(&chand->mu_config);
+ return 0;
+}
+
+/* Constructor for call_data */
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
+ args->call_stack);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call;
-
- /* if the call got activated, we need to destroy the child stack also, and
- remove it from the in-flight requests tracked by the child_entry we
- picked */
- gpr_mu_lock(&calld->mu_state);
- switch (calld->state) {
- case CALL_ACTIVE:
- subchannel_call = calld->subchannel_call;
- gpr_mu_unlock(&calld->mu_state);
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_channel");
- break;
- case CALL_CREATED:
- case CALL_CANCELLED:
- gpr_mu_unlock(&calld->mu_state);
- break;
- case CALL_WAITING_FOR_PICK:
- case CALL_WAITING_FOR_CONFIG:
- case CALL_WAITING_FOR_CALL:
- case CALL_WAITING_FOR_SEND:
- GPR_UNREACHABLE_CODE(return );
- }
+ grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
- GPR_ASSERT(is_last);
+ GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
gpr_mu_init(&chand->mu_config);
- chand->mdctx = metadata_context;
- chand->master = master;
- grpc_pollset_set_init(&chand->pollset_set);
grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
+ chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
+ grpc_pollset_set_init(&chand->interested_parties);
}
/* Destructor for channel_data */
@@ -702,17 +420,26 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
if (chand->lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ &chand->lb_policy->interested_parties,
+ &chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
- grpc_pollset_set_destroy(&chand->pollset_set);
+ grpc_pollset_set_destroy(&chand->interested_parties);
gpr_mu_destroy(&chand->mu_config);
}
+static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ calld->pollset = pollset;
+}
+
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_stream_op, cc_start_transport_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, cc_get_peer, "client-channel",
+ init_call_elem, cc_set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, cc_get_peer, "client-channel",
};
void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
@@ -728,7 +455,7 @@ void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
chand->exit_idle_when_lb_policy_arrives) {
chand->started_resolving = 1;
- GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
}
@@ -747,7 +474,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
} else {
chand->exit_idle_when_lb_policy_arrives = 1;
if (!chand->started_resolving && chand->resolver != NULL) {
- GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = 1;
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
@@ -759,32 +486,39 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
return out;
}
+typedef struct {
+ channel_data *chand;
+ grpc_pollset *pollset;
+ grpc_closure *on_complete;
+ grpc_closure my_closure;
+} external_connectivity_watcher;
+
+static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
+ external_connectivity_watcher *w = arg;
+ grpc_closure *follow_up = w->on_complete;
+ grpc_pollset_set_del_pollset(exec_ctx, &w->chand->interested_parties,
+ w->pollset);
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
+ "external_connectivity_watcher");
+ gpr_free(w);
+ follow_up->cb(exec_ctx, follow_up->cb_arg, iomgr_success);
+}
+
void grpc_client_channel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
+ external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+ w->chand = chand;
+ w->pollset = pollset;
+ w->on_complete = on_complete;
+ grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties, pollset);
+ grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
+ GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
+ "external_connectivity_watcher");
gpr_mu_lock(&chand->mu_config);
grpc_connectivity_state_notify_on_state_change(
- exec_ctx, &chand->state_tracker, state, on_complete);
+ exec_ctx, &chand->state_tracker, state, &w->my_closure);
gpr_mu_unlock(&chand->mu_config);
}
-
-grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
- grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
- return &chand->pollset_set;
-}
-
-void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_pollset *pollset) {
- channel_data *chand = elem->channel_data;
- grpc_pollset_set_add_pollset(exec_ctx, &chand->pollset_set, pollset);
-}
-
-void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_pollset *pollset) {
- channel_data *chand = elem->channel_data;
- grpc_pollset_set_del_pollset(exec_ctx, &chand->pollset_set, pollset);
-}
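
The pick function installed by init_call_elem above (cc_pick_subchannel, handed to grpc_subchannel_call_holder_init) follows a synchronous/asynchronous contract that can be read off continue_picking: it returns nonzero when *connected_subchannel has been decided immediately, returns zero and schedules on_ready later when the pick must wait for an lb_policy or resolver result, and treats a NULL initial_metadata as a request to cancel a pending pick. A hedged sketch of a caller reacting to that contract; the real driver lives in subchannel_call_holder.c (not shown here), and example_try_pick is an illustrative name that would have to live in this same file since cc_pick_subchannel is static:

/* Sketch only: reacting to the synchronous/asynchronous pick split. */
static void example_try_pick(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                             grpc_metadata_batch *initial_metadata,
                             grpc_connected_subchannel **target,
                             grpc_closure *on_ready) {
  if (cc_pick_subchannel(exec_ctx, elem, initial_metadata, target, on_ready)) {
    /* decided synchronously: *target is set (or the pick was cancelled);
       the caller proceeds immediately and on_ready is not used */
  } else {
    /* pending: on_ready will be scheduled once an lb_policy pick or a
       resolver result completes the decision */
  }
}
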
diff --git a/src/core/channel/client_channel.h b/src/core/channel/client_channel.h
index 5103f07a43..d9bc4971f1 100644
--- a/src/core/channel/client_channel.h
+++ b/src/core/channel/client_channel.h
@@ -57,17 +57,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_channel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
-grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
- grpc_channel_element *elem);
-
-void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *channel,
- grpc_pollset *pollset);
-void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *channel,
- grpc_pollset *pollset);
-
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H */
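
With the interested-party helpers above removed, a connectivity watcher now passes the pollset that should poll for the notification directly to grpc_client_channel_watch_connectivity_state; the channel adds it to its interested_parties set and removes it again when the watch fires. A hedged usage sketch follows; the wrapper function and callback are illustrative, and the real caller in this commit is surface/channel_connectivity.c, which is not shown here:

/* Sketch only: watching client-channel connectivity with the new signature. */
static void example_on_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                     int success) {
  grpc_connectivity_state *state = arg;
  gpr_log(GPR_DEBUG, "connectivity changed to %d", *state);
}

static void example_watch(grpc_exec_ctx *exec_ctx,
                          grpc_channel_element *client_channel_elem,
                          grpc_pollset *pollset,
                          grpc_connectivity_state *state,
                          grpc_closure *closure) {
  grpc_closure_init(closure, example_on_state_changed, state);
  grpc_client_channel_watch_connectivity_state(exec_ctx, client_channel_elem,
                                               pollset, state, closure);
}
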
diff --git a/src/core/channel/client_uchannel.c b/src/core/channel/client_uchannel.c
index 19a8fa1f79..2c0b07d8bf 100644
--- a/src/core/channel/client_uchannel.c
+++ b/src/core/channel/client_uchannel.c
@@ -39,6 +39,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/compress_filter.h"
+#include "src/core/channel/subchannel_call_holder.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
@@ -52,23 +53,18 @@
/** Microchannel (uchannel) implementation: a lightweight channel without any
* load-balancing mechanisms meant for communication from within the core. */
-typedef struct call_data call_data;
-
typedef struct client_uchannel_channel_data {
- /** metadata context for this channel */
- grpc_mdctx *mdctx;
-
/** master channel - the grpc_channel instance that ultimately owns
this channel_data via its channel stack.
We occasionally use this to bump the refcount on the master channel
to keep ourselves alive through an asynchronous operation. */
- grpc_channel *master;
+ grpc_channel_stack *owning_stack;
/** connectivity state being tracked */
grpc_connectivity_state_tracker state_tracker;
/** the subchannel wrapped by the microchannel */
- grpc_subchannel *subchannel;
+ grpc_connected_subchannel *connected_subchannel;
/** the callback used to stay subscribed to subchannel connectivity
* notifications */
@@ -80,85 +76,7 @@ typedef struct client_uchannel_channel_data {
gpr_mu mu_state;
} channel_data;
-typedef enum {
- CALL_CREATED,
- CALL_WAITING_FOR_SEND,
- CALL_WAITING_FOR_CALL,
- CALL_ACTIVE,
- CALL_CANCELLED
-} call_state;
-
-struct call_data {
- /* owning element */
- grpc_call_element *elem;
-
- gpr_mu mu_state;
-
- call_state state;
- gpr_timespec deadline;
- grpc_closure async_setup_task;
- grpc_transport_stream_op waiting_op;
- /* our child call stack */
- grpc_subchannel_call *subchannel_call;
- grpc_linked_mdelem status;
- grpc_linked_mdelem details;
-};
-
-static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
- grpc_transport_stream_op *new_op)
- GRPC_MUST_USE_RESULT;
-
-static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (op->send_ops) {
- grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
- }
- if (op->recv_ops) {
- char status[GPR_LTOA_MIN_BUFSIZE];
- grpc_metadata_batch mdb;
- gpr_ltoa(GRPC_STATUS_CANCELLED, status);
- calld->status.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
- calld->details.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
- calld->status.prev = calld->details.next = NULL;
- calld->status.next = &calld->details;
- calld->details.prev = &calld->status;
- mdb.list.head = &calld->status;
- mdb.list.tail = &calld->details;
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- grpc_sopb_add_metadata(op->recv_ops, mdb);
- *op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
- }
- if (op->on_consumed) {
- op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
- }
-}
-
-typedef struct {
- grpc_closure closure;
- grpc_call_element *elem;
-} waiting_call;
-
-static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op,
- int continuation);
-
-static int is_empty(void *p, int len) {
- char *ptr = p;
- int i;
- for (i = 0; i < len; i++) {
- if (ptr[i] != 0) return 0;
- }
- return 1;
-}
+typedef grpc_subchannel_call_holder call_data;
static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
@@ -166,206 +84,20 @@ static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
chand->subchannel_connectivity,
"uchannel_monitor_subchannel");
- grpc_subchannel_notify_on_state_change(exec_ctx, chand->subchannel,
- &chand->subchannel_connectivity,
- &chand->connectivity_cb);
-}
-
-static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- call_data *calld = arg;
- grpc_transport_stream_op op;
- int have_waiting;
-
- if (calld->state == CALL_CANCELLED && iomgr_success == 0) {
- have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
- gpr_mu_unlock(&calld->mu_state);
- if (have_waiting) {
- handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
- }
- } else if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
- memset(&op, 0, sizeof(op));
- op.cancel_with_status = GRPC_STATUS_CANCELLED;
- gpr_mu_unlock(&calld->mu_state);
- grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
- } else if (calld->state == CALL_WAITING_FOR_CALL) {
- have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
- if (calld->subchannel_call != NULL) {
- calld->state = CALL_ACTIVE;
- gpr_mu_unlock(&calld->mu_state);
- if (have_waiting) {
- grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
- &calld->waiting_op);
- }
- } else {
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&calld->mu_state);
- if (have_waiting) {
- handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
- }
- }
- } else {
- GPR_ASSERT(calld->state == CALL_CANCELLED);
- gpr_mu_unlock(&calld->mu_state);
- have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
- if (have_waiting) {
- handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
- }
- }
-}
-
-static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- call_data *calld = arg;
- gpr_mu_lock(&calld->mu_state);
- started_call_locked(exec_ctx, arg, iomgr_success);
-}
-
-static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
- grpc_transport_stream_op *new_op) {
- call_data *calld = elem->call_data;
- grpc_closure *consumed_op = NULL;
- grpc_transport_stream_op *waiting_op = &calld->waiting_op;
- GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
- GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
- if (new_op->send_ops != NULL) {
- waiting_op->send_ops = new_op->send_ops;
- waiting_op->is_last_send = new_op->is_last_send;
- waiting_op->on_done_send = new_op->on_done_send;
- }
- if (new_op->recv_ops != NULL) {
- waiting_op->recv_ops = new_op->recv_ops;
- waiting_op->recv_state = new_op->recv_state;
- waiting_op->on_done_recv = new_op->on_done_recv;
- }
- if (new_op->on_consumed != NULL) {
- if (waiting_op->on_consumed != NULL) {
- consumed_op = waiting_op->on_consumed;
- }
- waiting_op->on_consumed = new_op->on_consumed;
- }
- if (new_op->cancel_with_status != GRPC_STATUS_OK) {
- waiting_op->cancel_with_status = new_op->cancel_with_status;
- }
- return consumed_op;
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, chand->connected_subchannel, NULL,
+ &chand->subchannel_connectivity, &chand->connectivity_cb);
}
static char *cuc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_subchannel_call *subchannel_call;
- char *result;
-
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == CALL_ACTIVE) {
- subchannel_call = calld->subchannel_call;
- GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
- gpr_mu_unlock(&calld->mu_state);
- result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
- return result;
- } else {
- gpr_mu_unlock(&calld->mu_state);
- return grpc_channel_get_target(chand->master);
- }
-}
-
-static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op,
- int continuation) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_subchannel_call *subchannel_call;
- grpc_transport_stream_op op2;
- GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-
- gpr_mu_lock(&calld->mu_state);
- /* make sure the wrapped subchannel has been set (see
- * grpc_client_uchannel_set_subchannel) */
- GPR_ASSERT(chand->subchannel != NULL);
-
- switch (calld->state) {
- case CALL_ACTIVE:
- GPR_ASSERT(!continuation);
- subchannel_call = calld->subchannel_call;
- gpr_mu_unlock(&calld->mu_state);
- grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
- break;
- case CALL_CANCELLED:
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- break;
- case CALL_WAITING_FOR_SEND:
- GPR_ASSERT(!continuation);
- grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
- if (!calld->waiting_op.send_ops &&
- calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
- gpr_mu_unlock(&calld->mu_state);
- break;
- }
- *op = calld->waiting_op;
- memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
- continuation = 1;
- /* fall through */
- case CALL_WAITING_FOR_CALL:
- if (!continuation) {
- if (op->cancel_with_status != GRPC_STATUS_OK) {
- calld->state = CALL_CANCELLED;
- op2 = calld->waiting_op;
- memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
- if (op->on_consumed) {
- calld->waiting_op.on_consumed = op->on_consumed;
- op->on_consumed = NULL;
- } else if (op2.on_consumed) {
- calld->waiting_op.on_consumed = op2.on_consumed;
- op2.on_consumed = NULL;
- }
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- handle_op_after_cancellation(exec_ctx, elem, &op2);
- grpc_subchannel_cancel_waiting_call(exec_ctx, chand->subchannel, 1);
- } else {
- grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
- gpr_mu_unlock(&calld->mu_state);
- }
- break;
- }
- /* fall through */
- case CALL_CREATED:
- if (op->cancel_with_status != GRPC_STATUS_OK) {
- calld->state = CALL_CANCELLED;
- gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(exec_ctx, elem, op);
- } else {
- calld->waiting_op = *op;
- if (op->send_ops == NULL) {
- calld->state = CALL_WAITING_FOR_SEND;
- gpr_mu_unlock(&calld->mu_state);
- } else {
- grpc_subchannel_call_create_status call_creation_status;
- grpc_pollset *pollset = calld->waiting_op.bind_pollset;
- calld->state = CALL_WAITING_FOR_CALL;
- grpc_closure_init(&calld->async_setup_task, started_call, calld);
- call_creation_status = grpc_subchannel_create_call(
- exec_ctx, chand->subchannel, pollset, &calld->subchannel_call,
- &calld->async_setup_task);
- if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
- started_call_locked(exec_ctx, calld, 1);
- } else {
- gpr_mu_unlock(&calld->mu_state);
- }
- }
- }
- break;
- }
+ return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cuc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
- perform_transport_stream_op(exec_ctx, elem, op, 0);
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -392,64 +124,39 @@ static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
}
}
+static int cuc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready) {
+ channel_data *chand = arg;
+ GPR_ASSERT(initial_metadata != NULL);
+ *connected_subchannel = chand->connected_subchannel;
+ return 1;
+}
+
/* Constructor for call_data */
static void cuc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
- call_data *calld = elem->call_data;
- memset(calld, 0, sizeof(call_data));
-
- /* TODO(ctiller): is there something useful we can do here? */
- GPR_ASSERT(initial_op == NULL);
-
- GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
- GPR_ASSERT(server_transport_data == NULL);
- gpr_mu_init(&calld->mu_state);
- calld->elem = elem;
- calld->state = CALL_CREATED;
- calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ grpc_call_element_args *args) {
+ grpc_subchannel_call_holder_init(elem->call_data, cuc_pick_subchannel,
+ elem->channel_data, args->call_stack);
}
/* Destructor for call_data */
static void cuc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call;
-
- /* if the call got activated, we need to destroy the child stack also, and
- remove it from the in-flight requests tracked by the child_entry we
- picked */
- gpr_mu_lock(&calld->mu_state);
- switch (calld->state) {
- case CALL_ACTIVE:
- subchannel_call = calld->subchannel_call;
- gpr_mu_unlock(&calld->mu_state);
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_uchannel");
- break;
- case CALL_CREATED:
- case CALL_CANCELLED:
- gpr_mu_unlock(&calld->mu_state);
- break;
- case CALL_WAITING_FOR_CALL:
- case CALL_WAITING_FOR_SEND:
- GPR_UNREACHABLE_CODE(return );
- }
+ grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
- grpc_channel *master,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
grpc_closure_init(&chand->connectivity_cb, monitor_subchannel, chand);
- GPR_ASSERT(is_last);
+ GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
- chand->mdctx = metadata_context;
- chand->master = master;
+ chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_uchannel");
gpr_mu_init(&chand->mu_state);
@@ -459,40 +166,41 @@ static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void cuc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
- grpc_subchannel_state_change_unsubscribe(exec_ctx, chand->subchannel,
- &chand->connectivity_cb);
+ /* cancel subscription */
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, chand->connected_subchannel, NULL, NULL,
+ &chand->connectivity_cb);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
gpr_mu_destroy(&chand->mu_state);
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, chand->connected_subchannel,
+ "uchannel");
+}
+
+static void cuc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ calld->pollset = pollset;
}
const grpc_channel_filter grpc_client_uchannel_filter = {
cuc_start_transport_stream_op, cuc_start_transport_op, sizeof(call_data),
- cuc_init_call_elem, cuc_destroy_call_elem, sizeof(channel_data),
- cuc_init_channel_elem, cuc_destroy_channel_elem, cuc_get_peer,
- "client-uchannel",
+ cuc_init_call_elem, cuc_set_pollset, cuc_destroy_call_elem,
+ sizeof(channel_data), cuc_init_channel_elem, cuc_destroy_channel_elem,
+ cuc_get_peer, "client-uchannel",
};
grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
grpc_connectivity_state out;
- out = grpc_connectivity_state_check(&chand->state_tracker);
gpr_mu_lock(&chand->mu_state);
- if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
- grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
- GRPC_CHANNEL_CONNECTING,
- "uchannel_connecting_changed");
- chand->subchannel_connectivity = out;
- grpc_subchannel_notify_on_state_change(exec_ctx, chand->subchannel,
- &chand->subchannel_connectivity,
- &chand->connectivity_cb);
- }
+ out = grpc_connectivity_state_check(&chand->state_tracker);
gpr_mu_unlock(&chand->mu_state);
return out;
}
void grpc_client_uchannel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
gpr_mu_lock(&chand->mu_state);
@@ -501,45 +209,14 @@ void grpc_client_uchannel_watch_connectivity_state(
gpr_mu_unlock(&chand->mu_state);
}
-grpc_pollset_set *grpc_client_uchannel_get_connecting_pollset_set(
- grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
- grpc_channel_element *parent_elem;
- gpr_mu_lock(&chand->mu_state);
- parent_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(
- grpc_subchannel_get_master(chand->subchannel)));
- gpr_mu_unlock(&chand->mu_state);
- return grpc_client_channel_get_connecting_pollset_set(parent_elem);
-}
-
-void grpc_client_uchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_pollset *pollset) {
- grpc_pollset_set *master_pollset_set =
- grpc_client_uchannel_get_connecting_pollset_set(elem);
- grpc_pollset_set_add_pollset(exec_ctx, master_pollset_set, pollset);
-}
-
-void grpc_client_uchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_pollset *pollset) {
- grpc_pollset_set *master_pollset_set =
- grpc_client_uchannel_get_connecting_pollset_set(elem);
- grpc_pollset_set_del_pollset(exec_ctx, master_pollset_set, pollset);
-}
-
grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
grpc_channel_args *args) {
grpc_channel *channel = NULL;
#define MAX_FILTERS 3
const grpc_channel_filter *filters[MAX_FILTERS];
- grpc_mdctx *mdctx = grpc_subchannel_get_mdctx(subchannel);
- grpc_channel *master = grpc_subchannel_get_master(subchannel);
- char *target = grpc_channel_get_target(master);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t n = 0;
- grpc_mdctx_ref(mdctx);
if (grpc_channel_args_is_census_enabled(args)) {
filters[n++] = &grpc_client_census_filter;
}
@@ -547,20 +224,20 @@ grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
filters[n++] = &grpc_client_uchannel_filter;
GPR_ASSERT(n <= MAX_FILTERS);
- channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, n,
- args, mdctx, 1);
+ channel =
+ grpc_channel_create_from_filters(&exec_ctx, NULL, filters, n, args, 1);
- gpr_free(target);
return channel;
}
-void grpc_client_uchannel_set_subchannel(grpc_channel *uchannel,
- grpc_subchannel *subchannel) {
+void grpc_client_uchannel_set_connected_subchannel(
+ grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel) {
grpc_channel_element *elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(uchannel));
channel_data *chand = elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
gpr_mu_lock(&chand->mu_state);
- chand->subchannel = subchannel;
+ chand->connected_subchannel = connected_subchannel;
+ GRPC_CONNECTED_SUBCHANNEL_REF(connected_subchannel, "uchannel");
gpr_mu_unlock(&chand->mu_state);
}
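One API nuance worth calling out in the rewritten uchannel: grpc_connected_subchannel_notify_on_state_change is used both to (re)arm the connectivity watch and, with a NULL state pointer, to cancel it (compare monitor_subchannel with cuc_destroy_channel_elem above). A hypothetical pair of wrappers making the two call shapes explicit; the wrapper names are placeholders, and the third NULL argument is simply reproduced from the calls in this file.

static void subscribe_to_connectivity(grpc_exec_ctx *exec_ctx,
                                      channel_data *chand) {
  /* non-NULL state pointer: install (or re-arm) the watch */
  grpc_connected_subchannel_notify_on_state_change(
      exec_ctx, chand->connected_subchannel, NULL,
      &chand->subchannel_connectivity, &chand->connectivity_cb);
}

static void cancel_connectivity_watch(grpc_exec_ctx *exec_ctx,
                                      channel_data *chand) {
  /* NULL state pointer: cancel the watch, as the channel destructor does */
  grpc_connected_subchannel_notify_on_state_change(
      exec_ctx, chand->connected_subchannel, NULL, NULL,
      &chand->connectivity_cb);
}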
diff --git a/src/core/channel/client_uchannel.h b/src/core/channel/client_uchannel.h
index dfe6695ae3..92a831493c 100644
--- a/src/core/channel/client_uchannel.h
+++ b/src/core/channel/client_uchannel.h
@@ -48,23 +48,13 @@ grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_uchannel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
-grpc_pollset_set *grpc_client_uchannel_get_connecting_pollset_set(
- grpc_channel_element *elem);
-
-void grpc_client_uchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *channel,
- grpc_pollset *pollset);
-void grpc_client_uchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *channel,
- grpc_pollset *pollset);
-
grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
grpc_channel_args *args);
-void grpc_client_uchannel_set_subchannel(grpc_channel *uchannel,
- grpc_subchannel *subchannel);
+void grpc_client_uchannel_set_connected_subchannel(
+ grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_MICROCHANNEL_H */
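A hypothetical wiring sketch for the two entry points declared above; make_uchannel is a placeholder, and the sub/connected arguments are assumed to come from the client_config subchannel machinery.

static grpc_channel *make_uchannel(grpc_subchannel *sub,
                                   grpc_connected_subchannel *connected,
                                   grpc_channel_args *args) {
  /* build the lightweight channel stack around the subchannel... */
  grpc_channel *uchannel = grpc_client_uchannel_create(sub, args);
  /* ...then hand it the already-established connection; the filter takes its
   * own ref on the connected subchannel (see the setter in client_uchannel.c) */
  grpc_client_uchannel_set_connected_subchannel(uchannel, connected);
  return uchannel;
}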
diff --git a/src/core/channel/compress_filter.c b/src/core/channel/compress_filter.c
index 20b5084044..cc8e191628 100644
--- a/src/core/channel/compress_filter.c
+++ b/src/core/channel/compress_filter.c
@@ -39,61 +39,44 @@
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/channel/compress_filter.h"
#include "src/core/channel/channel_args.h"
-#include "src/core/profiling/timers.h"
+#include "src/core/channel/compress_filter.h"
+#include "src/core/compression/algorithm_metadata.h"
#include "src/core/compression/message_compress.h"
+#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
+#include "src/core/transport/static_metadata.h"
typedef struct call_data {
gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
gpr_uint32 remaining_slice_bytes;
- /**< Input data to be read, as per BEGIN_MESSAGE */
- int written_initial_metadata; /**< Already processed initial md? */
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
/** If true, contents of \a compression_algorithm are authoritative */
int has_compression_algorithm;
+
+ grpc_transport_stream_op send_op;
+ gpr_uint32 send_length;
+ gpr_uint32 send_flags;
+ gpr_slice incoming_slice;
+ grpc_slice_buffer_stream replacement_stream;
+ grpc_closure *post_send;
+ grpc_closure send_done;
+ grpc_closure got_slice;
} call_data;
typedef struct channel_data {
- /** Metadata key for the incoming (requested) compression algorithm */
- grpc_mdstr *mdstr_request_compression_algorithm_key;
- /** Metadata key for the outgoing (used) compression algorithm */
- grpc_mdstr *mdstr_outgoing_compression_algorithm_key;
- /** Metadata key for the accepted encodings */
- grpc_mdstr *mdstr_compression_capabilities_key;
- /** Precomputed metadata elements for all available compression algorithms */
- grpc_mdelem *mdelem_compression_algorithms[GRPC_COMPRESS_ALGORITHMS_COUNT];
- /** Precomputed metadata elements for the accepted encodings */
- grpc_mdelem *mdelem_accept_encoding;
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
/** Compression options for the channel */
grpc_compression_options compression_options;
+ /** Supported compression algorithms */
+ gpr_uint32 supported_compression_algorithms;
} channel_data;
-/** Compress \a slices in place using \a algorithm. Returns 1 if compression did
- * actually happen, 0 otherwise (for example if the compressed output size was
- * larger than the raw input).
- *
- * Returns 1 if the data was actually compressed and 0 otherwise. */
-static int compress_send_sb(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *slices) {
- int did_compress;
- gpr_slice_buffer tmp;
- gpr_slice_buffer_init(&tmp);
- did_compress = grpc_msg_compress(algorithm, slices, &tmp);
- if (did_compress) {
- gpr_slice_buffer_swap(slices, &tmp);
- }
- gpr_slice_buffer_destroy(&tmp);
- return did_compress;
-}
-
/** For each \a md element from the incoming metadata, filter out the entry for
* "grpc-encoding", using its value to populate the call data's
* compression_algorithm field. */
@@ -102,7 +85,7 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
- if (md->key == channeld->mdstr_request_compression_algorithm_key) {
+ if (md->key == GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST) {
const char *md_c_str = grpc_mdstr_as_c_string(md->value);
if (!grpc_compression_algorithm_parse(md_c_str, strlen(md_c_str),
&calld->compression_algorithm)) {
@@ -127,7 +110,9 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
return md;
}
-static int skip_compression(channel_data *channeld, call_data *calld) {
+static int skip_compression(grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ channel_data *channeld = elem->channel_data;
if (calld->has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return 1;
@@ -138,169 +123,126 @@ static int skip_compression(channel_data *channeld, call_data *calld) {
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
}
-/** Assembles a new grpc_stream_op_buffer with the compressed slices, modifying
- * the associated GRPC_OP_BEGIN_MESSAGE accordingly (new compressed length,
- * flags indicating compression is in effect) and replaces \a send_ops with it.
- * */
-static void finish_compressed_sopb(grpc_stream_op_buffer *send_ops,
- grpc_call_element *elem) {
- size_t i;
+/** Filter initial metadata */
+static void process_send_initial_metadata(
+ grpc_call_element *elem, grpc_metadata_batch *initial_metadata) {
call_data *calld = elem->call_data;
- int new_slices_added = 0; /* GPR_FALSE */
- grpc_metadata_batch metadata;
- grpc_stream_op_buffer new_send_ops;
- grpc_sopb_init(&new_send_ops);
-
- for (i = 0; i < send_ops->nops; i++) {
- grpc_stream_op *sop = &send_ops->ops[i];
- switch (sop->type) {
- case GRPC_OP_BEGIN_MESSAGE:
- GPR_ASSERT(calld->slices.length <= GPR_UINT32_MAX);
- grpc_sopb_add_begin_message(
- &new_send_ops, (gpr_uint32)calld->slices.length,
- sop->data.begin_message.flags | GRPC_WRITE_INTERNAL_COMPRESS);
- break;
- case GRPC_OP_SLICE:
- /* Once we reach the slices section of the original buffer, simply add
- * all the new (compressed) slices. We obviously want to do this only
- * once, hence the "new_slices_added" guard. */
- if (!new_slices_added) {
- size_t j;
- for (j = 0; j < calld->slices.count; ++j) {
- grpc_sopb_add_slice(&new_send_ops,
- gpr_slice_ref(calld->slices.slices[j]));
- }
- new_slices_added = 1; /* GPR_TRUE */
- }
- break;
- case GRPC_OP_METADATA:
- /* move the metadata to the new buffer. */
- grpc_metadata_batch_move(&metadata, &sop->data.metadata);
- grpc_sopb_add_metadata(&new_send_ops, metadata);
- break;
- case GRPC_NO_OP:
- break;
- }
+ channel_data *channeld = elem->channel_data;
+ /* Parse incoming request for compression. If any, it'll be available
+ * at calld->compression_algorithm */
+ grpc_metadata_batch_filter(initial_metadata, compression_md_filter, elem);
+ if (!calld->has_compression_algorithm) {
+ /* If no algorithm was found in the metadata and we aren't
+ * exceptionally skipping compression, fall back to the channel
+ * default */
+ calld->compression_algorithm = channeld->default_compression_algorithm;
+ calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
- grpc_sopb_swap(send_ops, &new_send_ops);
- grpc_sopb_destroy(&new_send_ops);
+ /* hint compression algorithm */
+ grpc_metadata_batch_add_tail(
+ initial_metadata, &calld->compression_algorithm_storage,
+ grpc_compression_encoding_mdelem(calld->compression_algorithm));
+
+ /* convey supported compression algorithms */
+ grpc_metadata_batch_add_tail(initial_metadata,
+ &calld->accept_encoding_storage,
+ GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
+ channeld->supported_compression_algorithms));
}
-/** Filter's "main" function, called for any incoming grpc_transport_stream_op
- * instance that holds a non-zero number of send operations, accessible to this
- * function in \a send_ops. */
-static void process_send_ops(grpc_call_element *elem,
- grpc_stream_op_buffer *send_ops) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
- size_t i;
- int did_compress = 0;
+static void continue_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
- /* In streaming calls, we need to reset the previously accumulated slices */
+static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
+ grpc_call_element *elem = elemp;
+ call_data *calld = elem->call_data;
gpr_slice_buffer_reset_and_unref(&calld->slices);
- for (i = 0; i < send_ops->nops; ++i) {
- grpc_stream_op *sop = &send_ops->ops[i];
- switch (sop->type) {
- case GRPC_OP_BEGIN_MESSAGE:
- /* buffer up slices until we've processed all the expected ones (as
- * given by GRPC_OP_BEGIN_MESSAGE) */
- calld->remaining_slice_bytes = sop->data.begin_message.length;
- if (sop->data.begin_message.flags & GRPC_WRITE_NO_COMPRESS) {
- calld->has_compression_algorithm = 1; /* GPR_TRUE */
- calld->compression_algorithm = GRPC_COMPRESS_NONE;
- }
- break;
- case GRPC_OP_METADATA:
- if (!calld->written_initial_metadata) {
- /* Parse incoming request for compression. If any, it'll be available
- * at calld->compression_algorithm */
- grpc_metadata_batch_filter(&(sop->data.metadata),
- compression_md_filter, elem);
- if (!calld->has_compression_algorithm) {
- /* If no algorithm was found in the metadata and we aren't
- * exceptionally skipping compression, fall back to the channel
- * default */
- calld->compression_algorithm =
- channeld->default_compression_algorithm;
- calld->has_compression_algorithm = 1; /* GPR_TRUE */
- }
- /* hint compression algorithm */
- grpc_metadata_batch_add_tail(
- &(sop->data.metadata), &calld->compression_algorithm_storage,
- GRPC_MDELEM_REF(channeld->mdelem_compression_algorithms
- [calld->compression_algorithm]));
-
- /* convey supported compression algorithms */
- grpc_metadata_batch_add_tail(
- &(sop->data.metadata), &calld->accept_encoding_storage,
- GRPC_MDELEM_REF(channeld->mdelem_accept_encoding));
-
- calld->written_initial_metadata = 1; /* GPR_TRUE */
- }
- break;
- case GRPC_OP_SLICE:
- if (skip_compression(channeld, calld)) continue;
- GPR_ASSERT(calld->remaining_slice_bytes > 0);
- /* Increase input ref count, gpr_slice_buffer_add takes ownership. */
- gpr_slice_buffer_add(&calld->slices, gpr_slice_ref(sop->data.slice));
- GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) <=
- calld->remaining_slice_bytes);
- calld->remaining_slice_bytes -=
- (gpr_uint32)GPR_SLICE_LENGTH(sop->data.slice);
- if (calld->remaining_slice_bytes == 0) {
- did_compress =
- compress_send_sb(calld->compression_algorithm, &calld->slices);
- }
- break;
- case GRPC_NO_OP:
- break;
- }
- }
+ calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, success);
+}
- /* Modify the send_ops stream_op_buffer depending on whether compression was
- * carried out */
+static void finish_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ int did_compress;
+ gpr_slice_buffer tmp;
+ gpr_slice_buffer_init(&tmp);
+ did_compress =
+ grpc_msg_compress(calld->compression_algorithm, &calld->slices, &tmp);
if (did_compress) {
- finish_compressed_sopb(send_ops, elem);
+ gpr_slice_buffer_swap(&calld->slices, &tmp);
+ calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
+ }
+ gpr_slice_buffer_destroy(&tmp);
+
+ grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
+ calld->send_flags);
+ calld->send_op.send_message = &calld->replacement_stream.base;
+ calld->post_send = calld->send_op.on_complete;
+ calld->send_op.on_complete = &calld->send_done;
+
+ grpc_call_next_op(exec_ctx, elem, &calld->send_op);
+}
+
+static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
+ grpc_call_element *elem = elemp;
+ call_data *calld = elem->call_data;
+ gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ if (calld->send_length == calld->slices.length) {
+ finish_send_message(exec_ctx, elem);
+ } else {
+ continue_send_message(exec_ctx, elem);
+ }
+}
+
+static void continue_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
+ &calld->incoming_slice, ~(size_t)0,
+ &calld->got_slice)) {
+ gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ if (calld->send_length == calld->slices.length) {
+ finish_send_message(exec_ctx, elem);
+ break;
+ }
}
}
-/* Called either:
- - in response to an API call (or similar) from above, to send something
- - a network event (or similar) from below, to receive something
- op contains type and call direction information, in addition to the data
- that is being sent or received. */
static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+
GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
- if (op->send_ops && op->send_ops->nops > 0) {
- process_send_ops(elem, op->send_ops);
+ if (op->send_initial_metadata) {
+ process_send_initial_metadata(elem, op->send_initial_metadata);
+ }
+ if (op->send_message != NULL && !skip_compression(elem) &&
+ 0 == (op->send_message->flags & GRPC_WRITE_NO_COMPRESS)) {
+ calld->send_op = *op;
+ calld->send_length = op->send_message->length;
+ calld->send_flags = op->send_message->flags;
+ continue_send_message(exec_ctx, elem);
+ } else {
+ /* pass control down the stack */
+ grpc_call_next_op(exec_ctx, elem, op);
}
GPR_TIMER_END("compress_start_transport_stream_op", 0);
-
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
gpr_slice_buffer_init(&calld->slices);
calld->has_compression_algorithm = 0;
- calld->written_initial_metadata = 0; /* GPR_FALSE */
-
- if (initial_op) {
- if (initial_op->send_ops && initial_op->send_ops->nops > 0) {
- process_send_ops(elem, initial_op->send_ops);
- }
- }
+ grpc_closure_init(&calld->got_slice, got_slice, elem);
+ grpc_closure_init(&calld->send_done, send_done, elem);
}
/* Destructor for call_data */
@@ -313,85 +255,43 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
grpc_compression_algorithm algo_idx;
- const char *supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT - 1];
- size_t supported_algorithms_idx = 0;
- char *accept_encoding_str;
- size_t accept_encoding_str_len;
grpc_compression_options_init(&channeld->compression_options);
channeld->compression_options.enabled_algorithms_bitset =
- (gpr_uint32)grpc_channel_args_compression_algorithm_get_states(args);
+ (gpr_uint32)grpc_channel_args_compression_algorithm_get_states(
+ args->channel_args);
channeld->default_compression_algorithm =
- grpc_channel_args_get_compression_algorithm(args);
+ grpc_channel_args_get_compression_algorithm(args->channel_args);
/* Make sure the default isn't disabled. */
GPR_ASSERT(grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options, channeld->default_compression_algorithm));
channeld->compression_options.default_compression_algorithm =
channeld->default_compression_algorithm;
- channeld->mdstr_request_compression_algorithm_key =
- grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY);
-
- channeld->mdstr_outgoing_compression_algorithm_key =
- grpc_mdstr_from_string(mdctx, "grpc-encoding");
-
- channeld->mdstr_compression_capabilities_key =
- grpc_mdstr_from_string(mdctx, "grpc-accept-encoding");
-
+ channeld->supported_compression_algorithms = 0;
for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
- char *algorithm_name;
/* skip disabled algorithms */
if (grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options, algo_idx) == 0) {
continue;
}
- GPR_ASSERT(grpc_compression_algorithm_name(algo_idx, &algorithm_name) != 0);
- channeld->mdelem_compression_algorithms[algo_idx] =
- grpc_mdelem_from_metadata_strings(
- mdctx,
- GRPC_MDSTR_REF(channeld->mdstr_outgoing_compression_algorithm_key),
- grpc_mdstr_from_string(mdctx, algorithm_name));
- if (algo_idx > 0) {
- supported_algorithms_names[supported_algorithms_idx++] = algorithm_name;
- }
+ channeld->supported_compression_algorithms |= 1u << algo_idx;
}
- /* TODO(dgq): gpr_strjoin_sep could be made to work with statically allocated
- * arrays, as to avoid the heap allocs */
- accept_encoding_str =
- gpr_strjoin_sep(supported_algorithms_names, supported_algorithms_idx, ",",
- &accept_encoding_str_len);
-
- channeld->mdelem_accept_encoding = grpc_mdelem_from_metadata_strings(
- mdctx, GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
- grpc_mdstr_from_string(mdctx, accept_encoding_str));
- gpr_free(accept_encoding_str);
-
- GPR_ASSERT(!is_last);
+ GPR_ASSERT(!args->is_last);
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *channeld = elem->channel_data;
- grpc_compression_algorithm algo_idx;
-
- GRPC_MDSTR_UNREF(channeld->mdstr_request_compression_algorithm_key);
- GRPC_MDSTR_UNREF(channeld->mdstr_outgoing_compression_algorithm_key);
- GRPC_MDSTR_UNREF(channeld->mdstr_compression_capabilities_key);
- for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
- GRPC_MDELEM_UNREF(channeld->mdelem_compression_algorithms[algo_idx]);
- }
- GRPC_MDELEM_UNREF(channeld->mdelem_accept_encoding);
-}
+ grpc_channel_element *elem) {}
const grpc_channel_filter grpc_compress_filter = {
compress_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, grpc_call_next_get_peer, "compress"};
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "compress"};
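The new send path above pulls the outgoing message out of a byte stream slice by slice before compressing it. A condensed sketch of that pull pattern, under the assumption (suggested by continue_send_message/got_slice) that grpc_byte_stream_next returns nonzero when a slice is available synchronously and otherwise schedules the supplied closure; drain_stream and its parameters are hypothetical names.

static void drain_stream(grpc_exec_ctx *exec_ctx, grpc_byte_stream *stream,
                         gpr_slice_buffer *out, gpr_slice *slice,
                         grpc_closure *on_slice_ready) {
  /* loop while slices arrive synchronously; when the stream returns 0,
   * on_slice_ready fires later and the caller re-enters (cf. got_slice) */
  while (grpc_byte_stream_next(exec_ctx, stream, slice, ~(size_t)0,
                               on_slice_ready)) {
    gpr_slice_buffer_add(out, *slice);
    if (out->length == stream->length) {
      /* whole message buffered: compression can proceed
       * (cf. finish_send_message above) */
      break;
    }
  }
}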
diff --git a/src/core/channel/connected_channel.c b/src/core/channel/connected_channel.c
index 6d4d7be632..e8eb9dcfc5 100644
--- a/src/core/channel/connected_channel.c
+++ b/src/core/channel/connected_channel.c
@@ -83,19 +83,26 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- r = grpc_transport_init_stream(exec_ctx, chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld),
- server_transport_data, initial_op);
+ r = grpc_transport_init_stream(
+ exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+ &args->call_stack->refcount, args->server_transport_data);
GPR_ASSERT(r == 0);
}
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ grpc_transport_set_pollset(exec_ctx, chand->transport,
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollset);
+}
+
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
@@ -108,11 +115,10 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *cd = (channel_data *)elem->channel_data;
- GPR_ASSERT(is_last);
+ GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
cd->transport = NULL;
}
@@ -132,8 +138,8 @@ static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
const grpc_channel_filter grpc_connected_channel_filter = {
con_start_transport_stream_op, con_start_transport_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, con_get_peer, "connected",
+ init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, con_get_peer, "connected",
};
void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
@@ -154,3 +160,8 @@ void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
channel. */
channel_stack->call_stack_size += grpc_transport_stream_size(transport);
}
+
+grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
+}
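Across this commit the grpc_channel_filter initializers apparently gain a set_pollset slot between the call constructor and destructor (see the connected, compress and uchannel vtables). A sketch of the resulting layout for a pass-through filter; every example_* symbol and the "example-filter" name are hypothetical placeholders, and the slot comments describe roles inferred from the initializers in this diff rather than official field names.

static const grpc_channel_filter example_filter = {
    example_start_transport_stream_op,  /* per-call op entry point */
    grpc_channel_next_op,               /* channel-level ops: pass down */
    sizeof(example_call_data),          /* call_data size */
    example_init_call_elem,             /* call constructor */
    grpc_call_stack_ignore_set_pollset, /* new slot: pollset binding (ignored) */
    example_destroy_call_elem,          /* call destructor */
    sizeof(example_channel_data),       /* channel_data size */
    example_init_channel_elem,          /* channel constructor */
    example_destroy_channel_elem,       /* channel destructor */
    grpc_call_next_get_peer,            /* get_peer: delegate downward */
    "example-filter"};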
diff --git a/src/core/channel/connected_channel.h b/src/core/channel/connected_channel.h
index eac6eb7ebe..95c1834bfa 100644
--- a/src/core/channel/connected_channel.h
+++ b/src/core/channel/connected_channel.h
@@ -46,4 +46,6 @@ extern const grpc_channel_filter grpc_connected_channel_filter;
void grpc_connected_channel_bind_transport(grpc_channel_stack* channel_stack,
grpc_transport* transport);
+grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem);
+
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H */
diff --git a/src/core/channel/http_client_filter.c b/src/core/channel/http_client_filter.c
index f78a5cc315..65cfb778bb 100644
--- a/src/core/channel/http_client_filter.c
+++ b/src/core/channel/http_client_filter.c
@@ -31,12 +31,13 @@
*/
#include "src/core/channel/http_client_filter.h"
-#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/support/string.h"
+#include <string.h>
#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/transport/static_metadata.h"
typedef struct call_data {
grpc_linked_mdelem method;
@@ -45,10 +46,8 @@ typedef struct call_data {
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
- int sent_initial_metadata;
- int got_initial_metadata;
- grpc_stream_op_buffer *recv_ops;
+ grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hc_on_recv hook */
grpc_closure *on_done_recv;
@@ -59,12 +58,7 @@ typedef struct call_data {
} call_data;
typedef struct channel_data {
- grpc_mdelem *te_trailers;
- grpc_mdelem *method;
- grpc_mdelem *scheme;
- grpc_mdelem *content_type;
- grpc_mdelem *status;
- /** complete user agent mdelem */
+ grpc_mdelem *static_scheme;
grpc_mdelem *user_agent;
} channel_data;
@@ -75,14 +69,12 @@ typedef struct {
static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
client_recv_filter_args *a = user_data;
- grpc_call_element *elem = a->elem;
- channel_data *channeld = elem->channel_data;
- if (md == channeld->status) {
+ if (md == GRPC_MDELEM_STATUS_200) {
return NULL;
- } else if (md->key == channeld->status->key) {
- grpc_call_element_send_cancel(a->exec_ctx, elem);
+ } else if (md->key == GRPC_MDSTR_STATUS) {
+ grpc_call_element_send_cancel(a->exec_ctx, a->elem);
return NULL;
- } else if (md->key == channeld->content_type->key) {
+ } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
return NULL;
}
return md;
@@ -91,30 +83,21 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- client_recv_filter_args a;
- if (op->type != GRPC_OP_METADATA) continue;
- calld->got_initial_metadata = 1;
- a.elem = elem;
- a.exec_ctx = exec_ctx;
- grpc_metadata_batch_filter(&op->data.metadata, client_recv_filter, &a);
- }
+ client_recv_filter_args a;
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, client_recv_filter,
+ &a);
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
- channel_data *channeld = elem->channel_data;
/* eat the things we'd like to set ourselves */
- if (md->key == channeld->method->key) return NULL;
- if (md->key == channeld->scheme->key) return NULL;
- if (md->key == channeld->te_trailers->key) return NULL;
- if (md->key == channeld->content_type->key) return NULL;
- if (md->key == channeld->user_agent->key) return NULL;
+ if (md->key == GRPC_MDSTR_METHOD) return NULL;
+ if (md->key == GRPC_MDSTR_SCHEME) return NULL;
+ if (md->key == GRPC_MDSTR_TE) return NULL;
+ if (md->key == GRPC_MDSTR_CONTENT_TYPE) return NULL;
+ if (md->key == GRPC_MDSTR_USER_AGENT) return NULL;
return md;
}
@@ -123,40 +106,29 @@ static void hc_mutate_op(grpc_call_element *elem,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
- size_t i;
- if (op->send_ops && !calld->sent_initial_metadata) {
- size_t nops = op->send_ops->nops;
- grpc_stream_op *ops = op->send_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *stream_op = &ops[i];
- if (stream_op->type != GRPC_OP_METADATA) continue;
- calld->sent_initial_metadata = 1;
- grpc_metadata_batch_filter(&stream_op->data.metadata, client_strip_filter,
- elem);
- /* Send : prefixed headers, which have to be before any application
- layer headers. */
- grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->method,
- GRPC_MDELEM_REF(channeld->method));
- grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->scheme,
- GRPC_MDELEM_REF(channeld->scheme));
- grpc_metadata_batch_add_tail(&stream_op->data.metadata,
- &calld->te_trailers,
- GRPC_MDELEM_REF(channeld->te_trailers));
- grpc_metadata_batch_add_tail(&stream_op->data.metadata,
- &calld->content_type,
- GRPC_MDELEM_REF(channeld->content_type));
- grpc_metadata_batch_add_tail(&stream_op->data.metadata,
- &calld->user_agent,
- GRPC_MDELEM_REF(channeld->user_agent));
- break;
- }
+ if (op->send_initial_metadata != NULL) {
+ grpc_metadata_batch_filter(op->send_initial_metadata, client_strip_filter,
+ elem);
+ /* Send : prefixed headers, which have to be before any application
+ layer headers. */
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
+ GRPC_MDELEM_METHOD_POST);
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
+ channeld->static_scheme);
+ grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
+ GRPC_MDELEM_TE_TRAILERS);
+ grpc_metadata_batch_add_tail(
+ op->send_initial_metadata, &calld->content_type,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
+ grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->user_agent,
+ GRPC_MDELEM_REF(channeld->user_agent));
}
- if (op->recv_ops && !calld->got_initial_metadata) {
+ if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->hc_on_recv;
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->on_complete;
+ op->on_complete = &calld->hc_on_recv;
}
}
@@ -172,35 +144,38 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- calld->sent_initial_metadata = 0;
- calld->got_initial_metadata = 0;
calld->on_done_recv = NULL;
grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
- if (initial_op) hc_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
-static const char *scheme_from_args(const grpc_channel_args *args) {
+static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
+ size_t j;
+ grpc_mdelem *valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
+ GRPC_MDELEM_SCHEME_HTTPS};
if (args != NULL) {
for (i = 0; i < args->num_args; ++i) {
if (args->args[i].type == GRPC_ARG_STRING &&
strcmp(args->args[i].key, GRPC_ARG_HTTP2_SCHEME) == 0) {
- return args->args[i].value.string;
+ for (j = 0; j < GPR_ARRAY_SIZE(valid_schemes); j++) {
+ if (0 == strcmp(grpc_mdstr_as_c_string(valid_schemes[j]->value),
+ args->args[i].value.string)) {
+ return valid_schemes[j];
+ }
+ }
}
}
}
- return "http";
+ return GRPC_MDELEM_SCHEME_HTTP;
}
-static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
- const grpc_channel_args *args) {
+static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args) {
gpr_strvec v;
size_t i;
int is_first = 1;
@@ -242,7 +217,7 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
tmp = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
- result = grpc_mdstr_from_string(mdctx, tmp);
+ result = grpc_mdstr_from_string(tmp);
gpr_free(tmp);
return result;
@@ -250,46 +225,24 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *channel_args,
- grpc_mdctx *mdctx, int is_first, int is_last) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_last);
-
- /* initialize members */
- channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
- channeld->method = grpc_mdelem_from_strings(mdctx, ":method", "POST");
- channeld->scheme = grpc_mdelem_from_strings(mdctx, ":scheme",
- scheme_from_args(channel_args));
- channeld->content_type =
- grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
- channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200");
- channeld->user_agent = grpc_mdelem_from_metadata_strings(
- mdctx, grpc_mdstr_from_string(mdctx, "user-agent"),
- user_agent_from_args(mdctx, channel_args));
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
+ GPR_ASSERT(!args->is_last);
+ chand->static_scheme = scheme_from_args(args->channel_args);
+ chand->user_agent = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args));
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- GRPC_MDELEM_UNREF(channeld->te_trailers);
- GRPC_MDELEM_UNREF(channeld->method);
- GRPC_MDELEM_UNREF(channeld->scheme);
- GRPC_MDELEM_UNREF(channeld->content_type);
- GRPC_MDELEM_UNREF(channeld->status);
- GRPC_MDELEM_UNREF(channeld->user_agent);
+ channel_data *chand = elem->channel_data;
+ GRPC_MDELEM_UNREF(chand->user_agent);
}
const grpc_channel_filter grpc_http_client_filter = {
hc_start_transport_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
- "http-client"};
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "http-client"};
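A hypothetical caller-side sketch of steering scheme_from_args above toward the https static element via the GRPC_ARG_HTTP2_SCHEME channel arg; make_https_args is a placeholder, static storage is used only to keep the sketch self-contained, and any unrecognized value falls back to the http element as the code above shows.

static grpc_channel_args *make_https_args(void) {
  static grpc_arg arg;
  static grpc_channel_args args;
  arg.type = GRPC_ARG_STRING;
  arg.key = GRPC_ARG_HTTP2_SCHEME;
  arg.value.string = "https"; /* matched against the valid_schemes table */
  args.num_args = 1;
  args.args = &arg;
  return &args;
}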
diff --git a/src/core/channel/http_server_filter.c b/src/core/channel/http_server_filter.c
index 99e5066a4e..ae8660da92 100644
--- a/src/core/channel/http_server_filter.c
+++ b/src/core/channel/http_server_filter.c
@@ -33,13 +33,13 @@
#include "src/core/channel/http_server_filter.h"
-#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <string.h>
#include "src/core/profiling/timers.h"
+#include "src/core/transport/static_metadata.h"
typedef struct call_data {
- gpr_uint8 got_initial_metadata;
gpr_uint8 seen_path;
gpr_uint8 seen_post;
gpr_uint8 sent_status;
@@ -49,7 +49,7 @@ typedef struct call_data {
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
- grpc_stream_op_buffer *recv_ops;
+ grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
@@ -58,22 +58,7 @@ typedef struct call_data {
grpc_closure hs_on_recv;
} call_data;
-typedef struct channel_data {
- grpc_mdelem *te_trailers;
- grpc_mdelem *method_post;
- grpc_mdelem *http_scheme;
- grpc_mdelem *https_scheme;
- /* TODO(klempner): Remove this once we stop using it */
- grpc_mdelem *grpc_scheme;
- grpc_mdelem *content_type;
- grpc_mdelem *status_ok;
- grpc_mdelem *status_not_found;
- grpc_mdstr *path_key;
- grpc_mdstr *authority_key;
- grpc_mdstr *host_key;
-
- grpc_mdctx *mdctx;
-} channel_data;
+typedef struct channel_data { gpr_uint8 unused; } channel_data;
typedef struct {
grpc_call_element *elem;
@@ -83,25 +68,24 @@ typedef struct {
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
server_filter_args *a = user_data;
grpc_call_element *elem = a->elem;
- channel_data *channeld = elem->channel_data;
call_data *calld = elem->call_data;
/* Check if it is one of the headers we care about. */
- if (md == channeld->te_trailers || md == channeld->method_post ||
- md == channeld->http_scheme || md == channeld->https_scheme ||
- md == channeld->grpc_scheme || md == channeld->content_type) {
+ if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
+ md == GRPC_MDELEM_SCHEME_HTTP || md == GRPC_MDELEM_SCHEME_HTTPS ||
+ md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
/* swallow it */
- if (md == channeld->method_post) {
+ if (md == GRPC_MDELEM_METHOD_POST) {
calld->seen_post = 1;
- } else if (md->key == channeld->http_scheme->key) {
+ } else if (md->key == GRPC_MDSTR_SCHEME) {
calld->seen_scheme = 1;
- } else if (md == channeld->te_trailers) {
+ } else if (md == GRPC_MDELEM_TE_TRAILERS) {
calld->seen_te_trailers = 1;
}
/* TODO(klempner): Track that we've seen all the headers we should
require */
return NULL;
- } else if (md->key == channeld->content_type->key) {
+ } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
if (strncmp(grpc_mdstr_as_c_string(md->value), "application/grpc+", 17) ==
0) {
/* Although the C implementation doesn't (currently) generate them,
@@ -113,12 +97,11 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
gpr_log(GPR_INFO, "Unexpected content-type %s",
- channeld->content_type->key);
+ grpc_mdstr_as_c_string(md->value));
}
return NULL;
- } else if (md->key == channeld->te_trailers->key ||
- md->key == channeld->method_post->key ||
- md->key == channeld->http_scheme->key) {
+ } else if (md->key == GRPC_MDSTR_TE || md->key == GRPC_MDSTR_METHOD ||
+ md->key == GRPC_MDSTR_SCHEME) {
gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
/* swallow it and error everything out. */
@@ -126,23 +109,21 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
on the wire here. */
grpc_call_element_send_cancel(a->exec_ctx, elem);
return NULL;
- } else if (md->key == channeld->path_key) {
+ } else if (md->key == GRPC_MDSTR_PATH) {
if (calld->seen_path) {
gpr_log(GPR_ERROR, "Received :path twice");
return NULL;
}
calld->seen_path = 1;
return md;
- } else if (md->key == channeld->authority_key) {
+ } else if (md->key == GRPC_MDSTR_AUTHORITY) {
calld->seen_authority = 1;
return md;
- } else if (md->key == channeld->host_key) {
+ } else if (md->key == GRPC_MDSTR_HOST) {
/* translate host to :authority since :authority may be
omitted */
grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
- channeld->mdctx, GRPC_MDSTR_REF(channeld->authority_key),
- GRPC_MDSTR_REF(md->value));
- GRPC_MDELEM_UNREF(md);
+ GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
calld->seen_authority = 1;
return authority;
} else {
@@ -154,43 +135,35 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (success) {
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- server_filter_args a;
- if (op->type != GRPC_OP_METADATA) continue;
- calld->got_initial_metadata = 1;
- a.elem = elem;
- a.exec_ctx = exec_ctx;
- grpc_metadata_batch_filter(&op->data.metadata, server_filter, &a);
- /* Have we seen the required http2 transport headers?
- (:method, :scheme, content-type, with :path and :authority covered
- at the channel level right now) */
- if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
- calld->seen_path && calld->seen_authority) {
- /* do nothing */
- } else {
- if (!calld->seen_path) {
- gpr_log(GPR_ERROR, "Missing :path header");
- }
- if (!calld->seen_authority) {
- gpr_log(GPR_ERROR, "Missing :authority header");
- }
- if (!calld->seen_post) {
- gpr_log(GPR_ERROR, "Missing :method header");
- }
- if (!calld->seen_scheme) {
- gpr_log(GPR_ERROR, "Missing :scheme header");
- }
- if (!calld->seen_te_trailers) {
- gpr_log(GPR_ERROR, "Missing te trailers header");
- }
- /* Error this call out */
- success = 0;
- grpc_call_element_send_cancel(exec_ctx, elem);
+ server_filter_args a;
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, &a);
+ /* Have we seen the required http2 transport headers?
+ (:method, :scheme, content-type, with :path and :authority covered
+ at the channel level right now) */
+ if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
+ calld->seen_path && calld->seen_authority) {
+ /* do nothing */
+ } else {
+ if (!calld->seen_path) {
+ gpr_log(GPR_ERROR, "Missing :path header");
+ }
+ if (!calld->seen_authority) {
+ gpr_log(GPR_ERROR, "Missing :authority header");
+ }
+ if (!calld->seen_post) {
+ gpr_log(GPR_ERROR, "Missing :method header");
+ }
+ if (!calld->seen_scheme) {
+ gpr_log(GPR_ERROR, "Missing :scheme header");
+ }
+ if (!calld->seen_te_trailers) {
+ gpr_log(GPR_ERROR, "Missing te trailers header");
}
+ /* Error this call out */
+ success = 0;
+ grpc_call_element_send_cancel(exec_ctx, elem);
}
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
@@ -200,30 +173,21 @@ static void hs_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
- size_t i;
- if (op->send_ops && !calld->sent_status) {
- size_t nops = op->send_ops->nops;
- grpc_stream_op *ops = op->send_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *stream_op = &ops[i];
- if (stream_op->type != GRPC_OP_METADATA) continue;
- calld->sent_status = 1;
- grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->status,
- GRPC_MDELEM_REF(channeld->status_ok));
- grpc_metadata_batch_add_tail(&stream_op->data.metadata,
- &calld->content_type,
- GRPC_MDELEM_REF(channeld->content_type));
- break;
- }
+ if (op->send_initial_metadata != NULL && !calld->sent_status) {
+ calld->sent_status = 1;
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->status,
+ GRPC_MDELEM_STATUS_200);
+ grpc_metadata_batch_add_tail(
+ op->send_initial_metadata, &calld->content_type,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
}
- if (op->recv_ops && !calld->got_initial_metadata) {
+ if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->hs_on_recv;
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->on_complete;
+ op->on_complete = &calld->hs_on_recv;
}
}
@@ -239,14 +203,12 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
- if (initial_op) hs_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
@@ -255,57 +217,17 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
-
- /* initialize members */
- channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
- channeld->status_ok = grpc_mdelem_from_strings(mdctx, ":status", "200");
- channeld->status_not_found =
- grpc_mdelem_from_strings(mdctx, ":status", "404");
- channeld->method_post = grpc_mdelem_from_strings(mdctx, ":method", "POST");
- channeld->http_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "http");
- channeld->https_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "https");
- channeld->grpc_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "grpc");
- channeld->path_key = grpc_mdstr_from_string(mdctx, ":path");
- channeld->authority_key = grpc_mdstr_from_string(mdctx, ":authority");
- channeld->host_key = grpc_mdstr_from_string(mdctx, "host");
- channeld->content_type =
- grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
-
- channeld->mdctx = mdctx;
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ GPR_ASSERT(!args->is_last);
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- GRPC_MDELEM_UNREF(channeld->te_trailers);
- GRPC_MDELEM_UNREF(channeld->status_ok);
- GRPC_MDELEM_UNREF(channeld->status_not_found);
- GRPC_MDELEM_UNREF(channeld->method_post);
- GRPC_MDELEM_UNREF(channeld->http_scheme);
- GRPC_MDELEM_UNREF(channeld->https_scheme);
- GRPC_MDELEM_UNREF(channeld->grpc_scheme);
- GRPC_MDELEM_UNREF(channeld->content_type);
- GRPC_MDSTR_UNREF(channeld->path_key);
- GRPC_MDSTR_UNREF(channeld->authority_key);
- GRPC_MDSTR_UNREF(channeld->host_key);
-}
+ grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
- "http-server"};
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "http-server"};
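A minimal sketch of the callback contract that grpc_metadata_batch_filter expects in the filter above: the callback returns the element to keep (possibly a substitute, as the host -> :authority branch does) or NULL to drop it. drop_by_key and drop_args are hypothetical names, not from this patch; only symbols that already appear in this diff are used.

typedef struct {
  grpc_mdstr *key_to_drop; /* e.g. GRPC_MDSTR_HOST */
} drop_args;

static grpc_mdelem *drop_by_key(void *user_data, grpc_mdelem *md) {
  drop_args *a = user_data;
  if (md->key == a->key_to_drop) {
    return NULL; /* returning NULL removes the element from the batch */
  }
  return md; /* returning md (or a substitute element) keeps it */
}

/* usage, e.g. from an on-received-initial-metadata callback:
     drop_args a = {GRPC_MDSTR_HOST};
     grpc_metadata_batch_filter(calld->recv_initial_metadata, drop_by_key, &a);
 */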
diff --git a/src/core/channel/noop_filter.c b/src/core/channel/noop_filter.c
deleted file mode 100644
index 48f6b1c650..0000000000
--- a/src/core/channel/noop_filter.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/channel/noop_filter.h"
-#include <grpc/support/log.h>
-
-typedef struct call_data {
- int unused; /* C89 requires at least one struct element */
-} call_data;
-
-typedef struct channel_data {
- int unused; /* C89 requires at least one struct element */
-} channel_data;
-
-/* used to silence 'variable not used' warnings */
-static void ignore_unused(void *ignored) {}
-
-static void noop_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op *op) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(calld);
- ignore_unused(channeld);
-
- /* do nothing */
-}
-
-/* Called either:
- - in response to an API call (or similar) from above, to send something
- - a network event (or similar) from below, to receive something
- op contains type and call direction information, in addition to the data
- that is being sent or received. */
-static void noop_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
- noop_mutate_op(elem, op);
-
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, op);
-}
-
-/* Constructor for call_data */
-static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- /* initialize members */
- calld->unused = channeld->unused;
-
- if (initial_op) noop_mutate_op(elem, initial_op);
-}
-
-/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {}
-
-/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
-
- /* initialize members */
- channeld->unused = 0;
-}
-
-/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- /* grab pointers to our data from the channel element */
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(channeld);
-}
-
-const grpc_channel_filter grpc_no_op_filter = {
- noop_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, grpc_call_next_get_peer, "no-op"};
diff --git a/src/core/channel/subchannel_call_holder.c b/src/core/channel/subchannel_call_holder.c
new file mode 100644
index 0000000000..f5da41f3cd
--- /dev/null
+++ b/src/core/channel/subchannel_call_holder.c
@@ -0,0 +1,259 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/channel/subchannel_call_holder.h"
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/profiling/timers.h"
+
+#define GET_CALL(holder) \
+ ((grpc_subchannel_call *)(gpr_atm_acq_load(&(holder)->subchannel_call)))
+
+#define CANCELLED_CALL ((grpc_subchannel_call *)1)
+
+static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
+ int success);
+static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
+ int success);
+
+static void add_waiting_locked(grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op);
+static void fail_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+
+void grpc_subchannel_call_holder_init(
+ grpc_subchannel_call_holder *holder,
+ grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
+ void *pick_subchannel_arg, grpc_call_stack *owning_call) {
+ gpr_atm_rel_store(&holder->subchannel_call, 0);
+ holder->pick_subchannel = pick_subchannel;
+ holder->pick_subchannel_arg = pick_subchannel_arg;
+ gpr_mu_init(&holder->mu);
+ holder->connected_subchannel = NULL;
+ holder->waiting_ops = NULL;
+ holder->waiting_ops_count = 0;
+ holder->waiting_ops_capacity = 0;
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ holder->owning_call = owning_call;
+}
+
+void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder) {
+ grpc_subchannel_call *call = GET_CALL(holder);
+ if (call != NULL && call != CANCELLED_CALL) {
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "holder");
+ }
+ GPR_ASSERT(holder->creation_phase ==
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
+ gpr_mu_destroy(&holder->mu);
+ GPR_ASSERT(holder->waiting_ops_count == 0);
+ gpr_free(holder->waiting_ops);
+}
+
+void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op) {
+ /* try to (atomically) get the call */
+ grpc_subchannel_call *call = GET_CALL(holder);
+ GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
+ if (call == CANCELLED_CALL) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ if (call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ /* we failed; lock and figure out what to do */
+ gpr_mu_lock(&holder->mu);
+retry:
+ /* need to recheck that another thread hasn't set the call */
+ call = GET_CALL(holder);
+ if (call == CANCELLED_CALL) {
+ gpr_mu_unlock(&holder->mu);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ if (call != NULL) {
+ gpr_mu_unlock(&holder->mu);
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ /* if this is a cancellation, then we can raise our cancelled flag */
+ if (op->cancel_with_status != GRPC_STATUS_OK) {
+ if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
+ goto retry;
+ } else {
+ switch (holder->creation_phase) {
+ case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
+ fail_locked(exec_ctx, holder);
+ break;
+ case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
+ holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
+ &holder->connected_subchannel, NULL);
+ break;
+ }
+ gpr_mu_unlock(&holder->mu);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+ return;
+ }
+ }
+ /* if we don't have a subchannel, try to get one */
+ if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
+ holder->connected_subchannel == NULL &&
+ op->send_initial_metadata != NULL) {
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
+ grpc_closure_init(&holder->next_step, subchannel_ready, holder);
+ GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
+ if (holder->pick_subchannel(
+ exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
+ &holder->connected_subchannel, &holder->next_step)) {
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
+ }
+ }
+ /* if we've got a subchannel, then let's ask it to create a call */
+ if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
+ holder->connected_subchannel != NULL) {
+ gpr_atm_rel_store(
+ &holder->subchannel_call,
+ (gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
+ exec_ctx, holder->connected_subchannel, holder->pollset));
+ retry_waiting_locked(exec_ctx, holder);
+ goto retry;
+ }
+ /* nothing to be done but wait */
+ add_waiting_locked(holder, op);
+ gpr_mu_unlock(&holder->mu);
+ GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
+}
+
+static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
+ grpc_subchannel_call_holder *holder = arg;
+ grpc_subchannel_call *call;
+ gpr_mu_lock(&holder->mu);
+ GPR_ASSERT(holder->creation_phase ==
+ GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+ call = GET_CALL(holder);
+ GPR_ASSERT(call == NULL || call == CANCELLED_CALL);
+ holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ if (holder->connected_subchannel == NULL) {
+ fail_locked(exec_ctx, holder);
+ } else {
+ gpr_atm_rel_store(
+ &holder->subchannel_call,
+ (gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
+ exec_ctx, holder->connected_subchannel, holder->pollset));
+ retry_waiting_locked(exec_ctx, holder);
+ }
+ gpr_mu_unlock(&holder->mu);
+ GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
+}
+
+typedef struct {
+ grpc_transport_stream_op *ops;
+ size_t nops;
+ grpc_subchannel_call *call;
+} retry_ops_args;
+
+static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder) {
+ retry_ops_args *a = gpr_malloc(sizeof(*a));
+ a->ops = holder->waiting_ops;
+ a->nops = holder->waiting_ops_count;
+ a->call = GET_CALL(holder);
+ if (a->call == CANCELLED_CALL) {
+ gpr_free(a);
+ fail_locked(exec_ctx, holder);
+ return;
+ }
+ holder->waiting_ops = NULL;
+ holder->waiting_ops_count = 0;
+ holder->waiting_ops_capacity = 0;
+ GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
+ grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(retry_ops, a), 1);
+}
+
+static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, int success) {
+ retry_ops_args *a = args;
+ size_t i;
+ for (i = 0; i < a->nops; i++) {
+ grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
+ gpr_free(a->ops);
+ gpr_free(a);
+}
+
+static void add_waiting_locked(grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("add_waiting_locked", 0);
+ if (holder->waiting_ops_count == holder->waiting_ops_capacity) {
+ holder->waiting_ops_capacity = GPR_MAX(3, 2 * holder->waiting_ops_capacity);
+ holder->waiting_ops =
+ gpr_realloc(holder->waiting_ops, holder->waiting_ops_capacity *
+ sizeof(*holder->waiting_ops));
+ }
+ holder->waiting_ops[holder->waiting_ops_count++] = *op;
+ GPR_TIMER_END("add_waiting_locked", 0);
+}
+
+static void fail_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder) {
+ size_t i;
+ for (i = 0; i < holder->waiting_ops_count; i++) {
+ grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].on_complete, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].recv_message_ready,
+ 0);
+ }
+ holder->waiting_ops_count = 0;
+}
+
+char *grpc_subchannel_call_holder_get_peer(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
+ grpc_subchannel_call *subchannel_call = GET_CALL(holder);
+
+ if (subchannel_call) {
+ return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
+ } else {
+ return NULL;
+ }
+}
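The holder's fast path above hinges on a tri-state atomic: 0 means no call yet, 1 (CANCELLED_CALL) means cancelled, and any other value is a grpc_subchannel_call pointer. A compact sketch of that idiom, with hypothetical helper names (try_get / try_cancel) and assuming <grpc/support/atm.h>:

#include <grpc/support/atm.h>

static grpc_subchannel_call *try_get(gpr_atm *cell) {
  gpr_atm v = gpr_atm_acq_load(cell); /* acquire: pairs with the rel_store */
  return (v == 0 || v == 1) ? NULL : (grpc_subchannel_call *)v;
}

static int try_cancel(gpr_atm *cell) {
  /* only succeeds while the cell is still empty, so a published call can
     never be silently replaced by the cancelled marker */
  return gpr_atm_rel_cas(cell, (gpr_atm)0, (gpr_atm)1);
}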
diff --git a/src/core/channel/subchannel_call_holder.h b/src/core/channel/subchannel_call_holder.h
new file mode 100644
index 0000000000..9cf72c6cf7
--- /dev/null
+++ b/src/core/channel/subchannel_call_holder.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
+#define GRPC_INTERNAL_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
+
+#include "src/core/client_config/subchannel.h"
+
+/** Pick a subchannel for grpc_subchannel_call_holder;
+    return 1 if a subchannel is available immediately (in which case on_ready
+    should not be called), or 0 otherwise (in which case on_ready should be
+    called when the subchannel becomes available) */
+typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
+ grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready);
+
+typedef enum {
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+ GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
+} grpc_subchannel_call_holder_creation_phase;
+
+/** Wrapper for holding a pointer to grpc_subchannel_call, and the
+ associated machinery to create such a pointer.
+ Handles queueing of stream ops until a call object is ready, waiting
+ for initial metadata before trying to create a call object,
+ and handling cancellation gracefully.
+
+ Both the channel and uchannel filter use this as their call_data. */
+typedef struct grpc_subchannel_call_holder {
+ /** either 0 for no call, 1 for cancelled, or a pointer to a
+ grpc_subchannel_call */
+ gpr_atm subchannel_call;
+ /** Helper function to choose the subchannel on which to create
+ the call object. Channel filter delegates to the load
+ balancing policy (once it's ready); uchannel returns
+ immediately */
+ grpc_subchannel_call_holder_pick_subchannel pick_subchannel;
+ void *pick_subchannel_arg;
+
+ gpr_mu mu;
+
+ grpc_subchannel_call_holder_creation_phase creation_phase;
+ grpc_connected_subchannel *connected_subchannel;
+ grpc_pollset *pollset;
+
+ grpc_transport_stream_op *waiting_ops;
+ size_t waiting_ops_count;
+ size_t waiting_ops_capacity;
+
+ grpc_closure next_step;
+
+ grpc_call_stack *owning_call;
+} grpc_subchannel_call_holder;
+
+void grpc_subchannel_call_holder_init(
+ grpc_subchannel_call_holder *holder,
+ grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
+ void *pick_subchannel_arg, grpc_call_stack *owning_call);
+void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+
+void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder,
+ grpc_transport_stream_op *op);
+char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call_holder *holder);
+
+#endif
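A minimal sketch of an implementation of the pick_subchannel contract declared above, for the always-immediate (roughly the uchannel) case; immediate_pick and the use of arg as an already-connected subchannel are assumptions for illustration:

static int immediate_pick(grpc_exec_ctx *exec_ctx, void *arg,
                          grpc_metadata_batch *initial_metadata,
                          grpc_connected_subchannel **connected_subchannel,
                          grpc_closure *on_ready) {
  /* assumption: 'arg' carries the one connected subchannel we own */
  *connected_subchannel = arg;
  return 1; /* available immediately: the holder must not wait for on_ready */
}

An asynchronous implementation would instead stash on_ready, return 0, and later set *connected_subchannel before scheduling on_ready; being called with NULL initial_metadata and NULL on_ready (as the holder does during cancellation) means "drop any pending pick".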
diff --git a/src/core/client_config/connector.h b/src/core/client_config/connector.h
index e9b8be4b53..a649f143ae 100644
--- a/src/core/client_config/connector.h
+++ b/src/core/client_config/connector.h
@@ -51,6 +51,8 @@ typedef struct {
/** address to connect to */
const struct sockaddr *addr;
size_t addr_len;
+ /** initial connect string to send */
+ gpr_slice initial_connect_string;
/** deadline for connection */
gpr_timespec deadline;
/** channel arguments (to be passed to transport) */
diff --git a/src/core/client_config/subchannel_factory_decorators/add_channel_arg.c b/src/core/client_config/default_initial_connect_string.c
index 585e465fa4..6a4e23e6f2 100644
--- a/src/core/client_config/subchannel_factory_decorators/add_channel_arg.c
+++ b/src/core/client_config/default_initial_connect_string.c
@@ -31,13 +31,9 @@
*
*/
-#include "src/core/client_config/subchannel_factory_decorators/add_channel_arg.h"
-#include "src/core/client_config/subchannel_factory_decorators/merge_channel_args.h"
+#include <grpc/support/slice.h>
+#include "src/core/iomgr/sockaddr.h"
-grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
- grpc_subchannel_factory *input, const grpc_arg *arg) {
- grpc_channel_args args;
- args.num_args = 1;
- args.args = (grpc_arg *)arg;
- return grpc_subchannel_factory_merge_channel_args(input, &args);
-}
+void grpc_set_default_initial_connect_string(struct sockaddr **addr,
+ size_t *addr_len,
+ gpr_slice *initial_str) {}
diff --git a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.h b/src/core/client_config/initial_connect_string.c
index a9e1691871..19afa1675a 100644
--- a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.h
+++ b/src/core/client_config/initial_connect_string.c
@@ -31,16 +31,23 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H
-#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H
+#include "src/core/client_config/initial_connect_string.h"
-#include "src/core/client_config/subchannel_factory.h"
+#include <stddef.h>
-/** Takes a subchannel factory, returns a new one that mutates incoming
- channel_args by adding a new argument; ownership of input, args is retained
- by the caller. */
-grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
- grpc_subchannel_factory *input, const grpc_channel_args *args);
+extern void grpc_set_default_initial_connect_string(struct sockaddr **addr,
+ size_t *addr_len,
+ gpr_slice *initial_str);
-#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H \
- */
+static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
+ grpc_set_default_initial_connect_string;
+
+void grpc_test_set_initial_connect_string_function(
+ grpc_set_initial_connect_string_func func) {
+ g_set_initial_connect_string_func = func;
+}
+
+void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+ gpr_slice *initial_str) {
+ g_set_initial_connect_string_func(addr, addr_len, initial_str);
+}
diff --git a/src/core/channel/noop_filter.h b/src/core/client_config/initial_connect_string.h
index ded9b33117..b6dca7134a 100644
--- a/src/core/channel/noop_filter.h
+++ b/src/core/client_config/initial_connect_string.h
@@ -31,14 +31,20 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H
-#define GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H
+#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
+#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
-#include "src/core/channel/channel_stack.h"
+#include <grpc/support/slice.h>
+#include "src/core/iomgr/sockaddr.h"
-/* No-op filter: simply takes everything it's given, and passes it on to the
- next filter. Exists simply as a starting point that others can take and
- customize for their own filters */
-extern const grpc_channel_filter grpc_no_op_filter;
+typedef void (*grpc_set_initial_connect_string_func)(struct sockaddr **addr,
+ size_t *addr_len,
+ gpr_slice *initial_str);
+void grpc_test_set_initial_connect_string_function(
+ grpc_set_initial_connect_string_func func);
-#endif /* GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H */
+/** Set a string to be sent once connected. Optionally reset addr. */
+void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+ gpr_slice *connect_string);
+
+#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H */
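A minimal sketch of how a test might use the hook declared above to make every new connection send a fixed prefix; prepend_magic_bytes and the literal "MAGIC" are made up for illustration, and the sketch leaves addr/addr_len untouched even though the hook may also rewrite them:

#include <grpc/support/slice.h>
#include "src/core/client_config/initial_connect_string.h"

static void prepend_magic_bytes(struct sockaddr **addr, size_t *addr_len,
                                gpr_slice *connect_string) {
  /* leave the address alone; just ask the connector to send "MAGIC" first */
  *connect_string = gpr_slice_from_copied_string("MAGIC");
}

/* in test setup, before any channels are created: */
/* grpc_test_set_initial_connect_string_function(prepend_magic_bytes); */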
diff --git a/src/core/client_config/lb_policies/pick_first.c b/src/core/client_config/lb_policies/pick_first.c
index e5bf0680ff..37de3e9f68 100644
--- a/src/core/client_config/lb_policies/pick_first.c
+++ b/src/core/client_config/lb_policies/pick_first.c
@@ -42,7 +42,7 @@
typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
- grpc_subchannel **target;
+ grpc_connected_subchannel **target;
grpc_closure *on_complete;
} pending_pick;
@@ -60,7 +60,7 @@ typedef struct {
/** the selected channel
TODO(ctiller): this should be atomically set so we don't
need to take a mutex in the common case */
- grpc_subchannel *selected;
+ grpc_connected_subchannel *selected;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@@ -76,24 +76,6 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
-static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- pending_pick *pp;
- for (pp = p->pending_picks; pp; pp = pp->next) {
- grpc_subchannel_del_interested_party(
- exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
- }
-}
-
-static void add_interested_parties_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- pending_pick *pp;
- for (pp = p->pending_picks; pp; pp = pp->next) {
- grpc_subchannel_add_interested_party(
- exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
- }
-}
-
void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
@@ -102,7 +84,7 @@ void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
if (p->selected) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@@ -114,30 +96,65 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
- del_interested_parties_locked(exec_ctx, p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ /* cancel subscription */
+ if (p->selected != NULL) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
+ } else {
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
+ &p->connectivity_changed);
+ }
gpr_mu_unlock(&p->mu);
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
+ grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
+ pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
pp = next;
}
}
+static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp;
+ gpr_mu_lock(&p->mu);
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
+ pp->pollset);
+ *target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
p->started_picking = 1;
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
- GRPC_LB_POLICY_REF(&p->base, "pick_first_connectivity");
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
- &p->checking_connectivity, &p->connectivity_changed);
+ &p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
}
void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
@@ -149,22 +166,22 @@ void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
-void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target, grpc_closure *on_complete) {
+int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target, grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->selected) {
gpr_mu_unlock(&p->mu);
*target = p->selected;
- grpc_exec_ctx_enqueue(exec_ctx, on_complete, 1);
+ return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_subchannel_add_interested_party(
- exec_ctx, p->subchannels[p->checking_subchannel], pollset);
+ grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
+ pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@@ -172,6 +189,7 @@ void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
+ return 0;
}
}
@@ -179,25 +197,17 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
size_t i;
- grpc_transport_op op;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
- grpc_subchannel *exclude_subchannel;
gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
- exclude_subchannel = p->selected;
gpr_mu_unlock(&p->mu);
- GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "destroy_subchannels");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
- if (subchannels[i] != exclude_subchannel) {
- memset(&op, 0, sizeof(op));
- op.disconnect = 1;
- grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], &op);
- }
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
}
@@ -207,23 +217,28 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
+ grpc_subchannel *selected_subchannel;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->shutdown) {
gpr_mu_unlock(&p->mu);
- GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
return;
} else if (p->selected != NULL) {
+ if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* if the selected channel goes bad, we're done */
+ p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
+ }
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
p->checking_connectivity, "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
- grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
- &p->checking_connectivity,
- &p->connectivity_changed);
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, p->selected, &p->base.interested_parties,
+ &p->checking_connectivity, &p->connectivity_changed);
} else {
- GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
}
} else {
loop:
@@ -231,39 +246,41 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
- p->selected = p->subchannels[p->checking_subchannel];
- GRPC_SUBCHANNEL_REF(p->selected, "picked_first");
+ selected_subchannel = p->subchannels[p->checking_subchannel];
+ p->selected =
+ grpc_subchannel_get_connected_subchannel(selected_subchannel);
+ GPR_ASSERT(p->selected);
+ GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked_first");
/* drop the pick list: we are connected now */
- GRPC_LB_POLICY_REF(&p->base, "destroy_subchannels");
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
grpc_exec_ctx_enqueue(exec_ctx,
grpc_closure_create(destroy_subchannels, p), 1);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
- grpc_subchannel_del_interested_party(exec_ctx, p->selected,
- pp->pollset);
+ grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
+ pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
- grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
- &p->checking_connectivity,
- &p->connectivity_changed);
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, p->selected, &p->base.interested_parties,
+ &p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
- del_interested_parties_locked(exec_ctx, p);
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
- add_interested_parties_locked(exec_ctx, p);
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
- &p->checking_connectivity, &p->connectivity_changed);
+ &p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
} else {
goto loop;
}
@@ -275,13 +292,13 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
- &p->checking_connectivity, &p->connectivity_changed);
+ &p->base.interested_parties, &p->checking_connectivity,
+ &p->connectivity_changed);
break;
case GRPC_CHANNEL_FATAL_FAILURE:
- del_interested_parties_locked(exec_ctx, p);
- GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
- p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
+ GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
+ p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"pick_first");
if (p->num_subchannels == 0) {
@@ -294,7 +311,8 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
- GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
+ "pick_first_connectivity");
} else {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
@@ -302,7 +320,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_subchannel %= p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
- add_interested_parties_locked(exec_ctx, p);
goto loop;
}
}
@@ -311,39 +328,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&p->mu);
}
-static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_transport_op *op) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- size_t i;
- size_t n;
- grpc_subchannel **subchannels;
- grpc_subchannel *selected;
-
- gpr_mu_lock(&p->mu);
- n = p->num_subchannels;
- subchannels = gpr_malloc(n * sizeof(*subchannels));
- selected = p->selected;
- if (selected) {
- GRPC_SUBCHANNEL_REF(selected, "pf_broadcast_to_selected");
- }
- for (i = 0; i < n; i++) {
- subchannels[i] = p->subchannels[i];
- GRPC_SUBCHANNEL_REF(subchannels[i], "pf_broadcast");
- }
- gpr_mu_unlock(&p->mu);
-
- for (i = 0; i < n; i++) {
- if (selected == subchannels[i]) continue;
- grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pf_broadcast");
- }
- if (p->selected) {
- grpc_subchannel_process_transport_op(exec_ctx, selected, op);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, selected, "pf_broadcast_to_selected");
- }
- gpr_free(subchannels);
-}
-
static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
@@ -364,8 +348,20 @@ void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_unlock(&p->mu);
}
+void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ gpr_mu_lock(&p->mu);
+ if (p->selected) {
+ grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
- pf_destroy, pf_shutdown, pf_pick, pf_exit_idle, pf_broadcast,
+ pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_ping_one, pf_exit_idle,
pf_check_connectivity, pf_notify_on_state_change};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
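Caller-side sketch of how the new int-returning pick is consumed (mirroring subchannel_call_holder above); it assumes the grpc_lb_policy_pick wrapper in lb_policy.c forwards to the vtable pick with the signature shown here, and that exec_ctx, policy, pollset, initial_metadata and on_ready are already in scope:

  grpc_connected_subchannel *target = NULL;
  /* assumption: on_ready was grpc_closure_init()'d with the continuation to
     run if the pick cannot complete synchronously */
  if (grpc_lb_policy_pick(exec_ctx, policy, pollset, initial_metadata, &target,
                          &on_ready)) {
    /* completed synchronously: 'target' is ready to use */
  } else {
    /* pending: 'on_ready' runs once 'target' is filled in, or with 'target'
       left NULL if the pick is dropped via the new cancel_pick/shutdown paths */
  }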
diff --git a/src/core/client_config/lb_policies/round_robin.c b/src/core/client_config/lb_policies/round_robin.c
index d0b60a0df2..d487456363 100644
--- a/src/core/client_config/lb_policies/round_robin.c
+++ b/src/core/client_config/lb_policies/round_robin.c
@@ -38,6 +38,8 @@
#include <grpc/support/alloc.h>
#include "src/core/transport/connectivity_state.h"
+typedef struct round_robin_lb_policy round_robin_lb_policy;
+
int grpc_lb_round_robin_trace = 0;
/** List of entities waiting for a pick.
@@ -46,7 +48,7 @@ int grpc_lb_round_robin_trace = 0;
typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
- grpc_subchannel **target;
+ grpc_connected_subchannel **target;
grpc_closure *on_complete;
} pending_pick;
@@ -58,22 +60,27 @@ typedef struct ready_list {
} ready_list;
typedef struct {
- size_t subchannel_idx; /**< Index over p->subchannels */
- void *p; /**< round_robin_lb_policy instance */
-} connectivity_changed_cb_arg;
-
-typedef struct {
+ /** index within policy->subchannels */
+ size_t index;
+ /** backpointer to owning policy */
+ round_robin_lb_policy *policy;
+ /** subchannel itself */
+ grpc_subchannel *subchannel;
+ /** notification that connectivity has changed on subchannel */
+ grpc_closure connectivity_changed_closure;
+  /** this subchannel's current position in the policy's ready_list */
+ ready_list *ready_list_node;
+ /** last observed connectivity */
+ grpc_connectivity_state connectivity_state;
+} subchannel_data;
+
+struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
- grpc_subchannel **subchannels;
size_t num_subchannels;
-
- /** Callbacks, one per subchannel being watched, to be called when their
- * respective connectivity changes */
- grpc_closure *connectivity_changed_cbs;
- connectivity_changed_cb_arg *cb_args;
+ subchannel_data **subchannels;
/** mutex protecting remaining members */
gpr_mu mu;
@@ -81,8 +88,6 @@ typedef struct {
int started_picking;
/** are we shutting down? */
int shutdown;
- /** Connectivity state of the subchannels being watched */
- grpc_connectivity_state *subchannel_connectivity;
/** List of picks that are waiting on connectivity */
pending_pick *pending_picks;
@@ -93,13 +98,7 @@ typedef struct {
ready_list ready_list;
/** Last pick from the ready list. */
ready_list *ready_list_last_pick;
-
- /** Subchannel index to ready_list node.
- *
- * Kept in order to remove nodes from the ready list associated with a
- * subchannel */
- ready_list **subchannel_index_to_readylist_node;
-} round_robin_lb_policy;
+};
/** Returns the next subchannel from the connected list or NULL if the list is
* empty.
@@ -144,9 +143,9 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
/** Prepends (relative to the root at p->ready_list) the connected subchannel \a
* csc to the list of ready subchannels. */
static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
- grpc_subchannel *csc) {
+ grpc_subchannel *sc) {
ready_list *new_elem = gpr_malloc(sizeof(ready_list));
- new_elem->subchannel = csc;
+ new_elem->subchannel = sc;
if (p->ready_list.prev == NULL) {
/* first element */
new_elem->next = &p->ready_list;
@@ -160,7 +159,7 @@ static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
p->ready_list.prev = new_elem;
}
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, csc);
+ gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, sc);
}
return new_elem;
}
@@ -200,28 +199,15 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
gpr_free(node);
}
-static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
- round_robin_lb_policy *p,
- const size_t subchannel_idx) {
- pending_pick *pp;
- for (pp = p->pending_picks; pp; pp = pp->next) {
- grpc_subchannel_del_interested_party(
- exec_ctx, p->subchannels[subchannel_idx], pp->pollset);
- }
-}
-
void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
ready_list *elem;
for (i = 0; i < p->num_subchannels; i++) {
- del_interested_parties_locked(exec_ctx, p, i);
+ subchannel_data *sd = p->subchannels[i];
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ gpr_free(sd);
}
- for (i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "round_robin");
- }
- gpr_free(p->connectivity_changed_cbs);
- gpr_free(p->subchannel_connectivity);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@@ -237,20 +223,15 @@ void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(elem);
elem = tmp;
}
- gpr_free(p->subchannel_index_to_readylist_node);
- gpr_free(p->cb_args);
gpr_free(p);
}
void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- size_t i;
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
+ size_t i;
- for (i = 0; i < p->num_subchannels; i++) {
- del_interested_parties_locked(exec_ctx, p, i);
- }
+ gpr_mu_lock(&p->mu);
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@@ -261,6 +242,35 @@ void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ for (i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = p->subchannels[i];
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
+ &sd->connectivity_changed_closure);
+ }
+ gpr_mu_unlock(&p->mu);
+}
+
+static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ pending_pick *pp;
+ gpr_mu_lock(&p->mu);
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
+ pp->pollset);
+ *target = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
gpr_mu_unlock(&p->mu);
}
@@ -268,12 +278,16 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
+  gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", p,
+          (int)p->num_subchannels);
+
for (i = 0; i < p->num_subchannels; i++) {
- p->subchannel_connectivity[i] = GRPC_CHANNEL_IDLE;
- grpc_subchannel_notify_on_state_change(exec_ctx, p->subchannels[i],
- &p->subchannel_connectivity[i],
- &p->connectivity_changed_cbs[i]);
- GRPC_LB_POLICY_REF(&p->base, "round_robin_connectivity");
+ subchannel_data *sd = p->subchannels[i];
+ sd->connectivity_state = GRPC_CHANNEL_IDLE;
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, &p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
}
}
@@ -286,32 +300,30 @@ void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
-void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target, grpc_closure *on_complete) {
- size_t i;
+int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target, grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
- *target = selected->subchannel;
+ *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "[RR PICK] TARGET <-- SUBCHANNEL %p (NODE %p)",
+ gpr_log(GPR_DEBUG,
+ "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
- on_complete->cb(exec_ctx, on_complete->cb_arg, 1);
+ return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- for (i = 0; i < p->num_subchannels; i++) {
- grpc_subchannel_add_interested_party(exec_ctx, p->subchannels[i],
- pollset);
- }
+ grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
+ pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@@ -319,38 +331,31 @@ void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
+ return 0;
}
}
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
- connectivity_changed_cb_arg *cb_arg = arg;
- round_robin_lb_policy *p = cb_arg->p;
- /* index over p->subchannels of this cb's subchannel */
- const size_t this_idx = cb_arg->subchannel_idx;
+ subchannel_data *sd = arg;
+ round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
ready_list *selected;
int unref = 0;
- /* connectivity state of this cb's subchannel */
- grpc_connectivity_state *this_connectivity;
-
gpr_mu_lock(&p->mu);
- this_connectivity = &p->subchannel_connectivity[this_idx];
-
if (p->shutdown) {
unref = 1;
} else {
- switch (*this_connectivity) {
+ switch (sd->connectivity_state) {
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
/* add the newly connected subchannel to the list of connected ones.
* Note that it goes to the "end of the line". */
- p->subchannel_index_to_readylist_node[this_idx] =
- add_connected_sc_locked(p, p->subchannels[this_idx]);
+ sd->ready_list_node = add_connected_sc_locked(p, sd->subchannel);
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
          * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
@@ -362,60 +367,60 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = selected->subchannel;
+ *pp->target =
+ grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
- grpc_subchannel_del_interested_party(exec_ctx, selected->subchannel,
- pp->pollset);
+ grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
+ pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[this_idx], this_connectivity,
- &p->connectivity_changed_cbs[this_idx]);
+ exec_ctx, sd->subchannel, &p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
- *this_connectivity, "connecting_changed");
+ sd->connectivity_state,
+ "connecting_changed");
grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[this_idx], this_connectivity,
- &p->connectivity_changed_cbs[this_idx]);
+ exec_ctx, sd->subchannel, &p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
- del_interested_parties_locked(exec_ctx, p, this_idx);
/* renew state notification */
grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[this_idx], this_connectivity,
- &p->connectivity_changed_cbs[this_idx]);
+ exec_ctx, sd->subchannel, &p->base.interested_parties,
+ &sd->connectivity_state, &sd->connectivity_changed_closure);
/* remove from ready list if still present */
- if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
- remove_disconnected_sc_locked(
- p, p->subchannel_index_to_readylist_node[this_idx]);
- p->subchannel_index_to_readylist_node[this_idx] = NULL;
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
break;
case GRPC_CHANNEL_FATAL_FAILURE:
- del_interested_parties_locked(exec_ctx, p, this_idx);
- if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
- remove_disconnected_sc_locked(
- p, p->subchannel_index_to_readylist_node[this_idx]);
- p->subchannel_index_to_readylist_node[this_idx] = NULL;
+ if (sd->ready_list_node != NULL) {
+ remove_disconnected_sc_locked(p, sd->ready_list_node);
+ sd->ready_list_node = NULL;
}
- GPR_SWAP(grpc_subchannel *, p->subchannels[this_idx],
- p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
- "round_robin");
+ GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
+ p->subchannels[p->num_subchannels]);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ p->subchannels[sd->index]->index = sd->index;
+ gpr_free(sd);
+ unref = 1;
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE,
@@ -426,7 +431,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
- unref = 1;
} else {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
@@ -438,33 +442,10 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&p->mu);
if (unref) {
- GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
}
}
-static void rr_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_transport_op *op) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- size_t i;
- size_t n;
- grpc_subchannel **subchannels;
-
- gpr_mu_lock(&p->mu);
- n = p->num_subchannels;
- subchannels = gpr_malloc(n * sizeof(*subchannels));
- for (i = 0; i < n; i++) {
- subchannels[i] = p->subchannels[i];
- GRPC_SUBCHANNEL_REF(subchannels[i], "rr_broadcast");
- }
- gpr_mu_unlock(&p->mu);
-
- for (i = 0; i < n; i++) {
- grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "rr_broadcast");
- }
- gpr_free(subchannels);
-}
-
static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
@@ -486,8 +467,24 @@ static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&p->mu);
}
+static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ ready_list *selected;
+ grpc_connected_subchannel *target;
+ gpr_mu_lock(&p->mu);
+ if ((selected = peek_next_connected_locked(p))) {
+ gpr_mu_unlock(&p->mu);
+ target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ grpc_connected_subchannel_ping(exec_ctx, target, closure);
+ } else {
+ gpr_mu_unlock(&p->mu);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
+ }
+}
+
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
- rr_destroy, rr_shutdown, rr_pick, rr_exit_idle, rr_broadcast,
+ rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_ping_one, rr_exit_idle,
rr_check_connectivity, rr_notify_on_state_change};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
@@ -501,27 +498,22 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
GPR_ASSERT(args->num_subchannels > 0);
memset(p, 0, sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
- p->subchannels =
- gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
p->num_subchannels = args->num_subchannels;
+ p->subchannels = gpr_malloc(sizeof(*p->subchannels) * p->num_subchannels);
+ memset(p->subchannels, 0, sizeof(*p->subchannels) * p->num_subchannels);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
- memcpy(p->subchannels, args->subchannels,
- sizeof(grpc_subchannel *) * args->num_subchannels);
gpr_mu_init(&p->mu);
- p->connectivity_changed_cbs =
- gpr_malloc(sizeof(grpc_closure) * args->num_subchannels);
- p->subchannel_connectivity =
- gpr_malloc(sizeof(grpc_connectivity_state) * args->num_subchannels);
-
- p->cb_args =
- gpr_malloc(sizeof(connectivity_changed_cb_arg) * args->num_subchannels);
for (i = 0; i < args->num_subchannels; i++) {
- p->cb_args[i].subchannel_idx = i;
- p->cb_args[i].p = p;
- grpc_closure_init(&p->connectivity_changed_cbs[i], rr_connectivity_changed,
- &p->cb_args[i]);
+ subchannel_data *sd = gpr_malloc(sizeof(*sd));
+ memset(sd, 0, sizeof(*sd));
+ p->subchannels[i] = sd;
+ sd->policy = p;
+ sd->index = i;
+ sd->subchannel = args->subchannels[i];
+ grpc_closure_init(&sd->connectivity_changed_closure,
+ rr_connectivity_changed, sd);
}
/* The (dummy node) root of the ready list */
@@ -530,10 +522,6 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
p->ready_list.next = NULL;
p->ready_list_last_pick = &p->ready_list;
- p->subchannel_index_to_readylist_node =
- gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
- memset(p->subchannel_index_to_readylist_node, 0,
- sizeof(grpc_subchannel *) * args->num_subchannels);
return &p->base;
}
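
The ready list used above is a circular doubly-linked list rooted at a dummy node, with p->ready_list_last_pick acting as a cursor so successive picks rotate through the connected subchannels. Below is a minimal standalone sketch of that structure; the names (node, rr_state, rr_pick_next) are invented for illustration and are not the gRPC types.

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
  int id; /* stands in for a connected subchannel */
  struct node *next;
  struct node *prev;
} node;

typedef struct {
  node root;       /* dummy node: root.id is never picked */
  node *last_pick; /* cursor for round-robin traversal */
} rr_state;

static void rr_init(rr_state *p) {
  p->root.id = -1;
  p->root.next = p->root.prev = &p->root;
  p->last_pick = &p->root;
}

static void rr_add(rr_state *p, int id) {
  node *n = malloc(sizeof(*n)); /* nodes are leaked for brevity */
  n->id = id;
  /* insert just before the root, i.e. at the tail of the ring */
  n->next = &p->root;
  n->prev = p->root.prev;
  n->prev->next = n;
  n->next->prev = n;
}

/* advance the cursor, skipping the dummy root; NULL if the ring is empty */
static node *rr_pick_next(rr_state *p) {
  node *n = p->last_pick->next;
  if (n == &p->root) n = n->next; /* skip dummy root */
  if (n == &p->root) return NULL; /* only the root remains: empty */
  p->last_pick = n;
  return n;
}

int main(void) {
  rr_state p;
  int i;
  rr_init(&p);
  rr_add(&p, 1);
  rr_add(&p, 2);
  rr_add(&p, 3);
  for (i = 0; i < 6; i++) printf("%d ", rr_pick_next(&p)->id);
  printf("\n"); /* prints: 1 2 3 1 2 3 */
  return 0;
}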
diff --git a/src/core/client_config/lb_policy.c b/src/core/client_config/lb_policy.c
index c955186f7f..d4672f6b25 100644
--- a/src/core/client_config/lb_policy.c
+++ b/src/core/client_config/lb_policy.c
@@ -33,58 +33,94 @@
#include "src/core/client_config/lb_policy.h"
+#define WEAK_REF_BITS 16
+
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable) {
policy->vtable = vtable;
- gpr_ref_init(&policy->refs, 1);
+ gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
+ grpc_pollset_set_init(&policy->interested_parties);
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
-void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p ref %d -> %d %s",
- policy, (int)policy->refs.count, (int)policy->refs.count + 1, reason);
+#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
+#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
+#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
+#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
#else
-void grpc_lb_policy_ref(grpc_lb_policy *policy) {
+#define REF_FUNC_EXTRA_ARGS
+#define REF_MUTATE_EXTRA_ARGS
+#define REF_FUNC_PASS_ARGS(new_reason)
+#define REF_MUTATE_PASS_ARGS(x)
#endif
- gpr_ref(&policy->refs);
-}
+static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
+ int barrier REF_MUTATE_EXTRA_ARGS) {
+ gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
+ : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
-void grpc_lb_policy_unref(grpc_lb_policy *policy,
- grpc_closure_list *closure_list, const char *file,
- int line, const char *reason) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p unref %d -> %d %s",
- policy, (int)policy->refs.count, (int)policy->refs.count - 1, reason);
-#else
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "LB_POLICY: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
+ old_val + delta, reason);
#endif
- if (gpr_unref(&policy->refs)) {
- policy->vtable->destroy(exec_ctx, policy);
+ return old_val;
+}
+
+void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
+}
+
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ gpr_atm old_val =
+ ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
+ 1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
+ gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
+ gpr_atm check = 1 << WEAK_REF_BITS;
+ if ((old_val & mask) == check) {
+ policy->vtable->shutdown(exec_ctx, policy);
}
+ grpc_lb_policy_weak_unref(exec_ctx,
+ policy REF_FUNC_PASS_ARGS("strong-unref"));
}
-void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
- policy->vtable->shutdown(exec_ctx, policy);
+void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
-void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_pollset *pollset,
- grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target, grpc_closure *on_complete) {
- policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata, target,
- on_complete);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+ gpr_atm old_val =
+ ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
+ if (old_val == 1) {
+ grpc_pollset_set_destroy(&policy->interested_parties);
+ policy->vtable->destroy(exec_ctx, policy);
+ }
}
-void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_transport_op *op) {
- policy->vtable->broadcast(exec_ctx, policy, op);
+int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target,
+ grpc_closure *on_complete) {
+ return policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata,
+ target, on_complete);
+}
+
+void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target) {
+ policy->vtable->cancel_pick(exec_ctx, policy, target);
}
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->exit_idle(exec_ctx, policy);
}
+void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure) {
+ policy->vtable->ping_one(exec_ctx, policy, closure);
+}
+
void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,
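
The new ref_pair packs two counts into one atomic word: weak references in the low WEAK_REF_BITS bits and strong references above them, so a strong unref can drop one strong count and take one weak count in a single fetch-and-add. The following is a minimal sketch of the same scheme, written with C11 atomics rather than gpr_atm and with invented names; it is an illustration of the counting trick, not the gRPC implementation.

#include <stdatomic.h>
#include <stdio.h>

#define WEAK_REF_BITS 16
#define STRONG_ONE ((long)1 << WEAK_REF_BITS)
#define WEAK_MASK (STRONG_ONE - 1)

typedef struct {
  atomic_long ref_pair; /* starts with exactly one strong ref */
} obj;

static void obj_init(obj *o) { atomic_store(&o->ref_pair, STRONG_ONE); }

static void strong_ref(obj *o) { atomic_fetch_add(&o->ref_pair, STRONG_ONE); }
static void weak_ref(obj *o) { atomic_fetch_add(&o->ref_pair, 1); }

static void weak_unref(obj *o) {
  long old = atomic_fetch_sub(&o->ref_pair, 1);
  if (old == 1) printf("destroy\n"); /* last reference of any kind */
}

static void strong_unref(obj *o) {
  /* -1 strong, +1 weak, in one atomic step: delta = 1 - STRONG_ONE */
  long old = atomic_fetch_add(&o->ref_pair, 1 - STRONG_ONE);
  if ((old & ~WEAK_MASK) == STRONG_ONE) printf("shutdown\n");
  weak_unref(o); /* release the weak ref taken above */
}

int main(void) {
  obj o;
  obj_init(&o);
  weak_ref(&o);     /* e.g. a pending connectivity callback */
  strong_unref(&o); /* prints "shutdown": last strong ref gone */
  weak_unref(&o);   /* prints "destroy": last weak ref gone */
  (void)strong_ref;
  return 0;
}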
diff --git a/src/core/client_config/lb_policy.h b/src/core/client_config/lb_policy.h
index 0eefe64991..db5238c8ca 100644
--- a/src/core/client_config/lb_policy.h
+++ b/src/core/client_config/lb_policy.h
@@ -47,7 +47,8 @@ typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
- gpr_refcount refs;
+ gpr_atm ref_pair;
+ grpc_pollset_set interested_parties;
};
struct grpc_lb_policy_vtable {
@@ -56,17 +57,18 @@ struct grpc_lb_policy_vtable {
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** implement grpc_lb_policy_pick */
- void (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target, grpc_closure *on_complete);
+ int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target, grpc_closure *on_complete);
+ void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target);
+
+ void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure);
/** try to enter a READY connectivity state */
void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
- /** broadcast a transport op to all subchannels */
- void (*broadcast)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_transport_op *op);
-
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy);
@@ -79,40 +81,54 @@ struct grpc_lb_policy_vtable {
grpc_closure *closure);
};
+/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_WEAK_REF(p, r) \
+ grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
+ grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
+void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
+ const char *reason);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const char *file, int line, const char *reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
+#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
+#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
void grpc_lb_policy_ref(grpc_lb_policy *policy);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
#endif
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable);
-/** Start shutting down (fail any pending picks) */
-void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
-
/** Given initial metadata in \a initial_metadata, find an appropriate
target for this rpc, and 'return' it by calling \a on_complete after setting
\a target.
Picking can be asynchronous. Any IO should be done under \a pollset. */
-void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_pollset *pollset,
- grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target, grpc_closure *on_complete);
+int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset,
+ grpc_metadata_batch *initial_metadata,
+ grpc_connected_subchannel **target,
+ grpc_closure *on_complete);
+
+void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure);
-void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_transport_op *op);
+void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target);
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
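
grpc_lb_policy_pick now returns an int. A plausible reading, though not spelled out in this header, is that a nonzero return means the pick completed synchronously (*target is already set and on_complete will not be scheduled), while zero means the result arrives later through on_complete. Under that assumption, a toy caller would handle both outcomes as below; every type and name in the sketch is invented for illustration.

#include <stdio.h>

typedef void (*closure_fn)(void *arg, int success);

typedef struct {
  closure_fn cb;
  void *arg;
} closure;

/* A toy "policy": returns nonzero when it can answer immediately, in which
   case *target is set and on_complete must not be waited for; returns zero
   when the answer will arrive later through on_complete. */
static int toy_pick(int have_ready_target, int *target, closure *on_complete) {
  (void)on_complete; /* a real policy would park this for later */
  if (have_ready_target) {
    *target = 42;
    return 1;
  }
  return 0;
}

static void pick_done(void *arg, int success) {
  printf("async pick done (success=%d), target=%d\n", success, *(int *)arg);
}

int main(void) {
  int target = 0;
  closure c = {pick_done, &target};
  if (toy_pick(1, &target, &c)) {
    printf("sync pick, target=%d\n", target);
  }
  if (!toy_pick(0, &target, &c)) {
    /* later, when a subchannel becomes ready, the policy would set the
       target and run the parked closure: */
    target = 7;
    c.cb(c.arg, 1);
  }
  return 0;
}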
diff --git a/src/core/client_config/resolver.c b/src/core/client_config/resolver.c
index 081097eb19..eda01e72ba 100644
--- a/src/core/client_config/resolver.c
+++ b/src/core/client_config/resolver.c
@@ -71,11 +71,8 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- struct sockaddr *failing_address,
- int failing_address_len) {
- resolver->vtable->channel_saw_error(exec_ctx, resolver, failing_address,
- failing_address_len);
+ grpc_resolver *resolver) {
+ resolver->vtable->channel_saw_error(exec_ctx, resolver);
}
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
diff --git a/src/core/client_config/resolver.h b/src/core/client_config/resolver.h
index 7ba0cd5bd4..e612eaf3b3 100644
--- a/src/core/client_config/resolver.h
+++ b/src/core/client_config/resolver.h
@@ -35,8 +35,8 @@
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVER_H
#include "src/core/client_config/client_config.h"
+#include "src/core/client_config/subchannel.h"
#include "src/core/iomgr/iomgr.h"
-#include "src/core/iomgr/sockaddr.h"
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
@@ -51,9 +51,7 @@ struct grpc_resolver {
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- struct sockaddr *failing_address,
- int failing_address_len);
+ void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config, grpc_closure *on_complete);
};
@@ -81,9 +79,7 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon. */
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- struct sockaddr *failing_address,
- int failing_address_len);
+ grpc_resolver *resolver);
/** Get the next client config. Called by the channel to fetch a new
configuration. Expected to set *target_config with a new configuration,
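
Since channel_saw_error no longer carries a failing address, an implementation can only treat the call as the re-resolution hint described above. A standalone toy resolver wiring the slimmed-down vtable might look like the sketch below; the types are stubbed out and all names are invented so the example compiles on its own.

#include <stdio.h>

typedef struct resolver resolver;

typedef struct {
  void (*shutdown)(resolver *r);
  void (*channel_saw_error)(resolver *r); /* no failing-address arguments */
} resolver_vtable;

struct resolver {
  const resolver_vtable *vtable;
  int published; /* has the current config already been handed out? */
};

static void toy_shutdown(resolver *r) { (void)r; }

static void toy_channel_saw_error(resolver *r) {
  /* treat the error purely as a hint: forget the published config so the
     next request for a config triggers re-resolution */
  r->published = 0;
  printf("re-resolution requested\n");
}

static const resolver_vtable toy_vtable = {toy_shutdown, toy_channel_saw_error};

int main(void) {
  resolver r = {&toy_vtable, 1};
  r.vtable->channel_saw_error(&r);
  printf("published=%d\n", r.published);
  return 0;
}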
diff --git a/src/core/client_config/resolvers/dns_resolver.c b/src/core/client_config/resolvers/dns_resolver.c
index 7f9dd2543f..28ca30b946 100644
--- a/src/core/client_config/resolvers/dns_resolver.c
+++ b/src/core/client_config/resolvers/dns_resolver.c
@@ -40,7 +40,6 @@
#include <grpc/support/string_util.h>
#include "src/core/client_config/lb_policy_registry.h"
-#include "src/core/client_config/subchannel_factory_decorators/add_channel_arg.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/support/string.h"
@@ -81,9 +80,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- struct sockaddr *failing_address,
- int failing_address_len);
+static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@@ -103,8 +100,7 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver, struct sockaddr *sa,
- int len) {
+ grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (!r->resolving) {
diff --git a/src/core/client_config/resolvers/sockaddr_resolver.c b/src/core/client_config/resolvers/sockaddr_resolver.c
index 0b017f06c7..81d6627ecc 100644
--- a/src/core/client_config/resolvers/sockaddr_resolver.c
+++ b/src/core/client_config/resolvers/sockaddr_resolver.c
@@ -83,9 +83,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r,
- struct sockaddr *failing_address,
- int failing_address_len);
+ grpc_resolver *r);
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@@ -107,8 +105,13 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
}
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- struct sockaddr *sa, int len) {}
+ grpc_resolver *resolver) {
+ sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+ gpr_mu_lock(&r->mu);
+ r->published = 0;
+ sockaddr_maybe_finish_next_locked(exec_ctx, r);
+ gpr_mu_unlock(&r->mu);
+}
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
@@ -344,6 +347,9 @@ static grpc_resolver *sockaddr_create(
gpr_slice_buffer_destroy(&path_parts);
gpr_slice_unref(path_slice);
if (errors_found) {
+ gpr_free(r->lb_policy_name);
+ gpr_free(r->addrs);
+ gpr_free(r->addrs_len);
gpr_free(r);
return NULL;
}
diff --git a/src/core/client_config/resolvers/zookeeper_resolver.c b/src/core/client_config/resolvers/zookeeper_resolver.c
index 136197d4c6..4924ca77d6 100644
--- a/src/core/client_config/resolvers/zookeeper_resolver.c
+++ b/src/core/client_config/resolvers/zookeeper_resolver.c
@@ -96,9 +96,7 @@ static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r,
- struct sockaddr *failing_address,
- int failing_address_len);
+ grpc_resolver *r);
static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@@ -125,8 +123,7 @@ static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx,
}
static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- struct sockaddr *sa, int len) {
+ grpc_resolver *resolver) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->resolving == 0) {
diff --git a/src/core/client_config/subchannel.c b/src/core/client_config/subchannel.c
index 0401dd3868..afb1cdbd6d 100644
--- a/src/core/client_config/subchannel.c
+++ b/src/core/client_config/subchannel.c
@@ -40,9 +40,15 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
+#include "src/core/client_config/initial_connect_string.h"
#include "src/core/iomgr/timer.h"
-#include "src/core/transport/connectivity_state.h"
+#include "src/core/profiling/timers.h"
#include "src/core/surface/channel.h"
+#include "src/core/transport/connectivity_state.h"
+#include "src/core/transport/connectivity_state.h"
+
+#define INTERNAL_REF_BITS 16
+#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1))
#define GRPC_SUBCHANNEL_MIN_CONNECT_TIMEOUT_SECONDS 20
#define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1
@@ -50,33 +56,35 @@
#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
-typedef struct {
- /* all fields protected by subchannel->mu */
- /** refcount */
- int refs;
- /** parent subchannel */
- grpc_subchannel *subchannel;
-} connection;
+#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
+ ((grpc_connected_subchannel *)(gpr_atm_##barrier##_load( \
+ &(subchannel)->connected_subchannel)))
typedef struct {
grpc_closure closure;
- size_t version;
grpc_subchannel *subchannel;
grpc_connectivity_state connectivity_state;
} state_watcher;
-typedef struct waiting_for_connect {
- struct waiting_for_connect *next;
- grpc_closure *notify;
- grpc_pollset *pollset;
- grpc_subchannel_call **target;
+typedef struct external_state_watcher {
grpc_subchannel *subchannel;
- grpc_closure continuation;
-} waiting_for_connect;
+ grpc_pollset_set *pollset_set;
+ grpc_closure *notify;
+ grpc_closure closure;
+ struct external_state_watcher *next;
+ struct external_state_watcher *prev;
+} external_state_watcher;
struct grpc_subchannel {
grpc_connector *connector;
+ /** refcount
+ - lower INTERNAL_REF_BITS bits are for internal references:
+ these do not keep the subchannel open.
+ - upper remaining bits are for public references: these do
+ keep the subchannel open */
+ gpr_atm ref_pair;
+
/** non-transport related channel filters */
const grpc_channel_filter **filters;
size_t num_filters;
@@ -85,15 +93,9 @@ struct grpc_subchannel {
/** address to connect to */
struct sockaddr *addr;
size_t addr_len;
- /** metadata context */
- grpc_mdctx *mdctx;
- /** master channel - the grpc_channel instance that ultimately owns
- this channel_data via its channel stack.
- We occasionally use this to bump the refcount on the master channel
- to keep ourselves alive through an asynchronous operation. */
- grpc_channel *master;
- /** have we seen a disconnection? */
- int disconnected;
+
+ /** initial string to send to peer */
+ gpr_slice initial_connect_string;
/** set during connection */
grpc_connect_out_args connecting_result;
@@ -102,27 +104,24 @@ struct grpc_subchannel {
grpc_closure connected;
/** pollset_set tracking who's interested in a connection
- being setup - owned by the master channel (in particular the
- client_channel
- filter there-in) */
- grpc_pollset_set *pollset_set;
+ being setup */
+ grpc_pollset_set pollset_set;
+
+ /** active connection, or null; of type grpc_connected_subchannel */
+ gpr_atm connected_subchannel;
/** mutex protecting remaining elements */
gpr_mu mu;
- /** active connection */
- connection *active;
- /** version number for the active connection */
- size_t active_version;
- /** refcount */
- int refs;
+ /** have we seen a disconnection? */
+ int disconnected;
/** are we connecting */
int connecting;
- /** things waiting for a connection */
- waiting_for_connect *waiting;
/** connectivity state tracking */
grpc_connectivity_state_tracker state_tracker;
+ external_state_watcher root_external_state_watcher;
+
/** next connect attempt time */
gpr_timespec next_attempt;
/** amount to backoff each failure */
@@ -136,147 +135,141 @@ struct grpc_subchannel {
};
struct grpc_subchannel_call {
- connection *connection;
- gpr_refcount refs;
+ grpc_connected_subchannel *connection;
};
#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
-#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)((con) + 1))
-
-static grpc_subchannel_call *create_call(grpc_exec_ctx *exec_ctx,
- connection *con);
-static void connectivity_state_changed_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- const char *reason);
-static grpc_connectivity_state compute_connectivity_locked(grpc_subchannel *c);
+#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)(con))
+#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
+ (((grpc_subchannel_call *)(callstack)) - 1)
+
static gpr_timespec compute_connect_deadline(grpc_subchannel *c);
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
int iomgr_success);
-static void subchannel_ref_locked(grpc_subchannel *c
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-static int subchannel_unref_locked(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) GRPC_MUST_USE_RESULT;
-static void connection_ref_locked(connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-static grpc_subchannel *connection_unref_locked(
- grpc_exec_ctx *exec_ctx,
- connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) GRPC_MUST_USE_RESULT;
-static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c);
-
-#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
-#define SUBCHANNEL_REF_LOCKED(p, r) \
- subchannel_ref_locked((p), __FILE__, __LINE__, (r))
-#define SUBCHANNEL_UNREF_LOCKED(p, r) \
- subchannel_unref_locked((p), __FILE__, __LINE__, (r))
-#define CONNECTION_REF_LOCKED(p, r) \
- connection_ref_locked((p), __FILE__, __LINE__, (r))
-#define CONNECTION_UNREF_LOCKED(cl, p, r) \
- connection_unref_locked((cl), (p), __FILE__, __LINE__, (r))
-#define REF_PASS_ARGS , file, line, reason
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
#define REF_LOG(name, p) \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p ref %d -> %d %s", \
- (name), (p), (p)->refs, (p)->refs + 1, reason)
+ (name), (p), (p)->refs.count, (p)->refs.count + 1, reason)
#define UNREF_LOG(name, p) \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p unref %d -> %d %s", \
- (name), (p), (p)->refs, (p)->refs - 1, reason)
+ (name), (p), (p)->refs.count, (p)->refs.count - 1, reason)
+#define REF_MUTATE_EXTRA_ARGS \
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose
+#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
#else
-#define SUBCHANNEL_REF_LOCKED(p, r) subchannel_ref_locked((p))
-#define SUBCHANNEL_UNREF_LOCKED(p, r) subchannel_unref_locked((p))
-#define CONNECTION_REF_LOCKED(p, r) connection_ref_locked((p))
-#define CONNECTION_UNREF_LOCKED(cl, p, r) connection_unref_locked((cl), (p))
-#define REF_PASS_ARGS
+#define REF_REASON ""
#define REF_LOG(name, p) \
do { \
} while (0)
#define UNREF_LOG(name, p) \
do { \
} while (0)
+#define REF_MUTATE_EXTRA_ARGS
+#define REF_MUTATE_PURPOSE(x)
#endif
/*
* connection implementation
*/
-static void connection_destroy(grpc_exec_ctx *exec_ctx, connection *c) {
- GPR_ASSERT(c->refs == 0);
+static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
+ grpc_connected_subchannel *c = arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
-static void connection_ref_locked(connection *c
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- REF_LOG("CONNECTION", c);
- subchannel_ref_locked(c->subchannel REF_PASS_ARGS);
- ++c->refs;
+void grpc_connected_subchannel_ref(grpc_connected_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
}
-static grpc_subchannel *connection_unref_locked(
- grpc_exec_ctx *exec_ctx, connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- grpc_subchannel *destroy = NULL;
- UNREF_LOG("CONNECTION", c);
- if (subchannel_unref_locked(c->subchannel REF_PASS_ARGS)) {
- destroy = c->subchannel;
- }
- if (--c->refs == 0 && c->subchannel->active != c) {
- connection_destroy(exec_ctx, c);
- }
- return destroy;
+void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c),
+ REF_REASON);
}
/*
* grpc_subchannel implementation
*/
-static void subchannel_ref_locked(grpc_subchannel *c
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- REF_LOG("SUBCHANNEL", c);
- ++c->refs;
+static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
+ grpc_subchannel *c = arg;
+ gpr_free((void *)c->filters);
+ grpc_channel_args_destroy(c->args);
+ gpr_free(c->addr);
+ gpr_slice_unref(c->initial_connect_string);
+ grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
+ grpc_connector_unref(exec_ctx, c->connector);
+ grpc_pollset_set_destroy(&c->pollset_set);
+ gpr_free(c);
}
-static int subchannel_unref_locked(grpc_subchannel *c
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- UNREF_LOG("SUBCHANNEL", c);
- return --c->refs == 0;
+static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
+ int barrier REF_MUTATE_EXTRA_ARGS) {
+ gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
+ : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SUBCHANNEL: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
+ old_val + delta, reason);
+#endif
+ return old_val;
}
void grpc_subchannel_ref(grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- gpr_mu_lock(&c->mu);
- subchannel_ref_locked(c REF_PASS_ARGS);
- gpr_mu_unlock(&c->mu);
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
+ 0 REF_MUTATE_PURPOSE("STRONG_REF"));
+ GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0);
}
-void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- int destroy;
- gpr_mu_lock(&c->mu);
- destroy = subchannel_unref_locked(c REF_PASS_ARGS);
- gpr_mu_unlock(&c->mu);
- if (destroy) subchannel_destroy(exec_ctx, c);
+void grpc_subchannel_weak_ref(grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
+ GPR_ASSERT(old_refs != 0);
}
-static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
- if (c->active != NULL) {
- connection_destroy(exec_ctx, c->active);
+static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
+ grpc_connected_subchannel *con;
+ gpr_mu_lock(&c->mu);
+ GPR_ASSERT(!c->disconnected);
+ c->disconnected = 1;
+ grpc_connector_shutdown(exec_ctx, c->connector);
+ con = GET_CONNECTED_SUBCHANNEL(c, no_barrier);
+ if (con != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "connection");
+ gpr_atm_no_barrier_store(&c->connected_subchannel, 0xdeadbeef);
}
- gpr_free((void *)c->filters);
- grpc_channel_args_destroy(c->args);
- gpr_free(c->addr);
- grpc_mdctx_unref(c->mdctx);
- grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
- grpc_connector_unref(exec_ctx, c->connector);
- gpr_free(c);
+ gpr_mu_unlock(&c->mu);
}
-void grpc_subchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- grpc_pollset *pollset) {
- grpc_pollset_set_add_pollset(exec_ctx, c->pollset_set, pollset);
+void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
+ 1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
+ if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
+ disconnect(exec_ctx, c);
+ }
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "strong-unref");
}
-void grpc_subchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- grpc_pollset *pollset) {
- grpc_pollset_set_del_pollset(exec_ctx, c->pollset_set, pollset);
+void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ gpr_atm old_refs;
+ old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
+ if (old_refs == 1) {
+ grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(subchannel_destroy, c),
+ 1);
+ }
}
static gpr_uint32 random_seed() {
@@ -286,10 +279,8 @@ static gpr_uint32 random_seed() {
grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
grpc_subchannel_args *args) {
grpc_subchannel *c = gpr_malloc(sizeof(*c));
- grpc_channel_element *parent_elem = grpc_channel_stack_last_element(
- grpc_channel_get_channel_stack(args->master));
memset(c, 0, sizeof(*c));
- c->refs = 1;
+ gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
@@ -298,13 +289,14 @@ grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
sizeof(grpc_channel_filter *) * c->num_filters);
c->addr = gpr_malloc(args->addr_len);
memcpy(c->addr, args->addr, args->addr_len);
+ grpc_pollset_set_init(&c->pollset_set);
c->addr_len = args->addr_len;
+ grpc_set_initial_connect_string(&c->addr, &c->addr_len,
+ &c->initial_connect_string);
c->args = grpc_channel_args_copy(args->args);
- c->mdctx = args->mdctx;
- c->master = args->master;
- c->pollset_set = grpc_client_channel_get_connecting_pollset_set(parent_elem);
c->random = random_seed();
- grpc_mdctx_ref(c->mdctx);
+ c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
+ &c->root_external_state_watcher;
grpc_closure_init(&c->connected, subchannel_connected, c);
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel");
@@ -312,38 +304,18 @@ grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
return c;
}
-void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *subchannel,
- int iomgr_success) {
- waiting_for_connect *w4c;
- gpr_mu_lock(&subchannel->mu);
- w4c = subchannel->waiting;
- subchannel->waiting = NULL;
- gpr_mu_unlock(&subchannel->mu);
- while (w4c != NULL) {
- waiting_for_connect *next = w4c->next;
- grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel,
- w4c->pollset);
- if (w4c->notify) {
- w4c->notify->cb(exec_ctx, w4c->notify->cb_arg, iomgr_success);
- }
-
- GRPC_SUBCHANNEL_UNREF(exec_ctx, w4c->subchannel, "waiting_for_connect");
- gpr_free(w4c);
-
- w4c = next;
- }
-}
-
static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
grpc_connect_in_args args;
- args.interested_parties = c->pollset_set;
+ args.interested_parties = &c->pollset_set;
args.addr = c->addr;
args.addr_len = c->addr_len;
args.deadline = compute_connect_deadline(c);
args.channel_args = c->args;
+ args.initial_connect_string = c->initial_connect_string;
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ GRPC_CHANNEL_CONNECTING, "state_change");
grpc_connector_connect(exec_ctx, c->connector, &args, &c->connecting_result,
&c->connected);
}
@@ -356,59 +328,6 @@ static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
continue_connect(exec_ctx, c);
}
-static void continue_creating_call(grpc_exec_ctx *exec_ctx, void *arg,
- int iomgr_success) {
- grpc_subchannel_call_create_status call_creation_status;
- waiting_for_connect *w4c = arg;
- grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel, w4c->pollset);
- call_creation_status = grpc_subchannel_create_call(
- exec_ctx, w4c->subchannel, w4c->pollset, w4c->target, w4c->notify);
- GPR_ASSERT(call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY);
- w4c->notify->cb(exec_ctx, w4c->notify->cb_arg, iomgr_success);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, w4c->subchannel, "waiting_for_connect");
- gpr_free(w4c);
-}
-
-grpc_subchannel_call_create_status grpc_subchannel_create_call(
- grpc_exec_ctx *exec_ctx, grpc_subchannel *c, grpc_pollset *pollset,
- grpc_subchannel_call **target, grpc_closure *notify) {
- connection *con;
- gpr_mu_lock(&c->mu);
- if (c->active != NULL) {
- con = c->active;
- CONNECTION_REF_LOCKED(con, "call");
- gpr_mu_unlock(&c->mu);
-
- *target = create_call(exec_ctx, con);
- return GRPC_SUBCHANNEL_CALL_CREATE_READY;
- } else {
- waiting_for_connect *w4c = gpr_malloc(sizeof(*w4c));
- w4c->next = c->waiting;
- w4c->notify = notify;
- w4c->pollset = pollset;
- w4c->target = target;
- w4c->subchannel = c;
- /* released when clearing w4c */
- SUBCHANNEL_REF_LOCKED(c, "waiting_for_connect");
- grpc_closure_init(&w4c->continuation, continue_creating_call, w4c);
- c->waiting = w4c;
- grpc_subchannel_add_interested_party(exec_ctx, c, pollset);
- if (!c->connecting) {
- c->connecting = 1;
- connectivity_state_changed_locked(exec_ctx, c, "create_call");
- /* released by connection */
- SUBCHANNEL_REF_LOCKED(c, "connecting");
- GRPC_CHANNEL_INTERNAL_REF(c->master, "connecting");
- gpr_mu_unlock(&c->mu);
-
- start_connect(exec_ctx, c);
- } else {
- gpr_mu_unlock(&c->mu);
- }
- return GRPC_SUBCHANNEL_CALL_CREATE_PENDING;
- }
-}
-
grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c) {
grpc_connectivity_state state;
gpr_mu_lock(&c->mu);
@@ -417,153 +336,149 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c) {
return state;
}
-void grpc_subchannel_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- grpc_connectivity_state *state,
- grpc_closure *notify) {
+static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
+ external_state_watcher *w = arg;
+ grpc_closure *follow_up = w->notify;
+ if (w->pollset_set != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx, &w->subchannel->pollset_set,
+ w->pollset_set);
+ }
+ gpr_mu_lock(&w->subchannel->mu);
+ w->next->prev = w->prev;
+ w->prev->next = w->next;
+ gpr_mu_unlock(&w->subchannel->mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
+ gpr_free(w);
+ follow_up->cb(exec_ctx, follow_up->cb_arg, success);
+}
+
+void grpc_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *notify) {
int do_connect = 0;
- gpr_mu_lock(&c->mu);
- if (grpc_connectivity_state_notify_on_state_change(
- exec_ctx, &c->state_tracker, state, notify)) {
- do_connect = 1;
- c->connecting = 1;
- /* released by connection */
- SUBCHANNEL_REF_LOCKED(c, "connecting");
- GRPC_CHANNEL_INTERNAL_REF(c->master, "connecting");
- connectivity_state_changed_locked(exec_ctx, c, "state_change");
+ external_state_watcher *w;
+
+ if (state == NULL) {
+ gpr_mu_lock(&c->mu);
+ for (w = c->root_external_state_watcher.next;
+ w != &c->root_external_state_watcher; w = w->next) {
+ if (w->notify == notify) {
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &c->state_tracker, NULL, &w->closure);
+ }
+ }
+ gpr_mu_unlock(&c->mu);
+ } else {
+ w = gpr_malloc(sizeof(*w));
+ w->subchannel = c;
+ w->pollset_set = interested_parties;
+ w->notify = notify;
+ grpc_closure_init(&w->closure, on_external_state_watcher_done, w);
+ if (interested_parties != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, &c->pollset_set,
+ interested_parties);
+ }
+ GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher");
+ gpr_mu_lock(&c->mu);
+ w->next = &c->root_external_state_watcher;
+ w->prev = w->next->prev;
+ w->next->prev = w->prev->next = w;
+ if (grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &c->state_tracker, state, &w->closure)) {
+ do_connect = 1;
+ c->connecting = 1;
+ /* released by connection */
+ GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
+ }
+ gpr_mu_unlock(&c->mu);
}
- gpr_mu_unlock(&c->mu);
if (do_connect) {
start_connect(exec_ctx, c);
}
}
-int grpc_subchannel_state_change_unsubscribe(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- grpc_closure *subscribed_notify) {
- int success;
- gpr_mu_lock(&c->mu);
- success = grpc_connectivity_state_change_unsubscribe(
- exec_ctx, &c->state_tracker, subscribed_notify);
- gpr_mu_unlock(&c->mu);
- return success;
+void grpc_connected_subchannel_process_transport_op(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
+ grpc_transport_op *op) {
+ grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
+ grpc_channel_element *top_elem = grpc_channel_stack_element(channel_stack, 0);
+ top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
}
-void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- grpc_transport_op *op) {
- connection *con = NULL;
- grpc_subchannel *destroy;
- int cancel_alarm = 0;
- gpr_mu_lock(&c->mu);
- if (c->active != NULL) {
- con = c->active;
- CONNECTION_REF_LOCKED(con, "transport-op");
- }
- if (op->disconnect) {
- c->disconnected = 1;
- connectivity_state_changed_locked(exec_ctx, c, "disconnect");
- if (c->have_alarm) {
- cancel_alarm = 1;
- }
- }
- gpr_mu_unlock(&c->mu);
+static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
+ int iomgr_success) {
+ state_watcher *sw = p;
+ grpc_subchannel *c = sw->subchannel;
+ gpr_mu *mu = &c->mu;
- if (con != NULL) {
- grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
- grpc_channel_element *top_elem =
- grpc_channel_stack_element(channel_stack, 0);
- top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
+ gpr_mu_lock(mu);
- gpr_mu_lock(&c->mu);
- destroy = CONNECTION_UNREF_LOCKED(exec_ctx, con, "transport-op");
- gpr_mu_unlock(&c->mu);
- if (destroy) {
- subchannel_destroy(exec_ctx, destroy);
+ /* if we failed just leave this closure */
+ if (iomgr_success) {
+ if (sw->connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* any errors on a subchannel ==> we're done, create a new one */
+ sw->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
+ }
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ sw->connectivity_state, "reflect_child");
+ if (sw->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, GET_CONNECTED_SUBCHANNEL(c, no_barrier), NULL,
+ &sw->connectivity_state, &sw->closure);
+ GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
+ sw = NULL;
}
}
- if (cancel_alarm) {
- grpc_timer_cancel(exec_ctx, &c->alarm);
- }
-
- if (op->disconnect) {
- grpc_connector_shutdown(exec_ctx, c->connector);
- }
+ gpr_mu_unlock(mu);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "state_watcher");
+ gpr_free(sw);
}
-static void on_state_changed(grpc_exec_ctx *exec_ctx, void *p,
- int iomgr_success) {
- state_watcher *sw = p;
- grpc_subchannel *c = sw->subchannel;
- gpr_mu *mu = &c->mu;
- int destroy;
+static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *con,
+ grpc_pollset_set *interested_parties,
+ grpc_connectivity_state *state,
+ grpc_closure *closure) {
grpc_transport_op op;
grpc_channel_element *elem;
- connection *destroy_connection = NULL;
-
- gpr_mu_lock(mu);
-
- /* if we failed or there is a version number mismatch, just leave
- this closure */
- if (!iomgr_success || sw->subchannel->active_version != sw->version) {
- goto done;
- }
+ memset(&op, 0, sizeof(op));
+ op.connectivity_state = state;
+ op.on_connectivity_state_change = closure;
+ op.bind_pollset_set = interested_parties;
+ elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
+}
- switch (sw->connectivity_state) {
- case GRPC_CHANNEL_CONNECTING:
- case GRPC_CHANNEL_READY:
- case GRPC_CHANNEL_IDLE:
- /* all is still good: keep watching */
- memset(&op, 0, sizeof(op));
- op.connectivity_state = &sw->connectivity_state;
- op.on_connectivity_state_change = &sw->closure;
- elem = grpc_channel_stack_element(
- CHANNEL_STACK_FROM_CONNECTION(c->active), 0);
- elem->filter->start_transport_op(exec_ctx, elem, &op);
- /* early out */
- gpr_mu_unlock(mu);
- return;
- case GRPC_CHANNEL_FATAL_FAILURE:
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
- /* things have gone wrong, deactivate and enter idle */
- if (sw->subchannel->active->refs == 0) {
- destroy_connection = sw->subchannel->active;
- }
- sw->subchannel->active = NULL;
- grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
- c->disconnected
- ? GRPC_CHANNEL_FATAL_FAILURE
- : GRPC_CHANNEL_TRANSIENT_FAILURE,
- "connection_failed");
- break;
- }
+void grpc_connected_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *closure) {
+ connected_subchannel_state_op(exec_ctx, con, interested_parties, state,
+ closure);
+}
-done:
- connectivity_state_changed_locked(exec_ctx, c, "transport_state_changed");
- destroy = SUBCHANNEL_UNREF_LOCKED(c, "state_watcher");
- gpr_free(sw);
- gpr_mu_unlock(mu);
- if (destroy) {
- subchannel_destroy(exec_ctx, c);
- }
- if (destroy_connection != NULL) {
- connection_destroy(exec_ctx, destroy_connection);
- }
+void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *con,
+ grpc_closure *closure) {
+ grpc_transport_op op;
+ grpc_channel_element *elem;
+ memset(&op, 0, sizeof(op));
+ op.send_ping = closure;
+ elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
}
static void publish_transport(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
size_t channel_stack_size;
- connection *con;
+ grpc_connected_subchannel *con;
grpc_channel_stack *stk;
size_t num_filters;
const grpc_channel_filter **filters;
- waiting_for_connect *w4c;
- grpc_transport_op op;
- state_watcher *sw;
- connection *destroy_connection = NULL;
- grpc_channel_element *elem;
+ state_watcher *sw_subchannel;
/* build final filter list */
num_filters = c->num_filters + c->connecting_result.num_filters + 1;
@@ -575,74 +490,52 @@ static void publish_transport(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
/* construct channel stack */
channel_stack_size = grpc_channel_stack_size(filters, num_filters);
- con = gpr_malloc(sizeof(connection) + channel_stack_size);
- stk = (grpc_channel_stack *)(con + 1);
- con->refs = 0;
- con->subchannel = c;
- grpc_channel_stack_init(exec_ctx, filters, num_filters, c->master, c->args,
- c->mdctx, stk);
+ con = gpr_malloc(channel_stack_size);
+ stk = CHANNEL_STACK_FROM_CONNECTION(con);
+ grpc_channel_stack_init(exec_ctx, 1, connection_destroy, con, filters,
+ num_filters, c->args, "CONNECTED_SUBCHANNEL", stk);
grpc_connected_channel_bind_transport(stk, c->connecting_result.transport);
gpr_free((void *)c->connecting_result.filters);
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- sw = gpr_malloc(sizeof(*sw));
- grpc_closure_init(&sw->closure, on_state_changed, sw);
- sw->subchannel = c;
- sw->connectivity_state = GRPC_CHANNEL_READY;
+ sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel->subchannel = c;
+ sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
+ grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed,
+ sw_subchannel);
gpr_mu_lock(&c->mu);
if (c->disconnected) {
gpr_mu_unlock(&c->mu);
- gpr_free(sw);
+ gpr_free(sw_subchannel);
gpr_free((void *)filters);
grpc_channel_stack_destroy(exec_ctx, stk);
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
- GRPC_SUBCHANNEL_UNREF(exec_ctx, c, "connecting");
+ gpr_free(con);
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
return;
}
/* publish */
- if (c->active != NULL && c->active->refs == 0) {
- destroy_connection = c->active;
- }
- c->active = con;
- c->active_version++;
- sw->version = c->active_version;
+ GPR_ASSERT(gpr_atm_no_barrier_cas(&c->connected_subchannel, 0, (gpr_atm)con));
c->connecting = 0;
- /* watch for changes; subchannel ref for connecting is donated
+  /* set up the subchannel's watch on the connected subchannel for state
+     changes; the "connecting" ref on the subchannel is donated
to the state watcher */
- memset(&op, 0, sizeof(op));
- op.connectivity_state = &sw->connectivity_state;
- op.on_connectivity_state_change = &sw->closure;
- op.bind_pollset_set = c->pollset_set;
- SUBCHANNEL_REF_LOCKED(c, "state_watcher");
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
- GPR_ASSERT(!SUBCHANNEL_UNREF_LOCKED(c, "connecting"));
- elem =
- grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(c->active), 0);
- elem->filter->start_transport_op(exec_ctx, elem, &op);
+ GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ grpc_connected_subchannel_notify_on_state_change(
+ exec_ctx, con, &c->pollset_set, &sw_subchannel->connectivity_state,
+ &sw_subchannel->closure);
/* signal completion */
- connectivity_state_changed_locked(exec_ctx, c, "connected");
- w4c = c->waiting;
- c->waiting = NULL;
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker, GRPC_CHANNEL_READY,
+ "connected");
gpr_mu_unlock(&c->mu);
-
- while (w4c != NULL) {
- waiting_for_connect *next = w4c->next;
- grpc_exec_ctx_enqueue(exec_ctx, &w4c->continuation, 1);
- w4c = next;
- }
-
gpr_free((void *)filters);
-
- if (destroy_connection != NULL) {
- connection_destroy(exec_ctx, destroy_connection);
- }
}
/* Generate a random number between 0 and 1. */
@@ -653,10 +546,25 @@ static double generate_uniform_random_number(grpc_subchannel *c) {
/* Update backoff_delta and next_attempt in subchannel */
static void update_reconnect_parameters(grpc_subchannel *c) {
+ size_t i;
gpr_int32 backoff_delta_millis, jitter;
gpr_int32 max_backoff_millis =
GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
double jitter_range;
+
+ if (c->args) {
+ for (i = 0; i < c->args->num_args; i++) {
+ if (0 == strcmp(c->args->args[i].key,
+ "grpc.testing.fixed_reconnect_backoff")) {
+ GPR_ASSERT(c->args->args[i].type == GRPC_ARG_INTEGER);
+ c->next_attempt = gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_millis(c->args->args[i].value.integer, GPR_TIMESPAN));
+ return;
+ }
+ }
+ }
+
backoff_delta_millis =
(gpr_int32)(gpr_time_to_millis(c->backoff_delta) *
GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER);
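
The constants around this code suggest the usual reconnect policy: grow the delay by a multiplier on each failure, cap it, and add proportional jitter, unless the grpc.testing.fixed_reconnect_backoff arg pins the delay for tests. The sketch below illustrates that policy with standalone code; the 1.6 multiplier is an assumed value (only the 1 second initial delay, the 120 second cap, and the 0.2 jitter fraction appear in this diff).

#include <stdio.h>
#include <stdlib.h>

#define INITIAL_BACKOFF_MS 1000     /* 1 second initial delay */
#define BACKOFF_MULTIPLIER 1.6      /* assumed value, not shown in this diff */
#define MAX_BACKOFF_MS (120 * 1000) /* 120 second cap */
#define JITTER 0.2                  /* +/-20% jitter */

/* fixed_ms < 0 means "no test override" */
static int reconnect_delay_ms(int *base_ms, int fixed_ms) {
  if (fixed_ms >= 0) return fixed_ms; /* fixed backoff for tests: no jitter */
  *base_ms = (int)(*base_ms * BACKOFF_MULTIPLIER);
  if (*base_ms > MAX_BACKOFF_MS) *base_ms = MAX_BACKOFF_MS;
  /* uniform jitter in [-JITTER, +JITTER] around the capped delay */
  return (int)(*base_ms *
               (1.0 + JITTER * (2.0 * rand() / (double)RAND_MAX - 1.0)));
}

int main(void) {
  int base_ms = INITIAL_BACKOFF_MS;
  int i;
  for (i = 0; i < 5; i++) {
    printf("attempt %d: wait ~%d ms\n", i + 1,
           reconnect_delay_ms(&base_ms, -1));
  }
  printf("with test override: wait %d ms\n",
         reconnect_delay_ms(&base_ms, 100));
  return 0;
}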
@@ -681,29 +589,31 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, int iomgr_success) {
if (c->disconnected) {
iomgr_success = 0;
}
- connectivity_state_changed_locked(exec_ctx, c, "alarm");
gpr_mu_unlock(&c->mu);
if (iomgr_success) {
update_reconnect_parameters(c);
continue_connect(exec_ctx, c);
} else {
- grpc_subchannel_cancel_waiting_call(exec_ctx, c, iomgr_success);
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
- GRPC_SUBCHANNEL_UNREF(exec_ctx, c, "connecting");
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
}
}
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
grpc_subchannel *c = arg;
+
if (c->connecting_result.transport != NULL) {
publish_transport(exec_ctx, c);
+ } else if (c->disconnected) {
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->have_alarm);
c->have_alarm = 1;
- connectivity_state_changed_locked(exec_ctx, c, "connect_failed");
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connect_failed");
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, on_alarm, c, now);
gpr_mu_unlock(&c->mu);
}
@@ -720,53 +630,29 @@ static gpr_timespec compute_connect_deadline(grpc_subchannel *c) {
: min_deadline;
}
-static grpc_connectivity_state compute_connectivity_locked(grpc_subchannel *c) {
- if (c->disconnected) {
- return GRPC_CHANNEL_FATAL_FAILURE;
- }
- if (c->connecting) {
- if (c->have_alarm) {
- return GRPC_CHANNEL_TRANSIENT_FAILURE;
- }
- return GRPC_CHANNEL_CONNECTING;
- }
- if (c->active) {
- return GRPC_CHANNEL_READY;
- }
- return GRPC_CHANNEL_IDLE;
-}
-
-static void connectivity_state_changed_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c,
- const char *reason) {
- grpc_connectivity_state current = compute_connectivity_locked(c);
- grpc_connectivity_state_set(exec_ctx, &c->state_tracker, current, reason);
-}
-
/*
* grpc_subchannel_call implementation
*/
+static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
+ int success) {
+ grpc_subchannel_call *c = call;
+ GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
+ grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, c->connection, "subchannel_call");
+ gpr_free(c);
+ GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
+}
+
void grpc_subchannel_call_ref(grpc_subchannel_call *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- gpr_ref(&c->refs);
+ GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
- if (gpr_unref(&c->refs)) {
- gpr_mu *mu = &c->connection->subchannel->mu;
- grpc_subchannel *destroy;
- grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
- gpr_mu_lock(mu);
- destroy = CONNECTION_UNREF_LOCKED(exec_ctx, c->connection, "call");
- gpr_mu_unlock(mu);
- gpr_free(c);
- if (destroy != NULL) {
- subchannel_destroy(exec_ctx, destroy);
- }
- }
+ GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
@@ -784,22 +670,27 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
top_elem->filter->start_transport_stream_op(exec_ctx, top_elem, op);
}
-static grpc_subchannel_call *create_call(grpc_exec_ctx *exec_ctx,
- connection *con) {
+grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel *c) {
+ return GET_CONNECTED_SUBCHANNEL(c, acq);
+}
+
+grpc_subchannel_call *grpc_connected_subchannel_create_call(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
+ grpc_pollset *pollset) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
grpc_subchannel_call *call =
gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call);
call->connection = con;
- gpr_ref_init(&call->refs, 1);
- grpc_call_stack_init(exec_ctx, chanstk, NULL, NULL, callstk);
+ GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
+ grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, call,
+ NULL, NULL, callstk);
+ grpc_call_stack_set_pollset(exec_ctx, callstk, pollset);
return call;
}
-grpc_mdctx *grpc_subchannel_get_mdctx(grpc_subchannel *subchannel) {
- return subchannel->mdctx;
-}
-
-grpc_channel *grpc_subchannel_get_master(grpc_subchannel *subchannel) {
- return subchannel->master;
+grpc_call_stack *grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call *subchannel_call) {
+ return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
}
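
The connected subchannel above is published exactly once with a compare-and-swap from NULL and read back through an acquire load (GET_CONNECTED_SUBCHANNEL(c, acq)), so readers never have to take the subchannel mutex. The same pattern in standalone C11 form, with invented names standing in for the gRPC types, looks like this:

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  int transport_fd; /* stands in for a connected channel stack */
} connected;

typedef struct {
  _Atomic(connected *) connected_subchannel; /* NULL until published */
} subchannel;

static int publish(subchannel *s, connected *con) {
  connected *expected = NULL;
  /* succeeds exactly once; a second publish attempt fails */
  return atomic_compare_exchange_strong(&s->connected_subchannel, &expected,
                                        con);
}

static connected *get_connected(subchannel *s) {
  /* acquire pairs with the ordering of the successful publish above */
  return atomic_load_explicit(&s->connected_subchannel, memory_order_acquire);
}

int main(void) {
  subchannel s;
  connected con = {3};
  atomic_init(&s.connected_subchannel, NULL);
  printf("published: %d\n", publish(&s, &con));       /* 1 */
  printf("published again: %d\n", publish(&s, &con)); /* 0 */
  printf("fd: %d\n", get_connected(&s)->transport_fd);
  return 0;
}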
diff --git a/src/core/client_config/subchannel.h b/src/core/client_config/subchannel.h
index ec1cc7cc69..57c7c9dc67 100644
--- a/src/core/client_config/subchannel.h
+++ b/src/core/client_config/subchannel.h
@@ -41,14 +41,23 @@
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
typedef struct grpc_subchannel grpc_subchannel;
+typedef struct grpc_connected_subchannel grpc_connected_subchannel;
typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args;
-#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
+ grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
+ grpc_subchannel_weak_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
+ grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
+ grpc_connected_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
@@ -58,6 +67,12 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#else
#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
+#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
+ grpc_subchannel_weak_unref((cl), (p))
+#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
+ grpc_connected_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
grpc_subchannel_call_unref((cl), (p))
@@ -69,38 +84,31 @@ void grpc_subchannel_ref(grpc_subchannel *channel
void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_weak_ref(grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_ref(grpc_connected_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-typedef enum {
- GRPC_SUBCHANNEL_CALL_CREATE_READY,
- GRPC_SUBCHANNEL_CALL_CREATE_PENDING
-} grpc_subchannel_call_create_status;
-
-/** construct a subchannel call (possibly asynchronously).
- *
- * If the returned status is \a GRPC_SUBCHANNEL_CALL_CREATE_READY, the call will
- * return immediately and \a target will point to a connected \a subchannel_call
- * instance. Note that \a notify will \em not be invoked in this case.
- * Otherwise, if the returned status is GRPC_SUBCHANNEL_CALL_CREATE_PENDING, the
- * subchannel call will be created asynchronously, invoking the \a notify
- * callback upon completion. */
-grpc_subchannel_call_create_status grpc_subchannel_create_call(
- grpc_exec_ctx *exec_ctx, grpc_subchannel *subchannel, grpc_pollset *pollset,
- grpc_subchannel_call **target, grpc_closure *notify);
-
-/** cancel \a call in the waiting state. */
-void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *subchannel,
- int iomgr_success);
+/** construct a subchannel call */
+grpc_subchannel_call *grpc_connected_subchannel_create_call(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
+ grpc_pollset *pollset);
/** process a transport level op */
-void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *subchannel,
- grpc_transport_op *op);
+void grpc_connected_subchannel_process_transport_op(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel,
+ grpc_transport_op *op);
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
@@ -108,26 +116,22 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the channel */
-void grpc_subchannel_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel,
- grpc_connectivity_state *state,
- grpc_closure *notify);
-
-/** Remove \a subscribed_notify from the list of closures to be called on a
- * state change if present, returning 1. Otherwise, nothing is done and return
- * 0. */
-int grpc_subchannel_state_change_unsubscribe(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel,
- grpc_closure *subscribed_notify);
-
-/** express interest in \a channel's activities through \a pollset. */
-void grpc_subchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel,
- grpc_pollset *pollset);
-/** stop following \a channel's activity through \a pollset. */
-void grpc_subchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel,
- grpc_pollset *pollset);
+void grpc_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *notify);
+void grpc_connected_subchannel_notify_on_state_change(
+ grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel,
+ grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
+ grpc_closure *notify);
+void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
+ grpc_connected_subchannel *channel,
+ grpc_closure *notify);
+
+/** retrieve the grpc_connected_subchannel - or NULL if called before
+ the subchannel becomes connected */
+grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel *subchannel);
/** continue processing a transport op */
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
@@ -138,6 +142,9 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call);
+grpc_call_stack *grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call *subchannel_call);
+
struct grpc_subchannel_args {
/** Channel filters for this channel - wrapped factories will likely
want to mutate this */
@@ -149,20 +156,10 @@ struct grpc_subchannel_args {
/** Address to connect to */
struct sockaddr *addr;
size_t addr_len;
- /** metadata context to use */
- grpc_mdctx *mdctx;
- /** master channel */
- grpc_channel *master;
};
/** create a subchannel given a connector */
grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
grpc_subchannel_args *args);
-/** Return the metadata context associated with the subchannel */
-grpc_mdctx *grpc_subchannel_get_mdctx(grpc_subchannel *subchannel);
-
-/** Return the master channel associated with the subchannel */
-grpc_channel *grpc_subchannel_get_master(grpc_subchannel *subchannel);
-
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_H */
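
The header above replaces the old two-phase create protocol (READY/PENDING plus a notify closure) with a synchronous path: once a subchannel exposes its grpc_connected_subchannel, a call can be created directly. A minimal sketch of how a caller might drive the new API, using only functions declared in this header; the surrounding exec_ctx/pollset setup and the retry-on-NULL handling are assumptions, not part of this patch.

    /* sketch only: assumes exec_ctx, pollset and subchannel are already set up */
    static grpc_subchannel_call *try_create_call(grpc_exec_ctx *exec_ctx,
                                                 grpc_subchannel *subchannel,
                                                 grpc_pollset *pollset) {
      grpc_connected_subchannel *con =
          grpc_subchannel_get_connected_subchannel(subchannel);
      if (con == NULL) {
        /* not connected yet: callers now wait via
           grpc_subchannel_notify_on_state_change instead of a PENDING status */
        return NULL;
      }
      return grpc_connected_subchannel_create_call(exec_ctx, con, pollset);
    }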
diff --git a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c b/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
deleted file mode 100644
index cd25fdcf0f..0000000000
--- a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/client_config/subchannel_factory_decorators/merge_channel_args.h"
-#include <grpc/support/alloc.h>
-#include "src/core/channel/channel_args.h"
-
-typedef struct {
- grpc_subchannel_factory base;
- gpr_refcount refs;
- grpc_subchannel_factory *wrapped;
- grpc_channel_args *merge_args;
-} merge_args_factory;
-
-static void merge_args_factory_ref(grpc_subchannel_factory *scf) {
- merge_args_factory *f = (merge_args_factory *)scf;
- gpr_ref(&f->refs);
-}
-
-static void merge_args_factory_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_factory *scf) {
- merge_args_factory *f = (merge_args_factory *)scf;
- if (gpr_unref(&f->refs)) {
- grpc_subchannel_factory_unref(exec_ctx, f->wrapped);
- grpc_channel_args_destroy(f->merge_args);
- gpr_free(f);
- }
-}
-
-static grpc_subchannel *merge_args_factory_create_subchannel(
- grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
- grpc_subchannel_args *args) {
- merge_args_factory *f = (merge_args_factory *)scf;
- grpc_channel_args *final_args =
- grpc_channel_args_merge(args->args, f->merge_args);
- grpc_subchannel *s;
- args->args = final_args;
- s = grpc_subchannel_factory_create_subchannel(exec_ctx, f->wrapped, args);
- grpc_channel_args_destroy(final_args);
- return s;
-}
-
-static const grpc_subchannel_factory_vtable merge_args_factory_vtable = {
- merge_args_factory_ref, merge_args_factory_unref,
- merge_args_factory_create_subchannel};
-
-grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
- grpc_subchannel_factory *input, const grpc_channel_args *args) {
- merge_args_factory *f = gpr_malloc(sizeof(*f));
- f->base.vtable = &merge_args_factory_vtable;
- gpr_ref_init(&f->refs, 1);
- grpc_subchannel_factory_ref(input);
- f->wrapped = input;
- f->merge_args = grpc_channel_args_copy(args);
- return &f->base;
-}
diff --git a/src/core/compression/algorithm.c b/src/core/compression/algorithm.c
index fd95a3c891..8e4e5c91d4 100644
--- a/src/core/compression/algorithm.c
+++ b/src/core/compression/algorithm.c
@@ -37,7 +37,9 @@
#include <grpc/compression.h>
#include <grpc/support/useful.h>
+#include "src/core/compression/algorithm_metadata.h"
#include "src/core/surface/api_trace.h"
+#include "src/core/transport/static_metadata.h"
int grpc_compression_algorithm_parse(const char *name, size_t name_length,
grpc_compression_algorithm *algorithm) {
@@ -72,17 +74,55 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
switch (algorithm) {
case GRPC_COMPRESS_NONE:
*name = "identity";
- break;
+ return 1;
case GRPC_COMPRESS_DEFLATE:
*name = "deflate";
- break;
+ return 1;
case GRPC_COMPRESS_GZIP:
*name = "gzip";
- break;
- default:
+ return 1;
+ case GRPC_COMPRESS_ALGORITHMS_COUNT:
return 0;
}
- return 1;
+ return 0;
+}
+
+grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
+ grpc_mdstr *str) {
+ if (str == GRPC_MDSTR_IDENTITY) return GRPC_COMPRESS_NONE;
+ if (str == GRPC_MDSTR_DEFLATE) return GRPC_COMPRESS_DEFLATE;
+ if (str == GRPC_MDSTR_GZIP) return GRPC_COMPRESS_GZIP;
+ return GRPC_COMPRESS_ALGORITHMS_COUNT;
+}
+
+grpc_mdstr *grpc_compression_algorithm_mdstr(
+ grpc_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_COMPRESS_NONE:
+ return GRPC_MDSTR_IDENTITY;
+ case GRPC_COMPRESS_DEFLATE:
+ return GRPC_MDSTR_DEFLATE;
+ case GRPC_COMPRESS_GZIP:
+ return GRPC_MDSTR_GZIP;
+ case GRPC_COMPRESS_ALGORITHMS_COUNT:
+ return NULL;
+ }
+ return NULL;
+}
+
+grpc_mdelem *grpc_compression_encoding_mdelem(
+ grpc_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_COMPRESS_NONE:
+ return GRPC_MDELEM_GRPC_ENCODING_IDENTITY;
+ case GRPC_COMPRESS_DEFLATE:
+ return GRPC_MDELEM_GRPC_ENCODING_DEFLATE;
+ case GRPC_COMPRESS_GZIP:
+ return GRPC_MDELEM_GRPC_ENCODING_GZIP;
+ default:
+ break;
+ }
+ return NULL;
}
/* TODO(dgq): Add the ability to specify parameters to the individual
@@ -99,25 +139,9 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
case GRPC_COMPRESS_LEVEL_HIGH:
return GRPC_COMPRESS_DEFLATE;
default:
- /* we shouldn't be making it here */
- abort();
- return GRPC_COMPRESS_NONE;
- }
-}
-
-grpc_compression_level grpc_compression_level_for_algorithm(
- grpc_compression_algorithm algorithm) {
- grpc_compression_level clevel;
- GRPC_API_TRACE("grpc_compression_level_for_algorithm(algorithm=%d)", 1,
- ((int)algorithm));
- for (clevel = GRPC_COMPRESS_LEVEL_NONE; clevel < GRPC_COMPRESS_LEVEL_COUNT;
- ++clevel) {
- if (grpc_compression_algorithm_for_level(clevel) == algorithm) {
- return clevel;
- }
+ break;
}
- abort();
- return GRPC_COMPRESS_LEVEL_NONE;
+ GPR_UNREACHABLE_CODE(return GRPC_COMPRESS_NONE);
}
void grpc_compression_options_init(grpc_compression_options *opts) {
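
The new helpers above give a two-way mapping between grpc_compression_algorithm values and the interned metadata strings from static_metadata.h. A short round-trip sketch; some_other_mdstr is a hypothetical placeholder for any string that names no algorithm.

    grpc_mdstr *s = grpc_compression_algorithm_mdstr(GRPC_COMPRESS_GZIP);
    GPR_ASSERT(s == GRPC_MDSTR_GZIP);
    GPR_ASSERT(grpc_compression_algorithm_from_mdstr(s) == GRPC_COMPRESS_GZIP);
    /* strings that name no algorithm map to the sentinel count value */
    GPR_ASSERT(grpc_compression_algorithm_from_mdstr(some_other_mdstr) ==
               GRPC_COMPRESS_ALGORITHMS_COUNT);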
diff --git a/src/core/client_config/subchannel_factory_decorators/add_channel_arg.h b/src/core/compression/algorithm_metadata.h
index 76a535ebed..882633c307 100644
--- a/src/core/client_config/subchannel_factory_decorators/add_channel_arg.h
+++ b/src/core/compression/algorithm_metadata.h
@@ -31,16 +31,23 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H
-#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H
+#ifndef GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_METADATA_H
+#define GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_METADATA_H
-#include "src/core/client_config/subchannel_factory.h"
+#include <grpc/compression.h>
+#include "src/core/transport/metadata.h"
-/** Takes a subchannel factory, returns a new one that mutates incoming
- channel_args by adding a new argument; ownership of input, arg is retained
- by the caller. */
-grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
- grpc_subchannel_factory *input, const grpc_arg *arg);
+/** Return compression algorithm based metadata value */
+grpc_mdstr *grpc_compression_algorithm_mdstr(
+ grpc_compression_algorithm algorithm);
-#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H \
- */
+/** Return compression algorithm based metadata element (grpc-encoding: xxx) */
+grpc_mdelem *grpc_compression_encoding_mdelem(
+ grpc_compression_algorithm algorithm);
+
+/** Find the compression algorithm corresponding to the given mdstr; returns
+ * GRPC_COMPRESS_ALGORITHMS_COUNT on failure */
+grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
+ grpc_mdstr *str);
+
+#endif /* GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/compression/message_compress.c b/src/core/compression/message_compress.c
index 209c1f0ff1..edc21a9eb7 100644
--- a/src/core/compression/message_compress.c
+++ b/src/core/compression/message_compress.c
@@ -69,8 +69,8 @@ static int zlib_body(z_stream* zs, gpr_slice_buffer* input,
zs->next_out = GPR_SLICE_START_PTR(outbuf);
}
r = flate(zs, flush);
- if (r == Z_STREAM_ERROR) {
- gpr_log(GPR_INFO, "zlib: stream error");
+ if (r < 0 && r != Z_BUF_ERROR /* not fatal */) {
+ gpr_log(GPR_INFO, "zlib error (%d)", r);
goto error;
}
} while (zs->avail_out == 0);
@@ -91,6 +91,12 @@ error:
return 0;
}
+static void* zalloc_gpr(void* opaque, unsigned int items, unsigned int size) {
+ return gpr_malloc(items * size);
+}
+
+static void zfree_gpr(void* opaque, void* address) { gpr_free(address); }
+
static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
@@ -99,12 +105,11 @@ static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
+ zs.zalloc = zalloc_gpr;
+ zs.zfree = zfree_gpr;
r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 16 : 0),
8, Z_DEFAULT_STRATEGY);
- if (r != Z_OK) {
- gpr_log(GPR_ERROR, "deflateInit2 returns %d", r);
- return 0;
- }
+ GPR_ASSERT(r == Z_OK);
r = zlib_body(&zs, input, output, deflate) && output->length < input->length;
if (!r) {
for (i = count_before; i < output->count; i++) {
@@ -125,11 +130,10 @@ static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
+ zs.zalloc = zalloc_gpr;
+ zs.zfree = zfree_gpr;
r = inflateInit2(&zs, 15 | (gzip ? 16 : 0));
- if (r != Z_OK) {
- gpr_log(GPR_ERROR, "inflateInit2 returns %d", r);
- return 0;
- }
+ GPR_ASSERT(r == Z_OK);
r = zlib_body(&zs, input, output, inflate);
if (!r) {
for (i = count_before; i < output->count; i++) {
@@ -150,8 +154,8 @@ static int copy(gpr_slice_buffer* input, gpr_slice_buffer* output) {
return 1;
}
-int compress_inner(grpc_compression_algorithm algorithm,
- gpr_slice_buffer* input, gpr_slice_buffer* output) {
+static int compress_inner(grpc_compression_algorithm algorithm,
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
      /* the fallback path always needs to send uncompressed: we simply
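
Both zlib paths now route allocation through gpr via the zalloc/zfree hooks instead of letting zlib call malloc directly, and treat init failure as a programming error (GPR_ASSERT) rather than a runtime one. A hedged sketch of the same hook wiring in isolation, mirroring the deflateInit2 parameters used in zlib_compress above:

    z_stream zs;
    memset(&zs, 0, sizeof(zs));
    zs.zalloc = zalloc_gpr; /* (opaque, items, size) -> gpr_malloc(items * size) */
    zs.zfree = zfree_gpr;   /* (opaque, address)     -> gpr_free(address) */
    /* windowBits 15 | 16 selects a gzip wrapper, as in zlib_compress */
    GPR_ASSERT(deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | 16, 8,
                            Z_DEFAULT_STRATEGY) == Z_OK);
    /* ... feed slice buffers through zlib_body(&zs, input, output, deflate) ... */
    deflateEnd(&zs);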
diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c
index a87f1aa87b..b5cd8d8d2a 100644
--- a/src/core/httpcli/httpcli.c
+++ b/src/core/httpcli/httpcli.c
@@ -53,6 +53,7 @@ typedef struct {
size_t next_address;
grpc_endpoint *ep;
char *host;
+ char *ssl_host_override;
gpr_timespec deadline;
int have_read_byte;
const grpc_httpcli_handshaker *handshaker;
@@ -106,6 +107,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
}
gpr_slice_unref(req->request_text);
gpr_free(req->host);
+ gpr_free(req->ssl_host_override);
grpc_iomgr_unregister_object(&req->iomgr_obj);
gpr_slice_buffer_destroy(&req->incoming);
gpr_slice_buffer_destroy(&req->outgoing);
@@ -180,8 +182,10 @@ static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
next_address(exec_ctx, req);
return;
}
- req->handshaker->handshake(exec_ctx, req, req->ep, req->host,
- on_handshake_done);
+ req->handshaker->handshake(
+ exec_ctx, req, req->ep,
+ req->ssl_host_override ? req->ssl_host_override : req->host,
+ on_handshake_done);
}
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req) {
@@ -231,6 +235,7 @@ static void internal_request_begin(
gpr_slice_buffer_init(&req->outgoing);
grpc_iomgr_register_object(&req->iomgr_obj, name);
req->host = gpr_strdup(request->host);
+ req->ssl_host_override = gpr_strdup(request->ssl_host_override);
grpc_pollset_set_add_pollset(exec_ctx, &req->context->pollset_set,
req->pollset);
diff --git a/src/core/httpcli/httpcli.h b/src/core/httpcli/httpcli.h
index 6469c2f03e..30875d71f1 100644
--- a/src/core/httpcli/httpcli.h
+++ b/src/core/httpcli/httpcli.h
@@ -74,6 +74,8 @@ extern const grpc_httpcli_handshaker grpc_httpcli_ssl;
typedef struct grpc_httpcli_request {
/* The host name to connect to */
char *host;
+ /* The host to verify in the SSL handshake (or NULL) */
+ char *ssl_host_override;
/* The path of the resource to fetch */
char *path;
/* Additional headers: count and key/values; the following are supplied
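
With ssl_host_override, a request can connect to one address while verifying a different name during the SSL handshake; the httpcli change above falls back to host when the override is NULL. A hypothetical fill-in sketch (the addresses are placeholders and the remaining request fields are omitted):

    grpc_httpcli_request req;
    memset(&req, 0, sizeof(req));
    req.host = "203.0.113.10";             /* where the TCP connection goes */
    req.ssl_host_override = "example.com"; /* name checked during the handshake */
    req.path = "/status";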
diff --git a/src/core/httpcli/httpcli_security_connector.c b/src/core/httpcli/httpcli_security_connector.c
index fc6699c918..a5aa551373 100644
--- a/src/core/httpcli/httpcli_security_connector.c
+++ b/src/core/httpcli/httpcli_security_connector.c
@@ -68,7 +68,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
tsi_result result = TSI_OK;
tsi_handshaker *handshaker;
if (c->handshaker_factory == NULL) {
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
return;
}
result = tsi_ssl_handshaker_factory_create_handshaker(
@@ -76,7 +76,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@@ -149,7 +149,6 @@ typedef struct {
static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
grpc_security_status status,
- grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
on_done_closure *c = rp;
if (status != GRPC_SECURITY_OK) {
diff --git a/src/core/iomgr/closure.c b/src/core/iomgr/closure.c
index b4f1817de4..4aae52a454 100644
--- a/src/core/iomgr/closure.c
+++ b/src/core/iomgr/closure.c
@@ -39,18 +39,17 @@ void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
closure->cb_arg = cb_arg;
- closure->next = NULL;
+ closure->final_data = 0;
}
void grpc_closure_list_add(grpc_closure_list *closure_list,
grpc_closure *closure, int success) {
if (closure == NULL) return;
- closure->next = NULL;
- closure->success = success;
+ closure->final_data = (success != 0);
if (closure_list->head == NULL) {
closure_list->head = closure;
} else {
- closure_list->tail->next = closure;
+ closure_list->tail->final_data |= (gpr_uintptr)closure;
}
closure_list->tail = closure;
}
@@ -66,22 +65,12 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
if (dst->head == NULL) {
*dst = *src;
} else {
- dst->tail->next = src->head;
+ dst->tail->final_data |= (gpr_uintptr)src->head;
dst->tail = src->tail;
}
src->head = src->tail = NULL;
}
-grpc_closure *grpc_closure_list_pop(grpc_closure_list *list) {
- grpc_closure *head;
- if (list->head == NULL) {
- return NULL;
- }
- head = list->head;
- list->head = list->head->next;
- return head;
-}
-
typedef struct {
grpc_iomgr_cb_func cb;
void *cb_arg;
@@ -103,3 +92,7 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
+
+grpc_closure *grpc_closure_next(grpc_closure *closure) {
+ return (grpc_closure *)(closure->final_data & ~(gpr_uintptr)1);
+}
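
grpc_closure now packs the success flag and the next-closure pointer into one gpr_uintptr: closures are at least pointer-aligned, so bit 0 is free to carry success while the remaining bits hold the address. grpc_closure_list_add ORs the link in, grpc_closure_next masks the bit back off, and the exec_ctx flush loop further down unpacks both halves. A condensed sketch of the round trip (variable names are illustrative):

    /* enqueue: success goes in bit 0, then the link is OR-ed on top */
    closure->final_data = (success != 0);
    prev_tail->final_data |= (gpr_uintptr)closure;

    /* dequeue: split the word back into its two halves */
    int was_success = (int)(c->final_data & 1);
    grpc_closure *next = (grpc_closure *)(c->final_data & ~(gpr_uintptr)1);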
diff --git a/src/core/iomgr/closure.h b/src/core/iomgr/closure.h
index 7a9f7ccad0..a1d738bf5a 100644
--- a/src/core/iomgr/closure.h
+++ b/src/core/iomgr/closure.h
@@ -34,7 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
#define GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
-#include <stddef.h>
+#include <grpc/support/port_platform.h>
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
@@ -64,13 +64,10 @@ struct grpc_closure {
/** Arguments to be passed to "cb". */
void *cb_arg;
- /** Internal. A boolean indication to "cb" on the state of the iomgr.
- * For instance, closures created during a shutdown would have this field set
- * to false. */
- int success;
-
- /**< Internal. Do not touch */
- struct grpc_closure *next;
+ /** Once enqueued, contains in the lower bit the success of the closure,
+ and in the upper bits the pointer to the next closure in the list.
+ Before enqueuing for execution, this is usable for scratch data. */
+ gpr_uintptr final_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@@ -91,10 +88,10 @@ void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
/** append all closures from \a src to \a dst and empty \a src. */
void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
-/** pop (return and remove) the head closure from \a list. */
-grpc_closure *grpc_closure_list_pop(grpc_closure_list *list);
-
/** return whether \a list is empty. */
int grpc_closure_list_empty(grpc_closure_list list);
+/** return the next closure in a queued closure list */
+grpc_closure *grpc_closure_next(grpc_closure *closure);
+
#endif /* GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H */
diff --git a/src/core/iomgr/endpoint_pair_posix.c b/src/core/iomgr/endpoint_pair_posix.c
index deae9c6875..56f6f146fd 100644
--- a/src/core/iomgr/endpoint_pair_posix.c
+++ b/src/core/iomgr/endpoint_pair_posix.c
@@ -36,6 +36,7 @@
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/endpoint_pair.h"
+#include "src/core/iomgr/socket_utils_posix.h"
#include <errno.h>
#include <fcntl.h>
@@ -56,6 +57,8 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
flags = fcntl(sv[1], F_GETFL, 0);
GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
+ GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[0]));
+ GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]));
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
diff --git a/src/core/iomgr/exec_ctx.c b/src/core/iomgr/exec_ctx.c
index 410b34c521..e95eaf267a 100644
--- a/src/core/iomgr/exec_ctx.c
+++ b/src/core/iomgr/exec_ctx.c
@@ -44,10 +44,11 @@ int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
- grpc_closure *next = c->next;
+ int success = (int)(c->final_data & 1);
+ grpc_closure *next = (grpc_closure *)(c->final_data & ~(gpr_uintptr)1);
did_something++;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
- c->cb(exec_ctx, c->cb_arg, c->success);
+ c->cb(exec_ctx, c->cb_arg, success);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}
diff --git a/src/core/iomgr/executor.c b/src/core/iomgr/executor.c
index 457e5cdbac..00c68f7828 100644
--- a/src/core/iomgr/executor.c
+++ b/src/core/iomgr/executor.c
@@ -63,8 +63,6 @@ void grpc_executor_init() {
/* thread body */
static void closure_exec_thread_func(void *ignored) {
- grpc_closure *closure;
-
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (1) {
gpr_mu_lock(&g_executor.mu);
@@ -72,16 +70,16 @@ static void closure_exec_thread_func(void *ignored) {
gpr_mu_unlock(&g_executor.mu);
break;
}
- closure = grpc_closure_list_pop(&g_executor.closures);
- if (closure == NULL) {
+ if (grpc_closure_list_empty(g_executor.closures)) {
/* no more work, time to die */
GPR_ASSERT(g_executor.busy == 1);
g_executor.busy = 0;
gpr_mu_unlock(&g_executor.mu);
break;
+ } else {
+ grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
}
gpr_mu_unlock(&g_executor.mu);
- closure->cb(&exec_ctx, closure->cb_arg, closure->success);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -125,7 +123,6 @@ void grpc_executor_enqueue(grpc_closure *closure, int success) {
void grpc_executor_shutdown() {
int pending_join;
- grpc_closure *closure;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&g_executor.mu);
@@ -136,9 +133,7 @@ void grpc_executor_shutdown() {
* list below because we aren't accepting new work */
/* Execute pending callbacks, some may be performing cleanups */
- while ((closure = grpc_closure_list_pop(&g_executor.closures)) != NULL) {
- closure->cb(&exec_ctx, closure->cb_arg, closure->success);
- }
+ grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
if (pending_join) {
diff --git a/src/core/iomgr/fd_posix.c b/src/core/iomgr/fd_posix.c
index 7ff80e6cf8..00710d83bd 100644
--- a/src/core/iomgr/fd_posix.c
+++ b/src/core/iomgr/fd_posix.c
@@ -43,6 +43,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#define CLOSURE_NOT_READY ((grpc_closure *)0)
@@ -158,7 +159,10 @@ void grpc_fd_global_shutdown(void) {
grpc_fd *grpc_fd_create(int fd, const char *name) {
grpc_fd *r = alloc_fd(fd);
- grpc_iomgr_register_object(&r->iomgr_object, name);
+ char *name2;
+ gpr_asprintf(&name2, "%s fd=%d", name, fd);
+ grpc_iomgr_register_object(&r->iomgr_object, name2);
+ gpr_free(name2);
#ifdef GRPC_FD_REF_COUNT_DEBUG
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
#endif
@@ -207,14 +211,21 @@ static int has_watchers(grpc_fd *fd) {
}
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
- const char *reason) {
+ int *release_fd, const char *reason) {
fd->on_done_closure = on_done;
- shutdown(fd->fd, SHUT_RDWR);
+ fd->released = release_fd != NULL;
+ if (!fd->released) {
+ shutdown(fd->fd, SHUT_RDWR);
+ } else {
+ *release_fd = fd->fd;
+ }
gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
if (!has_watchers(fd)) {
fd->closed = 1;
- close(fd->fd);
+ if (!fd->released) {
+ close(fd->fd);
+ }
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
} else {
wake_all_watchers_locked(fd);
@@ -406,7 +417,9 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
}
if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
fd->closed = 1;
- close(fd->fd);
+ if (!fd->released) {
+ close(fd->fd);
+ }
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
}
gpr_mu_unlock(&fd->mu);
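
grpc_fd_orphan gains a release_fd out-parameter: when it is non-NULL the descriptor is handed back to the caller instead of being shutdown() and close()d, so the raw socket can outlive its grpc_fd wrapper. A hedged usage sketch; the reason string is a placeholder and error handling is omitted:

    int raw_fd = -1;
    /* take ownership of the descriptor instead of letting iomgr close it */
    grpc_fd_orphan(exec_ctx, fd, NULL /* on_done */, &raw_fd, "fd_handoff");
    /* raw_fd now holds the socket; the caller is responsible for closing it */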
diff --git a/src/core/iomgr/fd_posix.h b/src/core/iomgr/fd_posix.h
index dc917ebbc0..df4eb64d4c 100644
--- a/src/core/iomgr/fd_posix.h
+++ b/src/core/iomgr/fd_posix.h
@@ -62,6 +62,7 @@ struct grpc_fd {
gpr_mu mu;
int shutdown;
int closed;
+ int released;
/* The watcher list.
@@ -107,11 +108,12 @@ grpc_fd *grpc_fd_create(int fd, const char *name);
/* Releases fd to be asynchronously destroyed.
on_done is called when the underlying file descriptor is definitely close()d.
If on_done is NULL, no callback will be made.
+ If release_fd is not NULL, it's set to fd and fd will not be closed.
Requires: *fd initialized; no outstanding notify_on_read or
notify_on_write.
MUST NOT be called with a pollset lock taken */
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
- const char *reason);
+ int *release_fd, const char *reason);
/* Begin polling on an fd.
Registers that the given pollset is interested in this fd - so that if read
@@ -168,6 +170,7 @@ void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Reference counting for fds */
+/*#define GRPC_FD_REF_COUNT_DEBUG*/
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line);
diff --git a/src/core/iomgr/pollset.h b/src/core/iomgr/pollset.h
index d15553a12a..c6b0214dea 100644
--- a/src/core/iomgr/pollset.h
+++ b/src/core/iomgr/pollset.h
@@ -55,8 +55,13 @@
#endif
void grpc_pollset_init(grpc_pollset *pollset);
+/* Begin shutting down the pollset, and call closure when done.
+ * GRPC_POLLSET_MU(pollset) must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
+/** Reset the pollset to its initial state (perhaps with some cached objects);
+ * must have been previously shutdown */
+void grpc_pollset_reset(grpc_pollset *pollset);
void grpc_pollset_destroy(grpc_pollset *pollset);
/* Do some work on a pollset.
diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c
index 2aafd21dfb..6e31efa013 100644
--- a/src/core/iomgr/pollset_multipoller_with_epoll.c
+++ b/src/core/iomgr/pollset_multipoller_with_epoll.c
@@ -47,21 +47,13 @@
#include "src/core/support/block_annotate.h"
#include "src/core/profiling/timers.h"
-typedef struct wakeup_fd_hdl {
- grpc_wakeup_fd wakeup_fd;
- struct wakeup_fd_hdl *next;
-} wakeup_fd_hdl;
-
typedef struct {
grpc_pollset *pollset;
grpc_fd *fd;
grpc_closure closure;
} delayed_add;
-typedef struct {
- int epoll_fd;
- wakeup_fd_hdl *free_wakeup_fds;
-} pollset_hdr;
+typedef struct { int epoll_fd; } pollset_hdr;
static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
@@ -131,26 +123,6 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
}
}
-static void multipoll_with_epoll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_fd *fd,
- int and_unlock_pollset) {
- pollset_hdr *h = pollset->data.ptr;
- int err;
-
- if (and_unlock_pollset) {
- gpr_mu_unlock(&pollset->mu);
- }
-
- /* Note that this can race with concurrent poll, but that should be fine since
- * at worst it creates a spurious read event on a reused grpc_fd object. */
- err = epoll_ctl(h->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0) {
- gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd->fd,
- strerror(errno));
- }
-}
-
/* TODO(klempner): We probably want to turn this down a bit */
#define GRPC_EPOLL_MAX_EVENTS 1000
@@ -174,7 +146,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
- pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
+ pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = h->epoll_fd;
@@ -197,7 +169,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* do nothing */
} else {
if (pfds[0].revents) {
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
if (pfds[1].revents) {
do {
@@ -243,7 +215,7 @@ static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
- multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd,
+ multipoll_with_epoll_pollset_add_fd,
multipoll_with_epoll_pollset_maybe_work_and_unlock,
multipoll_with_epoll_pollset_finish_shutdown,
multipoll_with_epoll_pollset_destroy};
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index faa6c14491..b619b8c3db 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -82,23 +82,6 @@ exit:
}
}
-static void multipoll_with_poll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_fd *fd,
- int and_unlock_pollset) {
- /* will get removed next poll cycle */
- pollset_hdr *h = pollset->data.ptr;
- if (h->del_count == h->del_capacity) {
- h->del_capacity = GPR_MAX(h->del_capacity + 8, h->del_count * 3 / 2);
- h->dels = gpr_realloc(h->dels, sizeof(grpc_fd *) * h->del_capacity);
- }
- h->dels[h->del_count++] = fd;
- GRPC_FD_REF(fd, "multipoller_del");
- if (and_unlock_pollset) {
- gpr_mu_unlock(&pollset->mu);
- }
-}
-
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now) {
@@ -124,7 +107,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
- pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
+ pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfds[1].events = POLLIN;
pfds[1].revents = 0;
for (i = 0; i < h->fd_count; i++) {
@@ -174,7 +157,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfds[1].revents & POLLIN_CHECK) {
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
@@ -212,7 +195,7 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_poll_pollset = {
- multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd,
+ multipoll_with_poll_pollset_add_fd,
multipoll_with_poll_pollset_maybe_work_and_unlock,
multipoll_with_poll_pollset_finish_shutdown,
multipoll_with_poll_pollset_destroy};
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index 6f478ccacb..9195344758 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -111,7 +111,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
p->kicked_without_pollers = 1;
GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
@@ -122,14 +122,14 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
specific_worker->reevaluate_polling_on_wakeup = 1;
}
specific_worker->kicked_specifically = 1;
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
} else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
GPR_TIMER_MARK("kick_yoself", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = 1;
}
specific_worker->kicked_specifically = 1;
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
@@ -151,7 +151,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
if (specific_worker != NULL) {
GPR_TIMER_MARK("finally_kick", 0);
push_back_worker(p, specific_worker);
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else {
GPR_TIMER_MARK("kicked_no_pollers", 0);
@@ -177,9 +177,9 @@ void grpc_pollset_global_init(void) {
void grpc_pollset_global_shutdown(void) {
grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
- grpc_wakeup_fd_global_destroy();
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
+ grpc_wakeup_fd_global_destroy();
}
void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
@@ -194,30 +194,45 @@ void grpc_pollset_init(grpc_pollset *pollset) {
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
+ pollset->kicked_without_pollers = 0;
pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
+ pollset->local_wakeup_cache = NULL;
+ pollset->kicked_without_pollers = 0;
become_basic_pollset(pollset, NULL);
}
-void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- gpr_mu_lock(&pollset->mu);
- pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
-/* the following (enabled only in debug) will reacquire and then release
- our lock - meaning that if the unlocking flag passed to del_fd above is
- not respected, the code will deadlock (in a way that we have a chance of
- debugging) */
-#ifndef NDEBUG
- gpr_mu_lock(&pollset->mu);
- gpr_mu_unlock(&pollset->mu);
-#endif
+void grpc_pollset_destroy(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->in_flight_cbs == 0);
+ GPR_ASSERT(!grpc_pollset_has_workers(pollset));
+ GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
+ pollset->vtable->destroy(pollset);
+ gpr_mu_destroy(&pollset->mu);
+ while (pollset->local_wakeup_cache) {
+ grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
+ grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
+ gpr_free(pollset->local_wakeup_cache);
+ pollset->local_wakeup_cache = next;
+ }
}
-void grpc_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+void grpc_pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ GPR_ASSERT(pollset->in_flight_cbs == 0);
+ GPR_ASSERT(!grpc_pollset_has_workers(pollset));
+ GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
+ pollset->vtable->destroy(pollset);
+ pollset->shutting_down = 0;
+ pollset->called_shutdown = 0;
+ pollset->kicked_without_pollers = 0;
+ become_basic_pollset(pollset, NULL);
+}
+
+void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
- pollset->vtable->del_fd(exec_ctx, pollset, fd, 1);
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
- our lock - meaning that if the unlocking flag passed to del_fd above is
+ our lock - meaning that if the unlocking flag passed to add_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
@@ -244,13 +259,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
worker->reevaluate_polling_on_wakeup = 0;
+ if (pollset->local_wakeup_cache != NULL) {
+ worker->wakeup_fd = pollset->local_wakeup_cache;
+ pollset->local_wakeup_cache = worker->wakeup_fd->next;
+ } else {
+ worker->wakeup_fd = gpr_malloc(sizeof(*worker->wakeup_fd));
+ grpc_wakeup_fd_init(&worker->wakeup_fd->fd);
+ }
worker->kicked_specifically = 0;
- /* TODO(ctiller): pool these */
- grpc_wakeup_fd_init(&worker->wakeup_fd);
/* If there's work waiting for the pollset to be idle, and the
pollset is idle, then do that work */
if (!grpc_pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
+ GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
goto done;
}
@@ -259,16 +280,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
May update deadline to ensure timely wakeups.
TODO(ctiller): can this work be localized? */
if (grpc_timer_check(exec_ctx, now, &deadline)) {
+ GPR_TIMER_MARK("grpc_pollset_work.alarm_triggered", 0);
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
/* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
+ GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
goto done;
}
/* Give do_promote priority so we don't starve it out */
if (pollset->in_flight_cbs) {
+ GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
@@ -293,6 +317,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
} else {
+ GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
pollset->kicked_without_pollers = 0;
}
/* Finished execution - start cleaning up.
@@ -323,7 +348,10 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
remove_worker(pollset, worker);
gpr_tls_set(&g_current_thread_worker, 0);
}
- grpc_wakeup_fd_destroy(&worker->wakeup_fd);
+ /* release wakeup fd to the local pool */
+ worker->wakeup_fd->next = pollset->local_wakeup_cache;
+ pollset->local_wakeup_cache = worker->wakeup_fd;
+ /* check shutdown conditions */
if (pollset->shutting_down) {
if (grpc_pollset_has_workers(pollset)) {
grpc_pollset_kick(pollset, NULL);
@@ -338,8 +366,8 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
- gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
+ gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
@@ -349,35 +377,20 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
- int call_shutdown = 0;
- gpr_mu_lock(&pollset->mu);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
- if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
- !grpc_pollset_has_workers(pollset)) {
- pollset->called_shutdown = 1;
- call_shutdown = 1;
- }
+ pollset->shutdown_done = closure;
+ grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!grpc_pollset_has_workers(pollset)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
}
- pollset->shutdown_done = closure;
- grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
- gpr_mu_unlock(&pollset->mu);
-
- if (call_shutdown) {
+ if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
+ !grpc_pollset_has_workers(pollset)) {
+ pollset->called_shutdown = 1;
finish_shutdown(exec_ctx, pollset);
}
}
-void grpc_pollset_destroy(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- GPR_ASSERT(pollset->in_flight_cbs == 0);
- GPR_ASSERT(!grpc_pollset_has_workers(pollset));
- pollset->vtable->destroy(pollset);
- gpr_mu_destroy(&pollset->mu);
-}
-
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
@@ -520,19 +533,6 @@ exit:
}
}
-static void basic_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd, int and_unlock_pollset) {
- GPR_ASSERT(fd);
- if (fd == pollset->data.ptr) {
- GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
- pollset->data.ptr = NULL;
- }
-
- if (and_unlock_pollset) {
- gpr_mu_unlock(&pollset->mu);
- }
-}
-
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
grpc_pollset_worker *worker,
@@ -557,7 +557,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfd[0].events = POLLIN;
pfd[0].revents = 0;
- pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
+ pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfd[1].events = POLLIN;
pfd[1].revents = 0;
nfds = 2;
@@ -586,7 +586,9 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("poll", 0);
if (r < 0) {
- gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ if (errno != EINTR) {
+ gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ }
if (fd) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
@@ -599,7 +601,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfd[1].revents & POLLIN_CHECK) {
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
if (nfds > 2) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
@@ -622,9 +624,8 @@ static void basic_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable basic_pollset = {
- basic_pollset_add_fd, basic_pollset_del_fd,
- basic_pollset_maybe_work_and_unlock, basic_pollset_destroy,
- basic_pollset_destroy};
+ basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock,
+ basic_pollset_destroy, basic_pollset_destroy};
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
pollset->vtable = &basic_pollset;
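
Workers no longer create and destroy a wakeup fd on every grpc_pollset_work call; each pollset keeps an intrusive free list (local_wakeup_cache) of grpc_cached_wakeup_fd nodes that is popped on entry, pushed back on exit, and only torn down in grpc_pollset_destroy. Condensed from the hunks above so the acquire/release pair reads side by side:

    /* acquire: reuse a cached wakeup fd, allocating lazily on a miss */
    if (pollset->local_wakeup_cache != NULL) {
      worker->wakeup_fd = pollset->local_wakeup_cache;
      pollset->local_wakeup_cache = worker->wakeup_fd->next;
    } else {
      worker->wakeup_fd = gpr_malloc(sizeof(*worker->wakeup_fd));
      grpc_wakeup_fd_init(&worker->wakeup_fd->fd);
    }

    /* release: push the node back for the next worker on this pollset */
    worker->wakeup_fd->next = pollset->local_wakeup_cache;
    pollset->local_wakeup_cache = worker->wakeup_fd;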
diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h
index 95ebeab1c2..29de4a2026 100644
--- a/src/core/iomgr/pollset_posix.h
+++ b/src/core/iomgr/pollset_posix.h
@@ -48,8 +48,13 @@ typedef struct grpc_pollset_vtable grpc_pollset_vtable;
use the struct tag */
struct grpc_fd;
+typedef struct grpc_cached_wakeup_fd {
+ grpc_wakeup_fd fd;
+ struct grpc_cached_wakeup_fd *next;
+} grpc_cached_wakeup_fd;
+
typedef struct grpc_pollset_worker {
- grpc_wakeup_fd wakeup_fd;
+ grpc_cached_wakeup_fd *wakeup_fd;
int reevaluate_polling_on_wakeup;
int kicked_specifically;
struct grpc_pollset_worker *next;
@@ -74,13 +79,13 @@ typedef struct grpc_pollset {
int fd;
void *ptr;
} data;
+ /* Local cache of eventfds for workers */
+ grpc_cached_wakeup_fd *local_wakeup_cache;
} grpc_pollset;
struct grpc_pollset_vtable {
void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd, int and_unlock_pollset);
- void (*del_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd, int and_unlock_pollset);
void (*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now);
@@ -93,10 +98,6 @@ struct grpc_pollset_vtable {
/* Add an fd to a pollset */
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
-/* Force remove an fd from a pollset (normally they are removed on the next
- poll after an fd is orphaned) */
-void grpc_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd);
/* Returns the fd to listen on for kicks */
int grpc_kick_read_fd(grpc_pollset *p);
diff --git a/src/core/iomgr/pollset_set.h b/src/core/iomgr/pollset_set.h
index 0fdcba01a4..09c04438f7 100644
--- a/src/core/iomgr/pollset_set.h
+++ b/src/core/iomgr/pollset_set.h
@@ -49,13 +49,19 @@
#include "src/core/iomgr/pollset_set_windows.h"
#endif
-void grpc_pollset_set_init(grpc_pollset_set* pollset_set);
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
-void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
- grpc_pollset_set* pollset_set,
- grpc_pollset* pollset);
-void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
- grpc_pollset_set* pollset_set,
- grpc_pollset* pollset);
+void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
+void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
+void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
+ grpc_pollset *pollset);
+void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
+ grpc_pollset *pollset);
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item);
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */
diff --git a/src/core/iomgr/pollset_set_posix.c b/src/core/iomgr/pollset_set_posix.c
index c86ed3d5da..4ec92202e3 100644
--- a/src/core/iomgr/pollset_set_posix.c
+++ b/src/core/iomgr/pollset_set_posix.c
@@ -52,9 +52,10 @@ void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
+ GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
}
gpr_free(pollset_set->pollsets);
+ gpr_free(pollset_set->pollset_sets);
gpr_free(pollset_set->fds);
}
@@ -73,7 +74,7 @@ void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
- GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
+ GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
} else {
grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
pollset_set->fds[j++] = pollset_set->fds[i];
@@ -99,6 +100,46 @@ void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {
+ size_t i, j;
+ gpr_mu_lock(&bag->mu);
+ if (bag->pollset_set_count == bag->pollset_set_capacity) {
+ bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
+ bag->pollset_sets =
+ gpr_realloc(bag->pollset_sets,
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ }
+ bag->pollset_sets[bag->pollset_set_count++] = item;
+ for (i = 0, j = 0; i < bag->fd_count; i++) {
+ if (grpc_fd_is_orphaned(bag->fds[i])) {
+ GRPC_FD_UNREF(bag->fds[i], "pollset_set");
+ } else {
+ grpc_pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
+ bag->fds[j++] = bag->fds[i];
+ }
+ }
+ bag->fd_count = j;
+ gpr_mu_unlock(&bag->mu);
+}
+
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {
+ size_t i;
+ gpr_mu_lock(&bag->mu);
+ for (i = 0; i < bag->pollset_set_count; i++) {
+ if (bag->pollset_sets[i] == item) {
+ bag->pollset_set_count--;
+ GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
+ bag->pollset_sets[bag->pollset_set_count]);
+ break;
+ }
+ }
+ gpr_mu_unlock(&bag->mu);
+}
+
void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
@@ -113,6 +154,9 @@ void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
for (i = 0; i < pollset_set->pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
}
+ for (i = 0; i < pollset_set->pollset_set_count; i++) {
+ grpc_pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+ }
gpr_mu_unlock(&pollset_set->mu);
}
@@ -129,6 +173,9 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
break;
}
}
+ for (i = 0; i < pollset_set->pollset_set_count; i++) {
+ grpc_pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+ }
gpr_mu_unlock(&pollset_set->mu);
}
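
pollset_sets can now contain other pollset_sets, and fd adds/deletes recurse into every contained set. A hedged sketch of wiring one set into another; exec_ctx and some_fd are assumed to exist already:

    grpc_pollset_set bag, item;
    grpc_pollset_set_init(&bag);
    grpc_pollset_set_init(&item);
    grpc_pollset_set_add_pollset_set(exec_ctx, &bag, &item);
    /* fds added to the bag now propagate into item as well */
    grpc_pollset_set_add_fd(exec_ctx, &bag, some_fd);
    grpc_pollset_set_del_pollset_set(exec_ctx, &bag, &item);
    grpc_pollset_set_destroy(&item);
    grpc_pollset_set_destroy(&bag);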
diff --git a/src/core/iomgr/pollset_set_posix.h b/src/core/iomgr/pollset_set_posix.h
index 05234fb642..4820a61e4b 100644
--- a/src/core/iomgr/pollset_set_posix.h
+++ b/src/core/iomgr/pollset_set_posix.h
@@ -44,6 +44,10 @@ typedef struct grpc_pollset_set {
size_t pollset_capacity;
grpc_pollset **pollsets;
+ size_t pollset_set_count;
+ size_t pollset_set_capacity;
+ struct grpc_pollset_set **pollset_sets;
+
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;
diff --git a/src/core/iomgr/pollset_set_windows.c b/src/core/iomgr/pollset_set_windows.c
index 53d5d3dcd4..157b46ec32 100644
--- a/src/core/iomgr/pollset_set_windows.c
+++ b/src/core/iomgr/pollset_set_windows.c
@@ -49,4 +49,12 @@ void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/pollset_windows.c b/src/core/iomgr/pollset_windows.c
index 9f74580273..deb661548d 100644
--- a/src/core/iomgr/pollset_windows.c
+++ b/src/core/iomgr/pollset_windows.c
@@ -35,6 +35,7 @@
#ifdef GPR_WINSOCK_SOCKET
+#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include "src/core/iomgr/timer_internal.h"
@@ -112,7 +113,6 @@ void grpc_pollset_init(grpc_pollset *pollset) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
- gpr_mu_lock(&grpc_polling_mu);
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
@@ -120,11 +120,20 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} else {
pollset->on_shutdown = closure;
}
- gpr_mu_unlock(&grpc_polling_mu);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {}
+void grpc_pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ GPR_ASSERT(
+ !has_workers(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET));
+ pollset->shutting_down = 0;
+ pollset->is_iocp_worker = 0;
+ pollset->kicked_without_pollers = 0;
+ pollset->on_shutdown = NULL;
+}
+
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker, gpr_timespec now,
gpr_timespec deadline) {
diff --git a/src/core/iomgr/tcp_client_posix.c b/src/core/iomgr/tcp_client_posix.c
index abd6315ca1..d9d24ee9a3 100644
--- a/src/core/iomgr/tcp_client_posix.c
+++ b/src/core/iomgr/tcp_client_posix.c
@@ -196,7 +196,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
finish:
if (fd != NULL) {
grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
- grpc_fd_orphan(exec_ctx, fd, NULL, "tcp_client_orphan");
+ grpc_fd_orphan(exec_ctx, fd, NULL, NULL, "tcp_client_orphan");
fd = NULL;
}
done = (--ac->refs == 0);
@@ -265,7 +265,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
- grpc_fd_orphan(exec_ctx, fdobj, NULL, "tcp_client_connect_error");
+ grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
goto done;
}
diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c
index 915553d509..f3be41aa57 100644
--- a/src/core/iomgr/tcp_posix.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -90,6 +90,8 @@ typedef struct {
grpc_closure *read_cb;
grpc_closure *write_cb;
+ grpc_closure *release_fd_cb;
+ int *release_fd;
grpc_closure read_closure;
grpc_closure write_closure;
@@ -108,7 +110,8 @@ static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
}
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
- grpc_fd_orphan(exec_ctx, tcp->em_fd, NULL, "tcp_unref_orphan");
+ grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
+ "tcp_unref_orphan");
gpr_slice_buffer_destroy(&tcp->last_read_buffer);
gpr_free(tcp->peer_string);
gpr_free(tcp);
@@ -452,6 +455,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
tcp->fd = em_fd->fd;
tcp->read_cb = NULL;
tcp->write_cb = NULL;
+ tcp->release_fd_cb = NULL;
+ tcp->release_fd = NULL;
tcp->incoming_buffer = NULL;
tcp->slice_size = slice_size;
tcp->iov_size = 1;
@@ -468,4 +473,13 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
return &tcp->base;
}
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ int *fd, grpc_closure *done) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ GPR_ASSERT(ep->vtable == &vtable);
+ tcp->release_fd = fd;
+ tcp->release_fd_cb = done;
+ TCP_UNREF(exec_ctx, tcp, "destroy");
+}
+
#endif
diff --git a/src/core/iomgr/tcp_posix.h b/src/core/iomgr/tcp_posix.h
index 40b3ae2679..b554983ae1 100644
--- a/src/core/iomgr/tcp_posix.h
+++ b/src/core/iomgr/tcp_posix.h
@@ -56,4 +56,10 @@ extern int grpc_tcp_trace;
grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
const char *peer_string);
+/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
+ * will be called when the endpoint is destroyed.
+ * Requires: ep must be a tcp endpoint and fd must not be NULL. */
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ int *fd, grpc_closure *done);
+
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H */
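
grpc_tcp_destroy_and_release_fd builds on the grpc_fd_orphan release path to tear down the endpoint machinery while keeping the underlying socket alive. A hedged sketch; tcp_endpoint and the callback body are assumptions:

    static void fd_released(grpc_exec_ctx *exec_ctx, void *arg, int success) {
      int *fd = arg;
      /* *fd is now a plain socket owned by the caller */
    }

    /* inside some teardown path: */
    int released_fd = -1;
    grpc_closure on_released;
    grpc_closure_init(&on_released, fd_released, &released_fd);
    grpc_tcp_destroy_and_release_fd(exec_ctx, tcp_endpoint, &released_fd,
                                    &on_released);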
diff --git a/src/core/iomgr/tcp_server.h b/src/core/iomgr/tcp_server.h
index 882635f638..3294e13797 100644
--- a/src/core/iomgr/tcp_server.h
+++ b/src/core/iomgr/tcp_server.h
@@ -39,6 +39,9 @@
/* Forward decl of grpc_tcp_server */
typedef struct grpc_tcp_server grpc_tcp_server;
+/* Forward decl of grpc_tcp_listener */
+typedef struct grpc_tcp_listener grpc_tcp_listener;
+
/* Called for newly connected TCP connections. */
typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep);
@@ -51,19 +54,17 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
grpc_pollset **pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb, void *cb_arg);
-/* Add a port to the server, returning port number on success, or negative
- on failure.
+/* Add a port to the server, returning the newly created listener on success,
+ or a null pointer on failure.
The :: and 0.0.0.0 wildcard addresses are treated identically, accepting
both IPv4 and IPv6 connections, but :: is the preferred style. This usually
creates one socket, but possibly two on systems which support IPv6,
- but not dualstack sockets.
-
- For raw access to the underlying sockets, see grpc_tcp_server_get_fd(). */
+ but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
-int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len);
+grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const void *addr, size_t addr_len);
/* Returns the file descriptor of the Nth listening socket on this server,
or -1 if the index is out of bounds.
@@ -75,4 +76,8 @@ int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index);
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
grpc_closure *closure);
+int grpc_tcp_listener_get_port(grpc_tcp_listener *listener);
+void grpc_tcp_listener_ref(grpc_tcp_listener *listener);
+void grpc_tcp_listener_unref(grpc_tcp_listener *listener);
+
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_SERVER_H */
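A rough usage sketch for the new return type: bind a wildcard IPv6 address with
port 0 and read back the port the kernel chose. The helper name and the plain
sockaddr_in6 setup are illustrative, not taken from the tree.

#include <string.h>
#include <netinet/in.h>
#include "src/core/iomgr/tcp_server.h"

static int bind_wildcard_port(grpc_tcp_server *server) {
  struct sockaddr_in6 wild;
  grpc_tcp_listener *listener;
  memset(&wild, 0, sizeof(wild));
  wild.sin6_family = AF_INET6; /* sin6_port == 0: let the kernel pick */
  listener = grpc_tcp_server_add_port(server, &wild, sizeof(wild));
  if (listener == NULL) return -1;               /* bind/listen failed */
  return grpc_tcp_listener_get_port(listener);   /* port actually bound */
}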
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index 99c76dcbe9..835675c390 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -67,14 +67,13 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
-#define INIT_PORT_CAP 2
#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
static gpr_once s_init_max_accept_queue_size;
static int s_max_accept_queue_size;
/* one listening port */
-typedef struct {
+struct grpc_tcp_listener {
int fd;
grpc_fd *emfd;
grpc_tcp_server *server;
@@ -84,9 +83,18 @@ typedef struct {
struct sockaddr_un un;
} addr;
size_t addr_len;
+ int port;
grpc_closure read_closure;
grpc_closure destroyed_closure;
-} server_port;
+ gpr_refcount refs;
+ struct grpc_tcp_listener *next;
+ /* When we add a listener, more than one can actually be created, mainly
+ because of IPv6. A sibling still lives in the normal list but is flagged
+ as such; any action, such as ref or unref, affects all of the siblings
+ in the list. */
+ struct grpc_tcp_listener *sibling;
+ int is_sibling;
+};
static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
struct stat st;
@@ -112,10 +120,9 @@ struct grpc_tcp_server {
/* is this server shutting down? (boolean) */
int shutdown;
- /* all listening ports */
- server_port *ports;
- size_t nports;
- size_t port_capacity;
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
+ unsigned nports;
/* shutdown callback */
grpc_closure *shutdown_complete;
@@ -134,9 +141,8 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
s->shutdown = 0;
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
- s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
+ s->head = NULL;
s->nports = 0;
- s->port_capacity = INIT_PORT_CAP;
return s;
}
@@ -145,7 +151,12 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_mu_destroy(&s->mu);
- gpr_free(s->ports);
+ while (s->head) {
+ grpc_tcp_listener *sp = s->head;
+ s->head = sp->next;
+ grpc_tcp_listener_unref(sp);
+ }
+
gpr_free(s);
}
@@ -166,8 +177,6 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
events will be received on them - at this point it's safe to destroy
things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
- size_t i;
-
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@@ -176,15 +185,15 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
return;
}
- if (s->nports) {
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
+ if (s->head) {
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
if (sp->addr.sockaddr.sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(&sp->addr.un);
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
- grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
@@ -196,7 +205,6 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_closure *closure) {
- size_t i;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@@ -206,8 +214,9 @@ void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
/* shutdown all fd's */
if (s->active_ports) {
- for (i = 0; i < s->nports; i++) {
- grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
+ grpc_fd_shutdown(exec_ctx, sp->emfd);
}
gpr_mu_unlock(&s->mu);
} else {
@@ -298,7 +307,7 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
- server_port *sp = arg;
+ grpc_tcp_listener *sp = arg;
grpc_fd *fdobj;
size_t i;
@@ -364,9 +373,10 @@ error:
}
}
-static int add_socket_to_server(grpc_tcp_server *s, int fd,
- const struct sockaddr *addr, size_t addr_len) {
- server_port *sp;
+static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, int fd,
+ const struct sockaddr *addr,
+ size_t addr_len) {
+ grpc_tcp_listener *sp = NULL;
int port;
char *addr_str;
char *name;
@@ -376,32 +386,33 @@ static int add_socket_to_server(grpc_tcp_server *s, int fd,
grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
gpr_mu_lock(&s->mu);
+ s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- /* append it to the list under a lock */
- if (s->nports == s->port_capacity) {
- s->port_capacity *= 2;
- s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
- }
- sp = &s->ports[s->nports++];
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = s->head;
+ s->head = sp;
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
memcpy(sp->addr.untyped, addr, addr_len);
sp->addr_len = addr_len;
+ sp->port = port;
+ sp->is_sibling = 0;
+ sp->sibling = NULL;
+ gpr_ref_init(&sp->refs, 1);
GPR_ASSERT(sp->emfd);
gpr_mu_unlock(&s->mu);
gpr_free(addr_str);
gpr_free(name);
}
- return port;
+ return sp;
}
-int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len) {
- int allocated_port1 = -1;
- int allocated_port2 = -1;
- unsigned i;
+grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const void *addr, size_t addr_len) {
+ grpc_tcp_listener *sp;
+ grpc_tcp_listener *sp2 = NULL;
int fd;
grpc_dualstack_mode dsmode;
struct sockaddr_in6 addr6_v4mapped;
@@ -420,9 +431,9 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
- for (i = 0; i < s->nports; i++) {
+ for (sp = s->head; sp; sp = sp->next) {
sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
+ if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp,
&sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
@@ -436,6 +447,8 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
}
+ sp = NULL;
+
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
addr_len = sizeof(addr6_v4mapped);
@@ -449,14 +462,15 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
addr = (struct sockaddr *)&wild6;
addr_len = sizeof(wild6);
fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
- allocated_port1 = add_socket_to_server(s, fd, addr, addr_len);
+ sp = add_socket_to_server(s, fd, addr, addr_len);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
- if (port == 0 && allocated_port1 > 0) {
- grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
+ if (port == 0 && sp != NULL) {
+ grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
+ sp2 = sp;
}
addr = (struct sockaddr *)&wild4;
addr_len = sizeof(wild4);
@@ -471,22 +485,32 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
addr = (struct sockaddr *)&addr4_copy;
addr_len = sizeof(addr4_copy);
}
- allocated_port2 = add_socket_to_server(s, fd, addr, addr_len);
+ sp = add_socket_to_server(s, fd, addr, addr_len);
+ if (sp != NULL) sp->sibling = sp2;
+ if (sp2 != NULL) sp2->is_sibling = 1;
done:
gpr_free(allocated_addr);
- return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
+ return sp;
}
int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned port_index) {
- return (port_index < s->nports) ? s->ports[port_index].fd : -1;
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp && port_index != 0; sp = sp->next, port_index--)
+ ;
+ if (port_index == 0 && sp) {
+ return sp->fd;
+ } else {
+ return -1;
+ }
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_pollset **pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void *on_accept_cb_arg) {
- size_t i, j;
+ size_t i;
+ grpc_tcp_listener *sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb);
@@ -495,17 +519,44 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
s->on_accept_cb_arg = on_accept_cb_arg;
s->pollsets = pollsets;
s->pollset_count = pollset_count;
- for (i = 0; i < s->nports; i++) {
- for (j = 0; j < pollset_count; j++) {
- grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
+ for (sp = s->head; sp; sp = sp->next) {
+ for (i = 0; i < pollset_count; i++) {
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- s->ports[i].read_closure.cb = on_read;
- s->ports[i].read_closure.cb_arg = &s->ports[i];
- grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
- &s->ports[i].read_closure);
+ sp->read_closure.cb = on_read;
+ sp->read_closure.cb_arg = sp;
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
+int grpc_tcp_listener_get_port(grpc_tcp_listener *listener) {
+ if (listener != NULL) {
+ grpc_tcp_listener *sp = listener;
+ return sp->port;
+ } else {
+ return 0;
+ }
+}
+
+void grpc_tcp_listener_ref(grpc_tcp_listener *listener) {
+ grpc_tcp_listener *sp = listener;
+ gpr_ref(&sp->refs);
+}
+
+void grpc_tcp_listener_unref(grpc_tcp_listener *listener) {
+ grpc_tcp_listener *sp = listener;
+ if (sp->is_sibling) return;
+ if (gpr_unref(&sp->refs)) {
+ grpc_tcp_listener *sibling = sp->sibling;
+ while (sibling) {
+ sp = sibling;
+ sibling = sp->sibling;
+ gpr_free(sp);
+ }
+ gpr_free(listener);
+ }
+}
+
#endif
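To make the sibling semantics above concrete, a small ownership sketch
(illustrative only; use_listener is not a real function in the tree):

#include "src/core/iomgr/tcp_server.h"

static void use_listener(grpc_tcp_server *server, const void *addr,
                         size_t addr_len) {
  grpc_tcp_listener *l = grpc_tcp_server_add_port(server, addr, addr_len);
  if (l == NULL) return;
  /* On a system without dualstack sockets a second socket may have been
     created; it is chained via ->sibling behind the returned listener. */
  grpc_tcp_listener_ref(l);   /* refs are taken on the returned listener */
  /* ... e.g. grpc_tcp_listener_get_port(l) ... */
  grpc_tcp_listener_unref(l); /* the last unref frees l and any chained
                                 siblings; unref on a sibling itself is a
                                 no-op, as shown above */
}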
diff --git a/src/core/iomgr/tcp_server_windows.c b/src/core/iomgr/tcp_server_windows.c
index 3fea8b5b35..583cab4890 100644
--- a/src/core/iomgr/tcp_server_windows.c
+++ b/src/core/iomgr/tcp_server_windows.c
@@ -35,7 +35,8 @@
#ifdef GPR_WINSOCK_SOCKET
-#define _GNU_SOURCE
+#include <io.h>
+
#include "src/core/iomgr/sockaddr_utils.h"
#include <grpc/support/alloc.h>
@@ -51,25 +52,29 @@
#include "src/core/iomgr/tcp_server.h"
#include "src/core/iomgr/tcp_windows.h"
-#define INIT_PORT_CAP 2
#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
/* one listening port */
-typedef struct server_port {
+struct grpc_tcp_listener {
/* This seemingly magic number comes from AcceptEx's documentation. Each
address buffer needs to have at least 16 more bytes at its end. */
gpr_uint8 addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
/* This will hold the socket for the next accept. */
SOCKET new_socket;
- /* The listener winsocked. */
+ /* The listener winsocket. */
grpc_winsocket *socket;
+ /* The actual TCP port number. */
+ int port;
grpc_tcp_server *server;
/* The cached AcceptEx for that port. */
LPFN_ACCEPTEX AcceptEx;
int shutting_down;
/* closure for socket notification of accept being ready */
grpc_closure on_accept;
-} server_port;
+ gpr_refcount refs;
+ /* linked list */
+ struct grpc_tcp_listener *next;
+};
/* the overall server */
struct grpc_tcp_server {
@@ -82,10 +87,8 @@ struct grpc_tcp_server {
/* active port count: how many ports are actually still listening */
int active_ports;
- /* all listening ports */
- server_port *ports;
- size_t nports;
- size_t port_capacity;
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
/* shutdown callback */
grpc_closure *shutdown_complete;
@@ -99,9 +102,7 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
s->active_ports = 0;
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
- s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
- s->nports = 0;
- s->port_capacity = INIT_PORT_CAP;
+ s->head = NULL;
s->shutdown_complete = NULL;
return s;
}
@@ -109,26 +110,26 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
static void dont_care_about_shutdown_completion(void *arg) {}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
- size_t i;
-
grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
/* Now that the accepts have been aborted, we can destroy the sockets.
The IOCP won't get notified on these, so we can flag them as already
closed by the system. */
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
+ while (s->head) {
+ grpc_tcp_listener *sp = s->head;
+ s->head = sp->next;
+ sp->next = NULL;
grpc_winsocket_destroy(sp->socket);
+ grpc_tcp_listener_unref(sp);
}
- gpr_free(s->ports);
gpr_free(s);
}
/* Public function. Stops and destroys a grpc_tcp_server. */
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_closure *shutdown_complete) {
- size_t i;
int immediately_done = 0;
+ grpc_tcp_listener *sp;
gpr_mu_lock(&s->mu);
s->shutdown_complete = shutdown_complete;
@@ -138,8 +139,7 @@ void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
if (s->active_ports == 0) {
immediately_done = 1;
}
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
+ for (sp = s->head; sp; sp = sp->next) {
sp->shutting_down = 1;
grpc_winsocket_shutdown(sp->socket);
}
@@ -199,7 +199,7 @@ error:
}
static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
- server_port *sp) {
+ grpc_tcp_listener *sp) {
int notify = 0;
sp->shutting_down = 0;
gpr_mu_lock(&sp->server->mu);
@@ -216,7 +216,7 @@ static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
-static void start_accept(grpc_exec_ctx *exec_ctx, server_port *port) {
+static void start_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *port) {
SOCKET sock = INVALID_SOCKET;
char *message;
char *utf8_message;
@@ -276,7 +276,7 @@ failure:
/* Event manager callback when reads are ready. */
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
- server_port *sp = arg;
+ grpc_tcp_listener *sp = arg;
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
@@ -351,16 +351,17 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
start_accept(exec_ctx, sp);
}
-static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
- const struct sockaddr *addr, size_t addr_len) {
- server_port *sp;
+static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
+ const struct sockaddr *addr,
+ size_t addr_len) {
+ grpc_tcp_listener *sp = NULL;
int port;
int status;
GUID guid = WSAID_ACCEPTEX;
DWORD ioctl_num_bytes;
LPFN_ACCEPTEX AcceptEx;
- if (sock == INVALID_SOCKET) return -1;
+ if (sock == INVALID_SOCKET) return NULL;
/* We need to grab the AcceptEx pointer for that port, as it may be
interface-dependent. We'll cache it to avoid doing that again. */
@@ -373,37 +374,34 @@ static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message);
closesocket(sock);
- return -1;
+ return NULL;
}
port = prepare_socket(sock, addr, addr_len);
if (port >= 0) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- /* append it to the list under a lock */
- if (s->nports == s->port_capacity) {
- /* too many ports, and we need to store their address in a closure */
- /* TODO(ctiller): make server_port a linked list */
- abort();
- }
- sp = &s->ports[s->nports++];
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = s->head;
+ s->head = sp;
sp->server = s;
sp->socket = grpc_winsocket_create(sock, "listener");
sp->shutting_down = 0;
sp->AcceptEx = AcceptEx;
sp->new_socket = INVALID_SOCKET;
+ sp->port = port;
+ gpr_ref_init(&sp->refs, 1);
grpc_closure_init(&sp->on_accept, on_accept, sp);
GPR_ASSERT(sp->socket);
gpr_mu_unlock(&s->mu);
}
- return port;
+ return sp;
}
-int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len) {
- int allocated_port = -1;
- unsigned i;
+grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const void *addr, size_t addr_len) {
+ grpc_tcp_listener *sp;
SOCKET sock;
struct sockaddr_in6 addr6_v4mapped;
struct sockaddr_in6 wildcard;
@@ -415,9 +413,9 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
- for (i = 0; i < s->nports; i++) {
+ for (sp = s->head; sp; sp = sp->next) {
sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(s->ports[i].socket->socket,
+ if (0 == getsockname(sp->socket->socket,
(struct sockaddr *)&sockname_temp, &sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
@@ -452,33 +450,60 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
gpr_free(utf8_message);
}
- allocated_port = add_socket_to_server(s, sock, addr, addr_len);
+ sp = add_socket_to_server(s, sock, addr, addr_len);
gpr_free(allocated_addr);
- return allocated_port;
+ return sp;
}
-SOCKET
-grpc_tcp_server_get_socket(grpc_tcp_server *s, unsigned index) {
- return (index < s->nports) ? s->ports[index].socket->socket : INVALID_SOCKET;
+int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned port_index) {
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp && port_index != 0; sp = sp->next, port_index--)
+ ;
+ if (port_index == 0 && sp) {
+ return _open_osfhandle(sp->socket->socket, 0);
+ } else {
+ return -1;
+ }
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_pollset **pollset, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void *on_accept_cb_arg) {
- size_t i;
+ grpc_tcp_listener *sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb);
GPR_ASSERT(s->active_ports == 0);
s->on_accept_cb = on_accept_cb;
s->on_accept_cb_arg = on_accept_cb_arg;
- for (i = 0; i < s->nports; i++) {
- start_accept(exec_ctx, s->ports + i);
+ for (sp = s->head; sp; sp = sp->next) {
+ start_accept(exec_ctx, sp);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
+int grpc_tcp_listener_get_port(grpc_tcp_listener *listener) {
+ if (listener != NULL) {
+ grpc_tcp_listener *sp = listener;
+ return sp->port;
+ } else {
+ return 0;
+ }
+}
+
+void grpc_tcp_listener_ref(grpc_tcp_listener *listener) {
+ grpc_tcp_listener *sp = listener;
+ gpr_ref(&sp->refs);
+}
+
+void grpc_tcp_listener_unref(grpc_tcp_listener *listener) {
+ grpc_tcp_listener *sp = listener;
+ if (gpr_unref(&sp->refs)) {
+ gpr_free(listener);
+ }
+}
+
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c
index 5ff78231bd..cc7f7ff8d2 100644
--- a/src/core/iomgr/tcp_windows.c
+++ b/src/core/iomgr/tcp_windows.c
@@ -197,7 +197,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->read_slice = gpr_slice_malloc(8192);
- buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
+ buffer.len = (ULONG)GPR_SLICE_LENGTH(
+ tcp->read_slice); // we know the slice size fits in 32 bits.
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
TCP_REF(tcp, "read");
@@ -273,6 +274,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF local_buffers[16];
WSABUF *allocated = NULL;
WSABUF *buffers = local_buffers;
+ size_t len;
if (tcp->shutting_down) {
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
@@ -281,19 +283,21 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
tcp->write_slices = slices;
-
+ GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
allocated = buffers;
}
for (i = 0; i < tcp->write_slices->count; i++) {
- buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
+ len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
+ GPR_ASSERT(len <= ULONG_MAX);
+ buffers[i].len = (ULONG)len;
buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
- status = WSASend(socket->socket, buffers, tcp->write_slices->count,
+ status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
@@ -322,7 +326,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
operation, this time asynchronously. */
memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
- status = WSASend(socket->socket, buffers, tcp->write_slices->count,
+ status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, &socket->write_info.overlapped, NULL);
if (allocated) gpr_free(allocated);
diff --git a/src/core/iomgr/timer.c b/src/core/iomgr/timer.c
index 66fafe75ad..bbf9800049 100644
--- a/src/core/iomgr/timer.c
+++ b/src/core/iomgr/timer.c
@@ -126,8 +126,8 @@ static double ts_to_dbl(gpr_timespec ts) {
static gpr_timespec dbl_to_ts(double d) {
gpr_timespec ts;
- ts.tv_sec = (time_t)d;
- ts.tv_nsec = (int)(1e9 * (d - (double)ts.tv_sec));
+ ts.tv_sec = (gpr_int64)d;
+ ts.tv_nsec = (gpr_int32)(1e9 * (d - (double)ts.tv_sec));
ts.clock_type = GPR_TIMESPAN;
return ts;
}
@@ -343,11 +343,3 @@ int grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
exec_ctx, now, next,
gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}
-
-gpr_timespec grpc_timer_list_next_timeout(void) {
- gpr_timespec out;
- gpr_mu_lock(&g_mu);
- out = g_shard_queue[0]->min_deadline;
- gpr_mu_unlock(&g_mu);
- return out;
-}
diff --git a/src/core/iomgr/timer_internal.h b/src/core/iomgr/timer_internal.h
index f180eca36e..f182e73764 100644
--- a/src/core/iomgr/timer_internal.h
+++ b/src/core/iomgr/timer_internal.h
@@ -54,8 +54,6 @@ int grpc_timer_check(grpc_exec_ctx* exec_ctx, gpr_timespec now,
void grpc_timer_list_init(gpr_timespec now);
void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx);
-gpr_timespec grpc_timer_list_next_timeout(void);
-
/* the following must be implemented by each iomgr implementation */
void grpc_kick_poller(void);
diff --git a/src/core/iomgr/udp_server.c b/src/core/iomgr/udp_server.c
index 9903e970e6..28f1bfae26 100644
--- a/src/core/iomgr/udp_server.c
+++ b/src/core/iomgr/udp_server.c
@@ -38,6 +38,7 @@
#include <grpc/support/port_platform.h>
+#ifdef GRPC_NEED_UDP
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/udp_server.h"
@@ -179,7 +180,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
- grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"udp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
@@ -435,3 +436,4 @@ void grpc_udp_server_write(server_port *sp, const char *buffer, size_t buf_len,
}
#endif
+#endif
diff --git a/src/core/iomgr/wakeup_fd_posix.c b/src/core/iomgr/wakeup_fd_posix.c
index d09fb78d12..f40be081b0 100644
--- a/src/core/iomgr/wakeup_fd_posix.c
+++ b/src/core/iomgr/wakeup_fd_posix.c
@@ -40,19 +40,17 @@
#include <stddef.h>
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
+int grpc_allow_specialized_wakeup_fd = 1;
void grpc_wakeup_fd_global_init(void) {
- if (grpc_specialized_wakeup_fd_vtable.check_availability()) {
+ if (grpc_allow_specialized_wakeup_fd &&
+ grpc_specialized_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
} else {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
}
}
-void grpc_wakeup_fd_global_init_force_fallback(void) {
- wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
-}
-
void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
diff --git a/src/core/iomgr/wakeup_fd_posix.h b/src/core/iomgr/wakeup_fd_posix.h
index fe71b5abe9..ffd60d1d4e 100644
--- a/src/core/iomgr/wakeup_fd_posix.h
+++ b/src/core/iomgr/wakeup_fd_posix.h
@@ -85,6 +85,8 @@ struct grpc_wakeup_fd {
int write_fd;
};
+extern int grpc_allow_specialized_wakeup_fd;
+
#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
void grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info);
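A sketch of how the behaviour of the removed
grpc_wakeup_fd_global_init_force_fallback() can now be obtained through the
flag; it assumes the assignment happens before wakeup-fd initialization runs
(e.g. early in test setup, before grpc_init()).

grpc_allow_specialized_wakeup_fd = 0; /* force the pipe-based implementation */
grpc_wakeup_fd_global_init();         /* now picks grpc_pipe_wakeup_fd_vtable */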
diff --git a/src/core/iomgr/workqueue_posix.c b/src/core/iomgr/workqueue_posix.c
index 0a0f3c364e..d2a1c34612 100644
--- a/src/core/iomgr/workqueue_posix.c
+++ b/src/core/iomgr/workqueue_posix.c
@@ -103,6 +103,9 @@ void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
gpr_mu_lock(&workqueue->mu);
+ if (grpc_closure_list_empty(workqueue->closure_list)) {
+ grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
+ }
grpc_closure_list_move(&exec_ctx->closure_list, &workqueue->closure_list);
gpr_mu_unlock(&workqueue->mu);
}
@@ -115,7 +118,7 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
/* HACK: let wakeup_fd code know that we stole the fd */
workqueue->wakeup_fd.read_fd = 0;
grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
- grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, "destroy");
+ grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
gpr_free(workqueue);
} else {
gpr_mu_lock(&workqueue->mu);
@@ -129,8 +132,6 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
int success) {
- closure->success = success;
- closure->next = NULL;
gpr_mu_lock(&workqueue->mu);
if (grpc_closure_list_empty(workqueue->closure_list)) {
grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
diff --git a/src/core/json/json_reader.c b/src/core/json/json_reader.c
index 8abad01252..256995240a 100644
--- a/src/core/json/json_reader.c
+++ b/src/core/json/json_reader.c
@@ -35,6 +35,8 @@
#include <grpc/support/port_platform.h>
+#include <grpc/support/log.h>
+
#include "src/core/json/json_reader.h"
static void json_reader_string_clear(grpc_json_reader *reader) {
@@ -224,13 +226,13 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
reader->in_array = 1;
break;
case GRPC_JSON_TOP_LEVEL:
- if (reader->depth != 0) return GRPC_JSON_INTERNAL_ERROR;
+ GPR_ASSERT(reader->depth == 0);
reader->in_object = 0;
reader->in_array = 0;
reader->state = GRPC_JSON_STATE_END;
break;
default:
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
}
break;
@@ -279,8 +281,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
break;
case GRPC_JSON_STATE_OBJECT_KEY_STRING:
- if (reader->unicode_high_surrogate != 0)
- return GRPC_JSON_PARSE_ERROR;
+ GPR_ASSERT(reader->unicode_high_surrogate == 0);
if (c == '"') {
reader->state = GRPC_JSON_STATE_OBJECT_KEY_END;
json_reader_set_key(reader);
@@ -461,7 +462,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
}
break;
default:
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
break;
@@ -641,7 +642,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
case ',':
case '}':
case ']':
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
break;
default:
@@ -655,5 +656,5 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
}
}
- return GRPC_JSON_INTERNAL_ERROR;
+ GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
diff --git a/src/core/json/json_string.c b/src/core/json/json_string.c
index 0461c2703f..06c157dc98 100644
--- a/src/core/json/json_string.c
+++ b/src/core/json/json_string.c
@@ -353,7 +353,7 @@ static void json_dump_recursive(grpc_json_writer *writer, grpc_json *json,
grpc_json_writer_value_raw_with_len(writer, "null", 4);
break;
default:
- abort();
+ GPR_UNREACHABLE_CODE(abort());
}
json = json->next;
}
diff --git a/src/core/profiling/basic_timers.c b/src/core/profiling/basic_timers.c
index b49cdd07b3..eedd387ebc 100644
--- a/src/core/profiling/basic_timers.c
+++ b/src/core/profiling/basic_timers.c
@@ -50,60 +50,198 @@ typedef struct gpr_timer_entry {
gpr_timespec tm;
const char *tagstr;
const char *file;
- int line;
+ short line;
char type;
gpr_uint8 important;
+ int thd;
} gpr_timer_entry;
-#define MAX_COUNT (1024 * 1024 / sizeof(gpr_timer_entry))
+#define MAX_COUNT 1000000
-static __thread gpr_timer_entry g_log[MAX_COUNT];
-static __thread int g_count;
+typedef struct gpr_timer_log {
+ size_t num_entries;
+ struct gpr_timer_log *next;
+ struct gpr_timer_log *prev;
+ gpr_timer_entry log[MAX_COUNT];
+} gpr_timer_log;
+
+typedef struct gpr_timer_log_list {
+ gpr_timer_log *head;
+ /* valid iff head!=NULL */
+ gpr_timer_log *tail;
+} gpr_timer_log_list;
+
+static __thread gpr_timer_log *g_thread_log;
static gpr_once g_once_init = GPR_ONCE_INIT;
static FILE *output_file;
+static const char *output_filename = "latency_trace.txt";
+static pthread_mutex_t g_mu;
+static pthread_cond_t g_cv;
+static gpr_timer_log_list g_in_progress_logs;
+static gpr_timer_log_list g_done_logs;
+static int g_shutdown;
+static gpr_thd_id g_writing_thread;
+static __thread int g_thread_id;
+static int g_next_thread_id;
-static void close_output() { fclose(output_file); }
+static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
+ if (list->head == NULL) {
+ list->head = list->tail = log;
+ log->next = log->prev = NULL;
+ return 1;
+ } else {
+ log->prev = list->tail;
+ log->next = NULL;
+ list->tail->next = log;
+ list->tail = log;
+ return 0;
+ }
+}
-static void init_output() {
- output_file = fopen("latency_trace.txt", "w");
- GPR_ASSERT(output_file);
- atexit(close_output);
+static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) {
+ gpr_timer_log *out = list->head;
+ if (out != NULL) {
+ list->head = out->next;
+ if (list->head != NULL) {
+ list->head->prev = NULL;
+ } else {
+ list->tail = NULL;
+ }
+ }
+ return out;
}
-static void log_report() {
- int i;
- gpr_once_init(&g_once_init, init_output);
- for (i = 0; i < g_count; i++) {
- gpr_timer_entry *entry = &(g_log[i]);
+static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) {
+ if (log->prev == NULL) {
+ list->head = log->next;
+ if (list->head != NULL) {
+ list->head->prev = NULL;
+ }
+ } else {
+ log->prev->next = log->next;
+ }
+ if (log->next == NULL) {
+ list->tail = log->prev;
+ if (list->tail != NULL) {
+ list->tail->next = NULL;
+ }
+ } else {
+ log->next->prev = log->prev;
+ }
+}
+
+static void write_log(gpr_timer_log *log) {
+ size_t i;
+ if (output_file == NULL) {
+ output_file = fopen(output_filename, "w");
+ }
+ for (i = 0; i < log->num_entries; i++) {
+ gpr_timer_entry *entry = &(log->log[i]);
+ if (gpr_time_cmp(entry->tm, gpr_time_0(entry->tm.clock_type)) < 0) {
+ entry->tm = gpr_time_0(entry->tm.clock_type);
+ }
fprintf(output_file,
- "{\"t\": %ld.%09d, \"thd\": \"%p\", \"type\": \"%c\", \"tag\": "
+ "{\"t\": %lld.%09d, \"thd\": \"%d\", \"type\": \"%c\", \"tag\": "
"\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
- entry->tm.tv_sec, entry->tm.tv_nsec,
- (void *)(gpr_intptr)gpr_thd_currentid(), entry->type, entry->tagstr,
- entry->file, entry->line, entry->important);
+ (long long)entry->tm.tv_sec, (int)entry->tm.tv_nsec, entry->thd,
+ entry->type, entry->tagstr, entry->file, entry->line,
+ entry->important);
+ }
+}
+
+static void writing_thread(void *unused) {
+ gpr_timer_log *log;
+ pthread_mutex_lock(&g_mu);
+ for (;;) {
+ while ((log = timer_log_pop_front(&g_done_logs)) == NULL && !g_shutdown) {
+ pthread_cond_wait(&g_cv, &g_mu);
+ }
+ if (log != NULL) {
+ pthread_mutex_unlock(&g_mu);
+ write_log(log);
+ free(log);
+ pthread_mutex_lock(&g_mu);
+ }
+ if (g_shutdown) {
+ pthread_mutex_unlock(&g_mu);
+ return;
+ }
}
+}
- /* Now clear out the log */
- g_count = 0;
+static void flush_logs(gpr_timer_log_list *list) {
+ gpr_timer_log *log;
+ while ((log = timer_log_pop_front(list)) != NULL) {
+ write_log(log);
+ free(log);
+ }
+}
+
+static void finish_writing() {
+ pthread_mutex_lock(&g_mu);
+ g_shutdown = 1;
+ pthread_cond_signal(&g_cv);
+ pthread_mutex_unlock(&g_mu);
+ gpr_thd_join(g_writing_thread);
+
+ gpr_log(GPR_INFO, "flushing logs");
+
+ pthread_mutex_lock(&g_mu);
+ flush_logs(&g_done_logs);
+ flush_logs(&g_in_progress_logs);
+ pthread_mutex_unlock(&g_mu);
+
+ if (output_file) {
+ fclose(output_file);
+ }
+}
+
+void gpr_timers_set_log_filename(const char *filename) {
+ output_filename = filename;
+}
+
+static void init_output() {
+ gpr_thd_options options = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&options);
+ gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options);
+ atexit(finish_writing);
+}
+
+static void rotate_log() {
+ gpr_timer_log *new = malloc(sizeof(*new));
+ gpr_once_init(&g_once_init, init_output);
+ new->num_entries = 0;
+ pthread_mutex_lock(&g_mu);
+ if (g_thread_log != NULL) {
+ timer_log_remove(&g_in_progress_logs, g_thread_log);
+ if (timer_log_push_back(&g_done_logs, g_thread_log)) {
+ pthread_cond_signal(&g_cv);
+ }
+ } else {
+ g_thread_id = g_next_thread_id++;
+ }
+ timer_log_push_back(&g_in_progress_logs, new);
+ pthread_mutex_unlock(&g_mu);
+ g_thread_log = new;
}
static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
- /* TODO (vpai) : Improve concurrency */
- if (g_count == MAX_COUNT) {
- log_report();
+ if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
+ rotate_log();
}
- entry = &g_log[g_count++];
+ entry = &g_thread_log->log[g_thread_log->num_entries++];
entry->tm = gpr_now(GPR_CLOCK_PRECISE);
entry->tagstr = tagstr;
entry->type = type;
entry->file = file;
- entry->line = line;
+ entry->line = (short)line;
entry->important = important != 0;
+ entry->thd = g_thread_id;
}
/* Latency profiler API implementation. */
@@ -131,4 +269,6 @@ void gpr_timers_global_destroy(void) {}
void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
+
+void gpr_timers_set_log_filename(const char *filename) {}
#endif /* GRPC_BASIC_PROFILER */
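For the new API, a minimal sketch (the path is made up). Note that the
implementation above stores the pointer rather than copying the string, so the
argument must stay valid for the lifetime of the process, and the call should
happen before any GPR_TIMER_* marks are flushed.

gpr_timers_set_log_filename("/tmp/latency_trace.worker0.txt");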
diff --git a/src/core/profiling/timers.h b/src/core/profiling/timers.h
index 0d112e7248..6a188dc566 100644
--- a/src/core/profiling/timers.h
+++ b/src/core/profiling/timers.h
@@ -48,6 +48,8 @@ void gpr_timer_begin(const char *tagstr, int important, const char *file,
void gpr_timer_end(const char *tagstr, int important, const char *file,
int line);
+void gpr_timers_set_log_filename(const char *filename);
+
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
diff --git a/src/core/security/client_auth_filter.c b/src/core/security/client_auth_filter.c
index 18f18410d5..b1fd733c91 100644
--- a/src/core/security/client_auth_filter.c
+++ b/src/core/security/client_auth_filter.c
@@ -39,12 +39,13 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/support/string.h"
#include "src/core/channel/channel_stack.h"
-#include "src/core/security/security_context.h"
-#include "src/core/security/security_connector.h"
#include "src/core/security/credentials.h"
+#include "src/core/security/security_connector.h"
+#include "src/core/security/security_context.h"
+#include "src/core/support/string.h"
#include "src/core/surface/call.h"
+#include "src/core/transport/static_metadata.h"
#define MAX_CREDENTIALS_METADATA_COUNT 4
@@ -59,8 +60,6 @@ typedef struct {
progress */
grpc_pollset *pollset;
grpc_transport_stream_op op;
- size_t op_md_idx;
- int sent_initial_metadata;
gpr_uint8 security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
@@ -69,11 +68,6 @@ typedef struct {
/* We can have a per-channel credentials. */
typedef struct {
grpc_channel_security_connector *security_connector;
- grpc_mdctx *md_ctx;
- grpc_mdstr *authority_string;
- grpc_mdstr *path_string;
- grpc_mdstr *error_msg_key;
- grpc_mdstr *status_key;
} channel_data;
static void reset_auth_metadata_context(
@@ -106,7 +100,6 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_credentials_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
grpc_transport_stream_op *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i;
@@ -117,13 +110,12 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
- GPR_ASSERT(op->send_ops && op->send_ops->nops > calld->op_md_idx &&
- op->send_ops->ops[calld->op_md_idx].type == GRPC_OP_METADATA);
- mdb = &op->send_ops->ops[calld->op_md_idx].data.metadata;
+ GPR_ASSERT(op->send_initial_metadata != NULL);
+ mdb = op->send_initial_metadata;
for (i = 0; i < num_md; i++) {
grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i],
- grpc_mdelem_from_slices(chand->md_ctx, gpr_slice_ref(md_elems[i].key),
+ grpc_mdelem_from_slices(gpr_slice_ref(md_elems[i].key),
gpr_slice_ref(md_elems[i].value)));
}
grpc_call_next_op(exec_ctx, elem, op);
@@ -223,7 +215,6 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_linked_mdelem *l;
- size_t i;
grpc_client_security_context *sec_ctx = NULL;
if (calld->security_context_set == 0 &&
@@ -242,53 +233,41 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
chand->security_connector->base.auth_context, "client_auth_filter");
}
- if (op->bind_pollset != NULL) {
- calld->pollset = op->bind_pollset;
- }
-
- if (op->send_ops != NULL && !calld->sent_initial_metadata) {
- size_t nops = op->send_ops->nops;
- grpc_stream_op *ops = op->send_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *sop = &ops[i];
- if (sop->type != GRPC_OP_METADATA) continue;
- calld->op_md_idx = i;
- calld->sent_initial_metadata = 1;
- for (l = sop->data.metadata.list.head; l != NULL; l = l->next) {
- grpc_mdelem *md = l->md;
- /* Pointer comparison is OK for md_elems created from the same context.
- */
- if (md->key == chand->authority_string) {
- if (calld->host != NULL) GRPC_MDSTR_UNREF(calld->host);
- calld->host = GRPC_MDSTR_REF(md->value);
- } else if (md->key == chand->path_string) {
- if (calld->method != NULL) GRPC_MDSTR_UNREF(calld->method);
- calld->method = GRPC_MDSTR_REF(md->value);
- }
+ if (op->send_initial_metadata != NULL) {
+ for (l = op->send_initial_metadata->list.head; l != NULL; l = l->next) {
+ grpc_mdelem *md = l->md;
+ /* Pointer comparison is OK for md_elems created from the same context.
+ */
+ if (md->key == GRPC_MDSTR_AUTHORITY) {
+ if (calld->host != NULL) GRPC_MDSTR_UNREF(calld->host);
+ calld->host = GRPC_MDSTR_REF(md->value);
+ } else if (md->key == GRPC_MDSTR_PATH) {
+ if (calld->method != NULL) GRPC_MDSTR_UNREF(calld->method);
+ calld->method = GRPC_MDSTR_REF(md->value);
}
- if (calld->host != NULL) {
- grpc_security_status status;
- const char *call_host = grpc_mdstr_as_c_string(calld->host);
- calld->op = *op; /* Copy op (originates from the caller's stack). */
- status = grpc_channel_security_connector_check_call_host(
- exec_ctx, chand->security_connector, call_host, on_host_checked,
- elem);
- if (status != GRPC_SECURITY_OK) {
- if (status == GRPC_SECURITY_ERROR) {
- char *error_msg;
- gpr_asprintf(&error_msg,
- "Invalid host %s set in :authority metadata.",
- call_host);
- bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
- error_msg);
- gpr_free(error_msg);
- }
- return; /* early exit */
+ }
+ if (calld->host != NULL) {
+ grpc_security_status status;
+ const char *call_host = grpc_mdstr_as_c_string(calld->host);
+ calld->op = *op; /* Copy op (originates from the caller's stack). */
+ status = grpc_channel_security_connector_check_call_host(
+ exec_ctx, chand->security_connector, call_host, on_host_checked,
+ elem);
+ if (status != GRPC_SECURITY_OK) {
+ if (status == GRPC_SECURITY_ERROR) {
+ char *error_msg;
+ gpr_asprintf(&error_msg,
+ "Invalid host %s set in :authority metadata.",
+ call_host);
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
+ error_msg);
+ gpr_free(error_msg);
}
+ return; /* early exit */
}
- send_security_metadata(exec_ctx, elem, op);
- return; /* early exit */
}
+ send_security_metadata(exec_ctx, elem, op);
+ return; /* early exit */
}
/* pass control down the stack */
@@ -297,11 +276,15 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
- GPR_ASSERT(!initial_op || !initial_op->send_ops);
+}
+
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {
+ call_data *calld = elem->call_data;
+ calld->pollset = pollset;
}
/* Destructor for call_data */
@@ -320,18 +303,17 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
- grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ grpc_security_connector *sc =
+ grpc_find_security_connector_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
- GPR_ASSERT(!is_last);
+ GPR_ASSERT(!args->is_last);
GPR_ASSERT(sc != NULL);
/* initialize members */
@@ -339,11 +321,6 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->security_connector =
(grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
sc, "client_auth_filter");
- chand->md_ctx = metadata_context;
- chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority");
- chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path");
- chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message");
- chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status");
}
/* Destructor for channel data */
@@ -352,24 +329,13 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
grpc_channel_security_connector *ctx = chand->security_connector;
- if (ctx != NULL)
+ if (ctx != NULL) {
GRPC_SECURITY_CONNECTOR_UNREF(&ctx->base, "client_auth_filter");
- if (chand->authority_string != NULL) {
- GRPC_MDSTR_UNREF(chand->authority_string);
- }
- if (chand->error_msg_key != NULL) {
- GRPC_MDSTR_UNREF(chand->error_msg_key);
- }
- if (chand->status_key != NULL) {
- GRPC_MDSTR_UNREF(chand->status_key);
- }
- if (chand->path_string != NULL) {
- GRPC_MDSTR_UNREF(chand->path_string);
}
}
const grpc_channel_filter grpc_client_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
+ init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"client-auth"};
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 543c75044b..a0054741ad 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -39,7 +39,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/http_client_filter.h"
#include "src/core/httpcli/httpcli.h"
-#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/executor.h"
#include "src/core/json/json.h"
#include "src/core/support/string.h"
#include "src/core/surface/api_trace.h"
@@ -48,7 +48,6 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-#include <grpc/support/thd.h>
#include <grpc/support/time.h>
/* -- Common. -- */
@@ -511,10 +510,11 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
"grpc_service_account_jwt_access_credentials_create("
"json_key=%s, "
"token_lifetime="
- "gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
+ "gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
- 5, (json_key, (long)token_lifetime.tv_sec, token_lifetime.tv_nsec,
- (int)token_lifetime.clock_type, reserved));
+ 5,
+ (json_key, (long long)token_lifetime.tv_sec, (int)token_lifetime.tv_nsec,
+ (int)token_lifetime.clock_type, reserved));
GPR_ASSERT(reserved == NULL);
return grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
grpc_auth_json_key_create_from_string(json_key), token_lifetime);
@@ -792,15 +792,14 @@ static void md_only_test_destruct(grpc_call_credentials *creds) {
grpc_credentials_md_store_unref(c->md_store);
}
-static void on_simulated_token_fetch_done(void *user_data) {
+static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
+ void *user_data, int success) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- r->cb(&exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
+ r->cb(exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r);
- grpc_exec_ctx_finish(&exec_ctx);
}
static void md_only_test_get_request_metadata(
@@ -810,10 +809,10 @@ static void md_only_test_get_request_metadata(
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
if (c->is_async) {
- gpr_thd_id thd_id;
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
- gpr_thd_new(&thd_id, on_simulated_token_fetch_done, cb_arg, NULL);
+ grpc_executor_enqueue(
+ grpc_closure_create(on_simulated_token_fetch_done, cb_arg), 1);
} else {
cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
}
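The thread-per-callback pattern removed above is replaced by the executor; a
generic sketch of that pattern follows (do_background_work and
schedule_background_work are illustrative names, not from the tree).

#include "src/core/iomgr/closure.h"
#include "src/core/iomgr/executor.h"

static void do_background_work(grpc_exec_ctx *exec_ctx, void *arg,
                               int success) {
  /* runs later on an executor thread, with an exec_ctx supplied for it */
}

static void schedule_background_work(void *arg) {
  grpc_executor_enqueue(grpc_closure_create(do_background_work, arg),
                        1 /* success */);
}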
diff --git a/src/core/security/credentials.h b/src/core/security/credentials.h
index 96135f4776..3cd652cd57 100644
--- a/src/core/security/credentials.h
+++ b/src/core/security/credentials.h
@@ -34,7 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H
#define GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/sync.h>
@@ -93,6 +93,14 @@ typedef enum {
/* It is the caller's responsibility to gpr_free the result if not NULL. */
char *grpc_get_well_known_google_credentials_file_path(void);
+/* Implementation function for the different platforms. */
+char *grpc_get_well_known_google_credentials_file_path_impl(void);
+
+/* Override for testing only. Not thread-safe */
+typedef char *(*grpc_well_known_credentials_path_getter)(void);
+void grpc_override_well_known_credentials_path_getter(
+ grpc_well_known_credentials_path_getter getter);
+
/* --- grpc_channel_credentials. --- */
typedef struct {
@@ -201,6 +209,7 @@ grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_httpcli_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime);
+
void grpc_flush_cached_google_default_credentials(void);
/* Metadata-only credentials with the specified key and value where
diff --git a/src/core/security/credentials_posix.c b/src/core/security/credentials_posix.c
index 20f67a7f14..0c92bd4a96 100644
--- a/src/core/security/credentials_posix.c
+++ b/src/core/security/credentials_posix.c
@@ -44,7 +44,7 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
-char *grpc_get_well_known_google_credentials_file_path(void) {
+char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
char *home = gpr_getenv("HOME");
if (home == NULL) {
diff --git a/src/core/security/credentials_win32.c b/src/core/security/credentials_win32.c
index 92dfd9bdfe..8ee9f706a1 100644
--- a/src/core/security/credentials_win32.c
+++ b/src/core/security/credentials_win32.c
@@ -44,7 +44,7 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
-char *grpc_get_well_known_google_credentials_file_path(void) {
+char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
char *appdata_path = gpr_getenv("APPDATA");
if (appdata_path == NULL) {
diff --git a/src/core/security/google_default_credentials.c b/src/core/security/google_default_credentials.c
index 6a54fe4e47..5385e41130 100644
--- a/src/core/security/google_default_credentials.c
+++ b/src/core/security/google_default_credentials.c
@@ -241,5 +241,20 @@ void grpc_flush_cached_google_default_credentials(void) {
grpc_channel_credentials_unref(default_credentials);
default_credentials = NULL;
}
+ compute_engine_detection_done = 0;
gpr_mu_unlock(&g_mu);
}
+
+/* -- Well known credentials path. -- */
+
+static grpc_well_known_credentials_path_getter creds_path_getter = NULL;
+
+char *grpc_get_well_known_google_credentials_file_path(void) {
+ if (creds_path_getter != NULL) return creds_path_getter();
+ return grpc_get_well_known_google_credentials_file_path_impl();
+}
+
+void grpc_override_well_known_credentials_path_getter(
+ grpc_well_known_credentials_path_getter getter) {
+ creds_path_getter = getter;
+}
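A test-only sketch of the new override hook. The fixture path is hypothetical;
the returned string is gpr_free()d by the caller of
grpc_get_well_known_google_credentials_file_path(), hence gpr_strdup.

#include <grpc/support/string_util.h>
#include "src/core/security/credentials.h"

static char *fake_creds_path(void) {
  return gpr_strdup("/tmp/fake_google_default_creds.json");
}

static void install_fake_creds_path(void) {
  grpc_override_well_known_credentials_path_getter(fake_creds_path);
}

static void restore_default_creds_path(void) {
  grpc_override_well_known_credentials_path_getter(NULL); /* back to _impl() */
}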
diff --git a/src/core/security/handshake.c b/src/core/security/handshake.c
index adbdd0b40e..6734187fce 100644
--- a/src/core/security/handshake.c
+++ b/src/core/security/handshake.c
@@ -64,12 +64,39 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *setup,
int success);
+static void security_connector_remove_handshake(grpc_security_handshake *h) {
+ grpc_security_connector_handshake_list *node;
+ grpc_security_connector_handshake_list *tmp;
+ grpc_security_connector *sc = h->connector;
+ gpr_mu_lock(&sc->mu);
+ node = sc->handshaking_handshakes;
+ if (node && node->handshake == h) {
+ sc->handshaking_handshakes = node->next;
+ gpr_free(node);
+ gpr_mu_unlock(&sc->mu);
+ return;
+ }
+ while (node) {
+ if (node->next->handshake == h) {
+ tmp = node->next;
+ node->next = node->next->next;
+ gpr_free(tmp);
+ gpr_mu_unlock(&sc->mu);
+ return;
+ }
+ node = node->next;
+ }
+ gpr_mu_unlock(&sc->mu);
+}
+
static void security_handshake_done(grpc_exec_ctx *exec_ctx,
grpc_security_handshake *h,
int is_success) {
+ if (!h->connector->is_client_side) {
+ security_connector_remove_handshake(h);
+ }
if (is_success) {
- h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->wrapped_endpoint,
- h->secure_endpoint);
+ h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->secure_endpoint);
} else {
if (h->secure_endpoint != NULL) {
grpc_endpoint_shutdown(exec_ctx, h->secure_endpoint);
@@ -77,8 +104,7 @@ static void security_handshake_done(grpc_exec_ctx *exec_ctx,
} else {
grpc_endpoint_destroy(exec_ctx, h->wrapped_endpoint);
}
- h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, h->wrapped_endpoint,
- NULL);
+ h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, NULL);
}
if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
@@ -268,6 +294,7 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
+ grpc_security_connector_handshake_list *handshake_node;
grpc_security_handshake *h = gpr_malloc(sizeof(grpc_security_handshake));
memset(h, 0, sizeof(grpc_security_handshake));
h->handshaker = handshaker;
@@ -284,5 +311,19 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
gpr_slice_buffer_init(&h->left_overs);
gpr_slice_buffer_init(&h->outgoing);
gpr_slice_buffer_init(&h->incoming);
+ if (!connector->is_client_side) {
+ handshake_node = gpr_malloc(sizeof(grpc_security_connector_handshake_list));
+ handshake_node->handshake = h;
+ gpr_mu_lock(&connector->mu);
+ handshake_node->next = connector->handshaking_handshakes;
+ connector->handshaking_handshakes = handshake_node;
+ gpr_mu_unlock(&connector->mu);
+ }
send_handshake_bytes_to_peer(exec_ctx, h);
}
+
+void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx,
+ void *handshake) {
+ grpc_security_handshake *h = handshake;
+ grpc_endpoint_shutdown(exec_ctx, h->wrapped_endpoint);
+}
diff --git a/src/core/security/handshake.h b/src/core/security/handshake.h
index 28eaa79dc3..44215d16ef 100644
--- a/src/core/security/handshake.h
+++ b/src/core/security/handshake.h
@@ -45,4 +45,6 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
grpc_security_handshake_done_cb cb,
void *user_data);
+void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx, void *handshake);
+
#endif /* GRPC_INTERNAL_CORE_SECURITY_HANDSHAKE_H */
diff --git a/src/core/security/json_token.c b/src/core/security/json_token.c
index 021912f333..92775d885d 100644
--- a/src/core/security/json_token.c
+++ b/src/core/security/json_token.c
@@ -215,8 +215,8 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
gpr_log(GPR_INFO, "Cropping token lifetime to maximum allowed value.");
expiration = gpr_time_add(now, grpc_max_auth_token_lifetime);
}
- gpr_ltoa(now.tv_sec, now_str);
- gpr_ltoa(expiration.tv_sec, expiration_str);
+ gpr_int64toa(now.tv_sec, now_str);
+ gpr_int64toa(expiration.tv_sec, expiration_str);
child =
create_child(NULL, json, "iss", json_key->client_email, GRPC_JSON_STRING);
diff --git a/src/core/security/security_connector.c b/src/core/security/security_connector.c
index 3c54a4deae..8c6ab0b8a4 100644
--- a/src/core/security/security_connector.c
+++ b/src/core/security/security_connector.c
@@ -102,13 +102,29 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
return NULL;
}
+void grpc_security_connector_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *connector) {
+ grpc_security_connector_handshake_list *tmp;
+ if (!connector->is_client_side) {
+ gpr_mu_lock(&connector->mu);
+ while (connector->handshaking_handshakes) {
+ tmp = connector->handshaking_handshakes;
+ grpc_security_handshake_shutdown(
+ exec_ctx, connector->handshaking_handshakes->handshake);
+ connector->handshaking_handshakes = tmp->next;
+ gpr_free(tmp);
+ }
+ gpr_mu_unlock(&connector->mu);
+ }
+}
+
void grpc_security_connector_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
if (sc == NULL || nonsecure_endpoint == NULL) {
- cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
} else {
sc->vtable->do_handshake(exec_ctx, sc, nonsecure_endpoint, cb, user_data);
}
@@ -219,6 +235,7 @@ static void fake_channel_destroy(grpc_security_connector *sc) {
static void fake_server_destroy(grpc_security_connector *sc) {
GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
+ gpr_mu_destroy(&sc->mu);
gpr_free(sc);
}
@@ -319,6 +336,7 @@ grpc_security_connector *grpc_fake_server_security_connector_create(void) {
c->is_client_side = 0;
c->vtable = &fake_server_vtable;
c->url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
+ gpr_mu_init(&c->mu);
return c;
}
@@ -354,10 +372,12 @@ static void ssl_channel_destroy(grpc_security_connector *sc) {
static void ssl_server_destroy(grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
+
if (c->handshaker_factory != NULL) {
tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
}
GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
+ gpr_mu_destroy(&sc->mu);
gpr_free(sc);
}
@@ -390,7 +410,7 @@ static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
: c->target_name,
&handshaker);
if (status != GRPC_SECURITY_OK) {
- cb(exec_ctx, user_data, status, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, status, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@@ -408,7 +428,7 @@ static void ssl_server_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_security_status status =
ssl_create_handshaker(c->handshaker_factory, 0, NULL, &handshaker);
if (status != GRPC_SECURITY_OK) {
- cb(exec_ctx, user_data, status, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, status, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@@ -691,6 +711,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
*sc = NULL;
goto error;
}
+ gpr_mu_init(&c->base.mu);
*sc = &c->base;
gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
diff --git a/src/core/security/security_connector.h b/src/core/security/security_connector.h
index c5f00c5563..7edb05a662 100644
--- a/src/core/security/security_connector.h
+++ b/src/core/security/security_connector.h
@@ -67,7 +67,6 @@ typedef void (*grpc_security_check_cb)(grpc_exec_ctx *exec_ctx, void *user_data,
typedef void (*grpc_security_handshake_done_cb)(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_security_status status,
- grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint);
typedef struct {
@@ -80,12 +79,22 @@ typedef struct {
void *user_data);
} grpc_security_connector_vtable;
+typedef struct grpc_security_connector_handshake_list {
+ void *handshake;
+ struct grpc_security_connector_handshake_list *next;
+} grpc_security_connector_handshake_list;
+
struct grpc_security_connector {
const grpc_security_connector_vtable *vtable;
gpr_refcount refcount;
int is_client_side;
const char *url_scheme;
grpc_auth_context *auth_context; /* Populated after the peer is checked. */
+ /* Used on server side only. */
+ /* TODO(yangg) maybe create a grpc_server_security_connector with these */
+ gpr_mu mu;
+ grpc_security_connector_handshake_list *handshaking_handshakes;
+ const grpc_channel_args *channel_args;
};
/* Refcounting. */
@@ -126,6 +135,9 @@ grpc_security_status grpc_security_connector_check_peer(
grpc_security_connector *sc, tsi_peer peer, grpc_security_check_cb cb,
void *user_data);
+void grpc_security_connector_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *connector);
+
/* Util to encapsulate the connector in a channel arg. */
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc);
diff --git a/src/core/security/server_auth_filter.c b/src/core/security/server_auth_filter.c
index 67b3826162..5cfee6d139 100644
--- a/src/core/security/server_auth_filter.c
+++ b/src/core/security/server_auth_filter.c
@@ -41,8 +41,7 @@
#include <grpc/support/log.h>
typedef struct call_data {
- gpr_uint8 got_client_metadata;
- grpc_stream_op_buffer *recv_ops;
+ grpc_metadata_batch *recv_initial_metadata;
/* Closure to call when finished with the auth_on_recv hook. */
grpc_closure *on_done_recv;
/* Receive closures are chained: we inject this closure as the on_done_recv
@@ -53,14 +52,12 @@ typedef struct call_data {
grpc_metadata_array md;
const grpc_metadata *consumed_md;
size_t num_consumed_md;
- grpc_stream_op *md_op;
grpc_auth_context *auth_context;
} call_data;
typedef struct channel_data {
grpc_auth_context *auth_context;
grpc_server_credentials *creds;
- grpc_mdctx *mdctx;
} channel_data;
static grpc_metadata_array metadata_batch_to_md_array(
@@ -128,20 +125,28 @@ static void on_md_processing_done(
if (status == GRPC_STATUS_OK) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
- grpc_metadata_batch_filter(&calld->md_op->data.metadata, remove_consumed_md,
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
elem);
grpc_metadata_array_destroy(&calld->md);
calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 1);
} else {
gpr_slice message;
+ grpc_transport_stream_op close_op;
+ memset(&close_op, 0, sizeof(close_op));
grpc_metadata_array_destroy(&calld->md);
error_details = error_details != NULL
? error_details
: "Authentication metadata processing failed.";
message = gpr_slice_from_copied_string(error_details);
- grpc_sopb_reset(calld->recv_ops);
- grpc_transport_stream_op_add_close(&calld->transport_op, status, &message);
- grpc_call_next_op(&exec_ctx, elem, &calld->transport_op);
+ calld->transport_op.send_initial_metadata = NULL;
+ if (calld->transport_op.send_message != NULL) {
+ grpc_byte_stream_destroy(calld->transport_op.send_message);
+ calld->transport_op.send_message = NULL;
+ }
+ calld->transport_op.send_trailing_metadata = NULL;
+ grpc_transport_stream_op_add_close(&close_op, status, &message);
+ grpc_call_next_op(&exec_ctx, elem, &close_op);
+ calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 0);
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -153,16 +158,8 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success) {
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA || calld->got_client_metadata) continue;
- calld->got_client_metadata = 1;
- if (chand->creds->processor.process == NULL) continue;
- calld->md_op = op;
- calld->md = metadata_batch_to_md_array(&op->data.metadata);
+ if (chand->creds->processor.process != NULL) {
+ calld->md = metadata_batch_to_md_array(calld->recv_initial_metadata);
chand->creds->processor.process(
chand->creds->processor.state, calld->auth_context,
calld->md.metadata, calld->md.count, on_md_processing_done, elem);
@@ -176,11 +173,11 @@ static void set_recv_ops_md_callbacks(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
- if (op->recv_ops && !calld->got_client_metadata) {
+ if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->auth_on_recv;
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv = op->on_complete;
+ op->on_complete = &calld->auth_on_recv;
calld->transport_op = *op;
}
}
@@ -199,8 +196,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
@@ -210,46 +206,39 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
- GPR_ASSERT(initial_op && initial_op->context != NULL &&
- initial_op->context[GRPC_CONTEXT_SECURITY].value == NULL);
-
- /* Create a security context for the call and reference the auth context from
- the channel. */
- if (initial_op->context[GRPC_CONTEXT_SECURITY].value != NULL) {
- initial_op->context[GRPC_CONTEXT_SECURITY].destroy(
- initial_op->context[GRPC_CONTEXT_SECURITY].value);
+ if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
+ args->context[GRPC_CONTEXT_SECURITY].destroy(
+ args->context[GRPC_CONTEXT_SECURITY].value);
}
+
server_ctx = grpc_server_security_context_create();
server_ctx->auth_context = grpc_auth_context_create(chand->auth_context);
- server_ctx->auth_context->pollset = initial_op->bind_pollset;
- initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
- initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_server_security_context_destroy;
calld->auth_context = server_ctx->auth_context;
- /* Set the metadata callbacks. */
- set_recv_ops_md_callbacks(elem, initial_op);
+ args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
+ args->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_server_security_context_destroy;
}
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_pollset *pollset) {}
+
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- grpc_auth_context *auth_context = grpc_find_auth_context_in_args(args);
- grpc_server_credentials *creds = grpc_find_server_credentials_in_args(args);
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ grpc_auth_context *auth_context =
+ grpc_find_auth_context_in_args(args->channel_args);
+ grpc_server_credentials *creds =
+ grpc_find_server_credentials_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
- /* The first and the last filters tend to be implemented differently to
- handle the case that there's no 'next' filter to call on the up or down
- path */
- GPR_ASSERT(!is_first);
- GPR_ASSERT(!is_last);
+ GPR_ASSERT(!args->is_last);
GPR_ASSERT(auth_context != NULL);
GPR_ASSERT(creds != NULL);
@@ -257,7 +246,6 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
chand->creds = grpc_server_credentials_ref(creds);
- chand->mdctx = mdctx;
}
/* Destructor for channel data */
@@ -271,6 +259,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_server_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data),
- init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
+ init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
+ init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"server-auth"};
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index 851e0cfab3..d1468e40e0 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -52,17 +52,11 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-typedef struct tcp_endpoint_list {
- grpc_endpoint *tcp_endpoint;
- struct tcp_endpoint_list *next;
-} tcp_endpoint_list;
-
typedef struct grpc_server_secure_state {
grpc_server *server;
grpc_tcp_server *tcp;
grpc_security_connector *sc;
grpc_server_credentials *creds;
- tcp_endpoint_list *handshaking_tcp_endpoints;
int is_shutdown;
gpr_mu mu;
gpr_refcount refcount;
@@ -87,7 +81,7 @@ static void state_unref(grpc_server_secure_state *state) {
}
static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
- grpc_transport *transport, grpc_mdctx *mdctx) {
+ grpc_transport *transport) {
static grpc_channel_filter const *extra_filters[] = {
&grpc_server_auth_filter, &grpc_http_server_filter};
grpc_server_secure_state *state = statep;
@@ -99,58 +93,32 @@ static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
grpc_server_get_channel_args(state->server), args_to_add,
GPR_ARRAY_SIZE(args_to_add));
grpc_server_setup_transport(exec_ctx, state->server, transport, extra_filters,
- GPR_ARRAY_SIZE(extra_filters), mdctx, args_copy);
+ GPR_ARRAY_SIZE(extra_filters), args_copy);
grpc_channel_args_destroy(args_copy);
}
-static int remove_tcp_from_list_locked(grpc_server_secure_state *state,
- grpc_endpoint *tcp) {
- tcp_endpoint_list *node = state->handshaking_tcp_endpoints;
- tcp_endpoint_list *tmp = NULL;
- if (node && node->tcp_endpoint == tcp) {
- state->handshaking_tcp_endpoints = state->handshaking_tcp_endpoints->next;
- gpr_free(node);
- return 0;
- }
- while (node) {
- if (node->next->tcp_endpoint == tcp) {
- tmp = node->next;
- node->next = node->next->next;
- gpr_free(tmp);
- return 0;
- }
- node = node->next;
- }
- return -1;
-}
-
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
grpc_security_status status,
- grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
grpc_server_secure_state *state = statep;
grpc_transport *transport;
- grpc_mdctx *mdctx;
if (status == GRPC_SECURITY_OK) {
- gpr_mu_lock(&state->mu);
- remove_tcp_from_list_locked(state, wrapped_endpoint);
- if (!state->is_shutdown) {
- mdctx = grpc_mdctx_create();
- transport = grpc_create_chttp2_transport(
- exec_ctx, grpc_server_get_channel_args(state->server),
- secure_endpoint, mdctx, 0);
- setup_transport(exec_ctx, state, transport, mdctx);
- grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
- } else {
- /* We need to consume this here, because the server may already have gone
- * away. */
- grpc_endpoint_destroy(exec_ctx, secure_endpoint);
+ if (secure_endpoint) {
+ gpr_mu_lock(&state->mu);
+ if (!state->is_shutdown) {
+ transport = grpc_create_chttp2_transport(
+ exec_ctx, grpc_server_get_channel_args(state->server),
+ secure_endpoint, 0);
+ setup_transport(exec_ctx, state, transport);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
+ } else {
+ /* We need to consume this here, because the server may already have
+ * gone away. */
+ grpc_endpoint_destroy(exec_ctx, secure_endpoint);
+ }
+ gpr_mu_unlock(&state->mu);
}
- gpr_mu_unlock(&state->mu);
} else {
- gpr_mu_lock(&state->mu);
- remove_tcp_from_list_locked(state, wrapped_endpoint);
- gpr_mu_unlock(&state->mu);
gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
}
state_unref(state);
@@ -159,14 +127,7 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
static void on_accept(grpc_exec_ctx *exec_ctx, void *statep,
grpc_endpoint *tcp) {
grpc_server_secure_state *state = statep;
- tcp_endpoint_list *node;
state_ref(state);
- node = gpr_malloc(sizeof(tcp_endpoint_list));
- node->tcp_endpoint = tcp;
- gpr_mu_lock(&state->mu);
- node->next = state->handshaking_tcp_endpoints;
- state->handshaking_tcp_endpoints = node;
- gpr_mu_unlock(&state->mu);
grpc_security_connector_do_handshake(exec_ctx, state->sc, tcp,
on_secure_handshake_done, state);
}
@@ -183,14 +144,7 @@ static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep, int success) {
grpc_server_secure_state *state = statep;
state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
success);
- gpr_mu_lock(&state->mu);
- while (state->handshaking_tcp_endpoints != NULL) {
- grpc_endpoint_shutdown(exec_ctx,
- state->handshaking_tcp_endpoints->tcp_endpoint);
- remove_tcp_from_list_locked(state,
- state->handshaking_tcp_endpoints->tcp_endpoint);
- }
- gpr_mu_unlock(&state->mu);
+ grpc_security_connector_shutdown(exec_ctx, state->sc);
state_unref(state);
}
@@ -236,6 +190,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
creds->type);
goto error;
}
+ sc->channel_args = grpc_server_get_channel_args(server);
/* resolve address */
resolved = grpc_blocking_resolve_address(addr, "https");
@@ -249,10 +204,12 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
}
for (i = 0; i < resolved->naddrs; i++) {
- port_temp = grpc_tcp_server_add_port(
+ grpc_tcp_listener *listener;
+ listener = grpc_tcp_server_add_port(
tcp, (struct sockaddr *)&resolved->addrs[i].addr,
resolved->addrs[i].len);
- if (port_temp >= 0) {
+ port_temp = grpc_tcp_listener_get_port(listener);
+ if (port_temp > 0) {
if (port_num == -1) {
port_num = port_temp;
} else {
@@ -280,7 +237,6 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
state->sc = sc;
state->creds = grpc_server_credentials_ref(creds);
- state->handshaking_tcp_endpoints = NULL;
state->is_shutdown = 0;
gpr_mu_init(&state->mu);
gpr_ref_init(&state->refcount, 1);
diff --git a/src/core/statistics/window_stats.c b/src/core/statistics/window_stats.c
index 4d0d3cca4a..e744006bb5 100644
--- a/src/core/statistics/window_stats.c
+++ b/src/core/statistics/window_stats.c
@@ -94,7 +94,7 @@ static gpr_int64 timespec_to_ns(const gpr_timespec ts) {
if (ts.tv_sec > max_seconds) {
return GPR_INT64_MAX - 1;
}
- return (gpr_int64)ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
+ return ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
}
static void cws_initialize_statistic(void *statistic,
diff --git a/src/core/support/alloc.c b/src/core/support/alloc.c
index bfcb77956b..ca72379b1d 100644
--- a/src/core/support/alloc.c
+++ b/src/core/support/alloc.c
@@ -34,13 +34,27 @@
#include <grpc/support/alloc.h>
#include <stdlib.h>
+#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/profiling/timers.h"
+static gpr_allocation_functions g_alloc_functions = {malloc, realloc, free};
+
+gpr_allocation_functions gpr_get_allocation_functions() {
+ return g_alloc_functions;
+}
+
+void gpr_set_allocation_functions(gpr_allocation_functions functions) {
+ GPR_ASSERT(functions.malloc_fn != NULL);
+ GPR_ASSERT(functions.realloc_fn != NULL);
+ GPR_ASSERT(functions.free_fn != NULL);
+ g_alloc_functions = functions;
+}
+
void *gpr_malloc(size_t size) {
void *p;
GPR_TIMER_BEGIN("gpr_malloc", 0);
- p = malloc(size);
+ p = g_alloc_functions.malloc_fn(size);
if (!p) {
abort();
}
@@ -50,13 +64,13 @@ void *gpr_malloc(size_t size) {
void gpr_free(void *p) {
GPR_TIMER_BEGIN("gpr_free", 0);
- free(p);
+ g_alloc_functions.free_fn(p);
GPR_TIMER_END("gpr_free", 0);
}
void *gpr_realloc(void *p, size_t size) {
GPR_TIMER_BEGIN("gpr_realloc", 0);
- p = realloc(p, size);
+ p = g_alloc_functions.realloc_fn(p, size);
if (!p) {
abort();
}
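
For context, a minimal sketch of how a caller might use the allocator override hooks introduced above. Only gpr_set_allocation_functions, gpr_get_allocation_functions and the malloc_fn/realloc_fn/free_fn fields come from this patch; the counting wrappers and main() below are illustrative only.

    #include <stdlib.h>
    #include <grpc/support/alloc.h>

    /* Illustrative wrappers: count allocations that are still live
       (not thread safe; for demonstration only). */
    static int g_live_allocs = 0;

    static void *counting_malloc(size_t size) {
      g_live_allocs++;
      return malloc(size);
    }

    static void *counting_realloc(void *p, size_t size) {
      return realloc(p, size);
    }

    static void counting_free(void *p) {
      if (p != NULL) g_live_allocs--;
      free(p);
    }

    int main(void) {
      gpr_allocation_functions saved = gpr_get_allocation_functions();
      gpr_allocation_functions fns = {counting_malloc, counting_realloc,
                                      counting_free};
      void *p;
      gpr_set_allocation_functions(fns);
      p = gpr_malloc(64); /* routed through counting_malloc */
      gpr_free(p);        /* routed through counting_free */
      gpr_set_allocation_functions(saved); /* restore the defaults */
      return g_live_allocs; /* 0 when every allocation was freed */
    }
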
diff --git a/src/core/support/avl.c b/src/core/support/avl.c
new file mode 100644
index 0000000000..9734c9987f
--- /dev/null
+++ b/src/core/support/avl.c
@@ -0,0 +1,288 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/avl.h>
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable) {
+ gpr_avl out;
+ out.vtable = vtable;
+ out.root = NULL;
+ return out;
+}
+
+static gpr_avl_node *ref_node(gpr_avl_node *node) {
+ if (node) {
+ gpr_ref(&node->refs);
+ }
+ return node;
+}
+
+static void unref_node(const gpr_avl_vtable *vtable, gpr_avl_node *node) {
+ if (node == NULL) {
+ return;
+ }
+ if (gpr_unref(&node->refs)) {
+ vtable->destroy_key(node->key);
+ vtable->destroy_value(node->value);
+ unref_node(vtable, node->left);
+ unref_node(vtable, node->right);
+ gpr_free(node);
+ }
+}
+
+static long node_height(gpr_avl_node *node) {
+ return node == NULL ? 0 : node->height;
+}
+
+#ifndef NDEBUG
+static long calculate_height(gpr_avl_node *node) {
+ return node == NULL ? 0 : 1 + GPR_MAX(calculate_height(node->left),
+ calculate_height(node->right));
+}
+
+static gpr_avl_node *assert_invariants(gpr_avl_node *n) {
+ if (n == NULL) return NULL;
+ assert_invariants(n->left);
+ assert_invariants(n->right);
+ assert(calculate_height(n) == n->height);
+ assert(labs(node_height(n->left) - node_height(n->right)) <= 1);
+ return n;
+}
+#else
+static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; }
+#endif
+
+gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ gpr_avl_node *node = gpr_malloc(sizeof(*node));
+ gpr_ref_init(&node->refs, 1);
+ node->key = key;
+ node->value = value;
+ node->left = assert_invariants(left);
+ node->right = assert_invariants(right);
+ node->height = 1 + GPR_MAX(node_height(left), node_height(right));
+ return node;
+}
+
+static gpr_avl_node *get(const gpr_avl_vtable *vtable, gpr_avl_node *node,
+ void *key) {
+ long cmp;
+
+ if (node == NULL) {
+ return NULL;
+ }
+
+ cmp = vtable->compare_keys(node->key, key);
+ if (cmp == 0) {
+ return node;
+ } else if (cmp > 0) {
+ return get(vtable, node->left, key);
+ } else {
+ return get(vtable, node->right, key);
+ }
+}
+
+void *gpr_avl_get(gpr_avl avl, void *key) {
+ gpr_avl_node *node = get(avl.vtable, avl.root, key);
+ return node ? node->value : NULL;
+}
+
+static gpr_avl_node *rotate_left(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ gpr_avl_node *n =
+ new_node(vtable->copy_key(right->key), vtable->copy_value(right->value),
+ new_node(key, value, left, ref_node(right->left)),
+ ref_node(right->right));
+ unref_node(vtable, right);
+ return n;
+}
+
+static gpr_avl_node *rotate_right(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ gpr_avl_node *n = new_node(
+ vtable->copy_key(left->key), vtable->copy_value(left->value),
+ ref_node(left->left), new_node(key, value, ref_node(left->right), right));
+ unref_node(vtable, left);
+ return n;
+}
+
+static gpr_avl_node *rotate_left_right(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ /* rotate_right(..., rotate_left(left), right) */
+ gpr_avl_node *n = new_node(
+ vtable->copy_key(left->right->key),
+ vtable->copy_value(left->right->value),
+ new_node(vtable->copy_key(left->key), vtable->copy_value(left->value),
+ ref_node(left->left), ref_node(left->right->left)),
+ new_node(key, value, ref_node(left->right->right), right));
+ unref_node(vtable, left);
+ return n;
+}
+
+static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ /* rotate_left(..., left, rotate_right(right)) */
+ gpr_avl_node *n = new_node(
+ vtable->copy_key(right->left->key),
+ vtable->copy_value(right->left->value),
+ new_node(key, value, left, ref_node(right->left->left)),
+ new_node(vtable->copy_key(right->key), vtable->copy_value(right->value),
+ ref_node(right->left->right), ref_node(right->right)));
+ unref_node(vtable, right);
+ return n;
+}
+
+static gpr_avl_node *rebalance(const gpr_avl_vtable *vtable, void *key,
+ void *value, gpr_avl_node *left,
+ gpr_avl_node *right) {
+ switch (node_height(left) - node_height(right)) {
+ case 2:
+ if (node_height(left->left) - node_height(left->right) == -1) {
+ return assert_invariants(
+ rotate_left_right(vtable, key, value, left, right));
+ } else {
+ return assert_invariants(rotate_right(vtable, key, value, left, right));
+ }
+ case -2:
+ if (node_height(right->left) - node_height(right->right) == 1) {
+ return assert_invariants(
+ rotate_right_left(vtable, key, value, left, right));
+ } else {
+ return assert_invariants(rotate_left(vtable, key, value, left, right));
+ }
+ default:
+ return assert_invariants(new_node(key, value, left, right));
+ }
+}
+
+static gpr_avl_node *add(const gpr_avl_vtable *vtable, gpr_avl_node *node,
+ void *key, void *value) {
+ long cmp;
+ if (node == NULL) {
+ return new_node(key, value, NULL, NULL);
+ }
+ cmp = vtable->compare_keys(node->key, key);
+ if (cmp == 0) {
+ return new_node(key, value, ref_node(node->left), ref_node(node->right));
+ } else if (cmp > 0) {
+ return rebalance(
+ vtable, vtable->copy_key(node->key), vtable->copy_value(node->value),
+ add(vtable, node->left, key, value), ref_node(node->right));
+ } else {
+ return rebalance(vtable, vtable->copy_key(node->key),
+ vtable->copy_value(node->value), ref_node(node->left),
+ add(vtable, node->right, key, value));
+ }
+}
+
+gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value) {
+ gpr_avl_node *old_root = avl.root;
+ avl.root = add(avl.vtable, avl.root, key, value);
+ assert_invariants(avl.root);
+ unref_node(avl.vtable, old_root);
+ return avl;
+}
+
+static gpr_avl_node *in_order_head(gpr_avl_node *node) {
+ while (node->left != NULL) {
+ node = node->left;
+ }
+ return node;
+}
+
+static gpr_avl_node *in_order_tail(gpr_avl_node *node) {
+ while (node->right != NULL) {
+ node = node->right;
+ }
+ return node;
+}
+
+static gpr_avl_node *remove(const gpr_avl_vtable *vtable, gpr_avl_node *node,
+ void *key) {
+ long cmp;
+ if (node == NULL) {
+ return NULL;
+ }
+ cmp = vtable->compare_keys(node->key, key);
+ if (cmp == 0) {
+ if (node->left == NULL) {
+ return ref_node(node->right);
+ } else if (node->right == NULL) {
+ return ref_node(node->left);
+ } else if (node->left->height < node->right->height) {
+ gpr_avl_node *h = in_order_head(node->right);
+ return rebalance(vtable, vtable->copy_key(h->key),
+ vtable->copy_value(h->value), ref_node(node->left),
+ remove(vtable, node->right, h->key));
+ } else {
+ gpr_avl_node *h = in_order_tail(node->left);
+ return rebalance(
+ vtable, vtable->copy_key(h->key), vtable->copy_value(h->value),
+ remove(vtable, node->left, h->key), ref_node(node->right));
+ }
+ } else if (cmp > 0) {
+ return rebalance(vtable, vtable->copy_key(node->key),
+ vtable->copy_value(node->value),
+ remove(vtable, node->left, key), ref_node(node->right));
+ } else {
+ return rebalance(vtable, vtable->copy_key(node->key),
+ vtable->copy_value(node->value), ref_node(node->left),
+ remove(vtable, node->right, key));
+ }
+}
+
+gpr_avl gpr_avl_remove(gpr_avl avl, void *key) {
+ gpr_avl_node *old_root = avl.root;
+ avl.root = remove(avl.vtable, avl.root, key);
+ assert_invariants(avl.root);
+ unref_node(avl.vtable, old_root);
+ return avl;
+}
+
+gpr_avl gpr_avl_ref(gpr_avl avl) {
+ ref_node(avl.root);
+ return avl;
+}
+
+void gpr_avl_unref(gpr_avl avl) { unref_node(avl.vtable, avl.root); }
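
The new gpr_avl is an immutable, reference-counted AVL tree: gpr_avl_add and gpr_avl_remove consume the tree they are given and return a rebalanced copy that shares unchanged nodes. A minimal usage sketch, assuming only the vtable field names seen above (compare_keys, copy_key/copy_value, destroy_key/destroy_value) and storing small integers cast to pointers so the copy/destroy hooks can be no-ops:

    #include <stddef.h>
    #include <stdio.h>
    #include <grpc/support/avl.h>

    static void nop_destroy(void *p) {}
    static void *identity_copy(void *p) { return p; }
    static long compare_int_keys(void *a, void *b) {
      size_t la = (size_t)a, lb = (size_t)b;
      return la < lb ? -1 : (la > lb ? 1 : 0);
    }

    int main(void) {
      gpr_avl_vtable vt;
      gpr_avl map;
      vt.destroy_key = nop_destroy;
      vt.copy_key = identity_copy;
      vt.compare_keys = compare_int_keys;
      vt.destroy_value = nop_destroy;
      vt.copy_value = identity_copy;

      map = gpr_avl_create(&vt);
      /* add/remove hand back a new tree; the old root is unreffed inside */
      map = gpr_avl_add(map, (void *)1, (void *)100);
      map = gpr_avl_add(map, (void *)2, (void *)200);
      printf("key 2 -> %d\n", (int)(size_t)gpr_avl_get(map, (void *)2));
      map = gpr_avl_remove(map, (void *)1);
      gpr_avl_unref(map); /* drops the remaining nodes */
      return 0;
    }
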
diff --git a/src/core/support/cmdline.c b/src/core/support/cmdline.c
index 87f60bca2e..b517f30b2d 100644
--- a/src/core/support/cmdline.c
+++ b/src/core/support/cmdline.c
@@ -62,11 +62,13 @@ struct gpr_cmdline {
void (*extra_arg)(void *user_data, const char *arg);
void *extra_arg_user_data;
- void (*state)(gpr_cmdline *cl, char *arg);
+ int (*state)(gpr_cmdline *cl, char *arg);
arg *cur_arg;
+
+ int survive_failure;
};
-static void normal_state(gpr_cmdline *cl, char *arg);
+static int normal_state(gpr_cmdline *cl, char *arg);
gpr_cmdline *gpr_cmdline_create(const char *description) {
gpr_cmdline *cl = gpr_malloc(sizeof(gpr_cmdline));
@@ -78,6 +80,10 @@ gpr_cmdline *gpr_cmdline_create(const char *description) {
return cl;
}
+void gpr_cmdline_set_survive_failure(gpr_cmdline *cl) {
+ cl->survive_failure = 1;
+}
+
void gpr_cmdline_destroy(gpr_cmdline *cl) {
while (cl->args) {
arg *a = cl->args;
@@ -185,16 +191,22 @@ char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) {
return tmp;
}
-static void print_usage_and_die(gpr_cmdline *cl) {
+static int print_usage_and_die(gpr_cmdline *cl) {
char *usage = gpr_cmdline_usage_string(cl, cl->argv0);
fprintf(stderr, "%s", usage);
gpr_free(usage);
- exit(1);
+ if (!cl->survive_failure) {
+ exit(1);
+ }
+ return 0;
}
-static void extra_state(gpr_cmdline *cl, char *str) {
- if (!cl->extra_arg) print_usage_and_die(cl);
+static int extra_state(gpr_cmdline *cl, char *str) {
+ if (!cl->extra_arg) {
+ return print_usage_and_die(cl);
+ }
cl->extra_arg(cl->extra_arg_user_data, str);
+ return 1;
}
static arg *find_arg(gpr_cmdline *cl, char *name) {
@@ -208,13 +220,13 @@ static arg *find_arg(gpr_cmdline *cl, char *name) {
if (!a) {
fprintf(stderr, "Unknown argument: %s\n", name);
- print_usage_and_die(cl);
+ return NULL;
}
return a;
}
-static void value_state(gpr_cmdline *cl, char *str) {
+static int value_state(gpr_cmdline *cl, char *str) {
long intval;
char *end;
@@ -226,7 +238,7 @@ static void value_state(gpr_cmdline *cl, char *str) {
if (*end || intval < INT_MIN || intval > INT_MAX) {
fprintf(stderr, "expected integer, got '%s' for %s\n", str,
cl->cur_arg->name);
- print_usage_and_die(cl);
+ return print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = (int)intval;
break;
@@ -238,7 +250,7 @@ static void value_state(gpr_cmdline *cl, char *str) {
} else {
fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
cl->cur_arg->name);
- print_usage_and_die(cl);
+ return print_usage_and_die(cl);
}
break;
case ARGTYPE_STRING:
@@ -247,16 +259,18 @@ static void value_state(gpr_cmdline *cl, char *str) {
}
cl->state = normal_state;
+ return 1;
}
-static void normal_state(gpr_cmdline *cl, char *str) {
+static int normal_state(gpr_cmdline *cl, char *str) {
char *eq = NULL;
char *tmp = NULL;
char *arg_name = NULL;
+ int r = 1;
if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
0 == strcmp(str, "-h")) {
- print_usage_and_die(cl);
+ return print_usage_and_die(cl);
}
cl->cur_arg = NULL;
@@ -266,7 +280,7 @@ static void normal_state(gpr_cmdline *cl, char *str) {
if (str[2] == 0) {
/* handle '--' to move to just extra args */
cl->state = extra_state;
- return;
+ return 1;
}
str += 2;
} else {
@@ -277,12 +291,15 @@ static void normal_state(gpr_cmdline *cl, char *str) {
/* str is of the form '--no-foo' - it's a flag disable */
str += 3;
cl->cur_arg = find_arg(cl, str);
+ if (cl->cur_arg == NULL) {
+ return print_usage_and_die(cl);
+ }
if (cl->cur_arg->type != ARGTYPE_BOOL) {
fprintf(stderr, "%s is not a flag argument\n", str);
- print_usage_and_die(cl);
+ return print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = 0;
- return; /* early out */
+ return 1; /* early out */
}
eq = strchr(str, '=');
if (eq != NULL) {
@@ -294,9 +311,12 @@ static void normal_state(gpr_cmdline *cl, char *str) {
arg_name = str;
}
cl->cur_arg = find_arg(cl, arg_name);
+ if (cl->cur_arg == NULL) {
+ return print_usage_and_die(cl);
+ }
if (eq != NULL) {
/* str was of the type --foo=value, parse the value */
- value_state(cl, eq + 1);
+ r = value_state(cl, eq + 1);
} else if (cl->cur_arg->type != ARGTYPE_BOOL) {
/* flag types don't have a '--foo value' variant, other types do */
cl->state = value_state;
@@ -305,19 +325,23 @@ static void normal_state(gpr_cmdline *cl, char *str) {
*(int *)cl->cur_arg->value = 1;
}
} else {
- extra_state(cl, str);
+ r = extra_state(cl, str);
}
gpr_free(tmp);
+ return r;
}
-void gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
+int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
int i;
GPR_ASSERT(argc >= 1);
cl->argv0 = argv[0];
for (i = 1; i < argc; i++) {
- cl->state(cl, argv[i]);
+ if (!cl->state(cl, argv[i])) {
+ return 0;
+ }
}
+ return 1;
}
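
A short sketch of the new non-fatal parse path. gpr_cmdline_set_survive_failure and the int return from gpr_cmdline_parse come from this patch; gpr_cmdline_add_int/gpr_cmdline_add_flag are assumed from the existing cmdline API.

    #include <stdio.h>
    #include <grpc/support/cmdline.h>

    int main(int argc, char **argv) {
      int port = 8080;
      int verbose = 0;
      gpr_cmdline *cl = gpr_cmdline_create("example server");
      gpr_cmdline_add_int(cl, "port", "TCP port to listen on", &port);
      gpr_cmdline_add_flag(cl, "verbose", "enable verbose logging", &verbose);
      /* with survive_failure set, bad input prints usage and makes
         gpr_cmdline_parse return 0 instead of calling exit(1) */
      gpr_cmdline_set_survive_failure(cl);
      if (!gpr_cmdline_parse(cl, argc, argv)) {
        gpr_cmdline_destroy(cl);
        return 1;
      }
      printf("port=%d verbose=%d\n", port, verbose);
      gpr_cmdline_destroy(cl);
      return 0;
    }
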
diff --git a/src/core/support/log.c b/src/core/support/log.c
index f52c2035b9..04156a5b1f 100644
--- a/src/core/support/log.c
+++ b/src/core/support/log.c
@@ -32,6 +32,7 @@
*/
#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
#include <stdio.h>
#include <string.h>
@@ -48,7 +49,7 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
case GPR_LOG_SEVERITY_ERROR:
return "E";
}
- return "UNKNOWN";
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
diff --git a/src/core/support/log_linux.c b/src/core/support/log_linux.c
index 02f64d8b7e..d66b7a3cc0 100644
--- a/src/core/support/log_linux.c
+++ b/src/core/support/log_linux.c
@@ -76,16 +76,18 @@ void gpr_default_log(gpr_log_func_args *args) {
char *prefix;
const char *display_file;
char time_buffer[64];
+ time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
+ timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
if (final_slash == NULL)
display_file = args->file;
else
display_file = final_slash + 1;
- if (!localtime_r(&now.tv_sec, &tm)) {
+ if (!localtime_r(&timer, &tm)) {
strcpy(time_buffer, "error:localtime");
} else if (0 ==
strftime(time_buffer, sizeof(time_buffer), "%m%d %H:%M:%S", &tm)) {
diff --git a/src/core/support/log_posix.c b/src/core/support/log_posix.c
index 8b050dbee7..8986254e4e 100644
--- a/src/core/support/log_posix.c
+++ b/src/core/support/log_posix.c
@@ -75,16 +75,18 @@ void gpr_default_log(gpr_log_func_args *args) {
char *final_slash;
const char *display_file;
char time_buffer[64];
+ time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
+ timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '/');
if (final_slash == NULL)
display_file = args->file;
else
display_file = final_slash + 1;
- if (!localtime_r(&now.tv_sec, &tm)) {
+ if (!localtime_r(&timer, &tm)) {
strcpy(time_buffer, "error:localtime");
} else if (0 ==
strftime(time_buffer, sizeof(time_buffer), "%m%d %H:%M:%S", &tm)) {
diff --git a/src/core/support/log_win32.c b/src/core/support/log_win32.c
index b68239f8f5..28e7768f80 100644
--- a/src/core/support/log_win32.c
+++ b/src/core/support/log_win32.c
@@ -84,16 +84,18 @@ void gpr_default_log(gpr_log_func_args *args) {
char *final_slash;
const char *display_file;
char time_buffer[64];
+ time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
struct tm tm;
+ timer = (time_t)now.tv_sec;
final_slash = strrchr(args->file, '\\');
if (final_slash == NULL)
display_file = args->file;
else
display_file = final_slash + 1;
- if (localtime_s(&tm, &now.tv_sec)) {
+ if (localtime_s(&tm, &timer)) {
strcpy(time_buffer, "error:localtime");
} else if (0 ==
strftime(time_buffer, sizeof(time_buffer), "%m%d %H:%M:%S", &tm)) {
diff --git a/src/core/support/slice.c b/src/core/support/slice.c
index 53024e88f1..9f0ded4932 100644
--- a/src/core/support/slice.c
+++ b/src/core/support/slice.c
@@ -57,6 +57,21 @@ void gpr_slice_unref(gpr_slice slice) {
}
}
+/* gpr_slice_from_static_string support structure - a refcount that does
+ nothing */
+static void noop_ref_or_unref(void *unused) {}
+
+static gpr_slice_refcount noop_refcount = {noop_ref_or_unref,
+ noop_ref_or_unref};
+
+gpr_slice gpr_slice_from_static_string(const char *s) {
+ gpr_slice slice;
+ slice.refcount = &noop_refcount;
+ slice.data.refcounted.bytes = (gpr_uint8 *)s;
+ slice.data.refcounted.length = strlen(s);
+ return slice;
+}
+
/* gpr_slice_new support structures - we create a refcount object extended
with the user provided data pointer & destroy function */
typedef struct new_slice_refcount {
@@ -326,10 +341,3 @@ int gpr_slice_str_cmp(gpr_slice a, const char *b) {
if (d != 0) return d;
return memcmp(GPR_SLICE_START_PTR(a), b, b_length);
}
-
-char *gpr_slice_to_cstring(gpr_slice slice) {
- char *result = gpr_malloc(GPR_SLICE_LENGTH(slice) + 1);
- memcpy(result, GPR_SLICE_START_PTR(slice), GPR_SLICE_LENGTH(slice));
- result[GPR_SLICE_LENGTH(slice)] = '\0';
- return result;
-}
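
gpr_slice_from_static_string, added above, wraps a string literal without copying it; the shared no-op refcount makes ref/unref free. A minimal sketch:

    #include <grpc/support/slice.h>

    int main(void) {
      gpr_slice s = gpr_slice_from_static_string("hello");
      gpr_slice copy = gpr_slice_ref(s); /* bumps the no-op refcount */
      gpr_slice_unref(copy);
      gpr_slice_unref(s); /* nothing is freed: the bytes are the literal */
      return GPR_SLICE_LENGTH(s) == 5 ? 0 : 1;
    }
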
diff --git a/src/core/support/slice_buffer.c b/src/core/support/slice_buffer.c
index 310fbe1350..856d3a2439 100644
--- a/src/core/support/slice_buffer.c
+++ b/src/core/support/slice_buffer.c
@@ -31,6 +31,7 @@
*
*/
+#include <grpc/support/port_platform.h>
#include <grpc/support/slice_buffer.h>
#include <string.h>
@@ -208,6 +209,44 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
src->length = 0;
}
+void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
+ gpr_slice_buffer *dst) {
+ size_t src_idx;
+ size_t output_len = dst->length + n;
+ size_t new_input_len = src->length - n;
+ GPR_ASSERT(src->length >= n);
+ if (src->length == n) {
+ gpr_slice_buffer_move_into(src, dst);
+ return;
+ }
+ src_idx = 0;
+ while (src_idx < src->capacity) {
+ gpr_slice slice = src->slices[src_idx];
+ size_t slice_len = GPR_SLICE_LENGTH(slice);
+ if (n > slice_len) {
+ gpr_slice_buffer_add(dst, slice);
+ n -= slice_len;
+ src_idx++;
+ } else if (n == slice_len) {
+ gpr_slice_buffer_add(dst, slice);
+ src_idx++;
+ break;
+ } else { /* n < slice_len */
+ src->slices[src_idx] = gpr_slice_split_tail(&slice, n);
+ GPR_ASSERT(GPR_SLICE_LENGTH(slice) == n);
+ GPR_ASSERT(GPR_SLICE_LENGTH(src->slices[src_idx]) == slice_len - n);
+ gpr_slice_buffer_add(dst, slice);
+ break;
+ }
+ }
+ GPR_ASSERT(dst->length == output_len);
+ memmove(src->slices, src->slices + src_idx,
+ sizeof(gpr_slice) * (src->count - src_idx));
+ src->count -= src_idx;
+ src->length = new_input_len;
+ GPR_ASSERT(src->count > 0);
+}
+
void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
gpr_slice_buffer *garbage) {
GPR_ASSERT(n <= sb->length);
@@ -231,3 +270,13 @@ void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
}
}
}
+
+gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *sb) {
+ gpr_slice slice;
+ GPR_ASSERT(sb->count > 0);
+ slice = sb->slices[0];
+ memmove(&sb->slices[0], &sb->slices[1], (sb->count - 1) * sizeof(gpr_slice));
+ sb->count--;
+ sb->length -= GPR_SLICE_LENGTH(slice);
+ return slice;
+}
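
A sketch of the two new slice_buffer helpers: gpr_slice_buffer_move_first moves exactly n bytes from the front of one buffer to the back of another, splitting a slice when needed, and gpr_slice_buffer_take_first pops the first whole slice. The strings and byte counts below are illustrative only.

    #include <grpc/support/slice.h>
    #include <grpc/support/slice_buffer.h>

    int main(void) {
      gpr_slice_buffer src, dst;
      gpr_slice first;
      gpr_slice_buffer_init(&src);
      gpr_slice_buffer_init(&dst);
      gpr_slice_buffer_add(&src, gpr_slice_from_copied_string("hello "));
      gpr_slice_buffer_add(&src, gpr_slice_from_copied_string("world"));

      /* move the first 8 bytes; the second slice is split in place */
      gpr_slice_buffer_move_first(&src, 8, &dst);
      /* dst now holds "hello " + "wo" (8 bytes), src holds "rld" (3 bytes) */

      first = gpr_slice_buffer_take_first(&dst); /* pops the "hello " slice */
      gpr_slice_unref(first);

      gpr_slice_buffer_destroy(&src);
      gpr_slice_buffer_destroy(&dst);
      return 0;
    }
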
diff --git a/src/core/support/string.c b/src/core/support/string.c
index e0ffeb8a4a..46a7ca3d46 100644
--- a/src/core/support/string.c
+++ b/src/core/support/string.c
@@ -153,8 +153,8 @@ void gpr_reverse_bytes(char *str, int len) {
}
int gpr_ltoa(long value, char *string) {
+ long sign;
int i = 0;
- int neg = value < 0;
if (value == 0) {
string[0] = '0';
@@ -162,12 +162,33 @@ int gpr_ltoa(long value, char *string) {
return 1;
}
- if (neg) value = -value;
+ sign = value < 0 ? -1 : 1;
while (value) {
- string[i++] = (char)('0' + value % 10);
+ string[i++] = (char)('0' + sign * (value % 10));
value /= 10;
}
- if (neg) string[i++] = '-';
+ if (sign < 0) string[i++] = '-';
+ gpr_reverse_bytes(string, i);
+ string[i] = 0;
+ return i;
+}
+
+int gpr_int64toa(gpr_int64 value, char *string) {
+ gpr_int64 sign;
+ int i = 0;
+
+ if (value == 0) {
+ string[0] = '0';
+ string[1] = 0;
+ return 1;
+ }
+
+ sign = value < 0 ? -1 : 1;
+ while (value) {
+ string[i++] = (char)('0' + sign * (value % 10));
+ value /= 10;
+ }
+ if (sign < 0) string[i++] = '-';
gpr_reverse_bytes(string, i);
string[i] = 0;
return i;
diff --git a/src/core/support/string.h b/src/core/support/string.h
index a28e00fd3e..9b604ac5bf 100644
--- a/src/core/support/string.h
+++ b/src/core/support/string.h
@@ -70,6 +70,16 @@ int gpr_parse_bytes_to_uint32(const char *data, size_t length,
output must be at least GPR_LTOA_MIN_BUFSIZE bytes long. */
int gpr_ltoa(long value, char *output);
+/* Minimum buffer size for calling int64toa */
+#define GPR_INT64TOA_MIN_BUFSIZE (3 * sizeof(gpr_int64))
+
+/* Convert an int64 to a string in base 10; returns the length of the
+output string (or 0 on failure).
+output must be at least GPR_INT64TOA_MIN_BUFSIZE bytes long.
+NOTE: This function ensures sufficient bit width even on Win x64,
+where long is only 32 bits in size. */
+int gpr_int64toa(gpr_int64 value, char *output);
+
/* Reverse a run of bytes */
void gpr_reverse_bytes(char *str, int len);
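
A small sketch of the new gpr_int64toa; the internal header path is assumed since the function is not part of the public API.

    #include <stdio.h>
    #include "src/core/support/string.h"

    int main(void) {
      char buf[GPR_INT64TOA_MIN_BUFSIZE]; /* 24 bytes, enough for any int64 */
      int len = gpr_int64toa(-1234567890123LL, buf);
      printf("%s (%d chars)\n", buf, len); /* -1234567890123 (14 chars) */
      return 0;
    }
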
diff --git a/src/core/support/sync_posix.c b/src/core/support/sync_posix.c
index 39c96feb13..d6a0f7c325 100644
--- a/src/core/support/sync_posix.c
+++ b/src/core/support/sync_posix.c
@@ -59,8 +59,11 @@ void gpr_mu_unlock(gpr_mu* mu) {
}
int gpr_mu_trylock(gpr_mu* mu) {
- int err = pthread_mutex_trylock(mu);
+ int err;
+ GPR_TIMER_BEGIN("gpr_mu_trylock", 0);
+ err = pthread_mutex_trylock(mu);
GPR_ASSERT(err == 0 || err == EBUSY);
+ GPR_TIMER_END("gpr_mu_trylock", 0);
return err == 0;
}
diff --git a/src/core/support/thd_posix.c b/src/core/support/thd_posix.c
index c36d94d044..653a1c88c1 100644
--- a/src/core/support/thd_posix.c
+++ b/src/core/support/thd_posix.c
@@ -53,7 +53,7 @@ struct thd_arg {
/* Body of every thread started via gpr_thd_new. */
static void *thread_body(void *v) {
struct thd_arg a = *(struct thd_arg *)v;
- gpr_free(v);
+ free(v);
(*a.body)(a.arg);
return NULL;
}
@@ -63,7 +63,10 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
int thread_started;
pthread_attr_t attr;
pthread_t p;
- struct thd_arg *a = gpr_malloc(sizeof(*a));
+ /* don't use gpr_malloc as we may cause an infinite recursion with
+ * the profiling code */
+ struct thd_arg *a = malloc(sizeof(*a));
+ GPR_ASSERT(a != NULL);
a->body = thd_body;
a->arg = arg;
@@ -78,7 +81,7 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
thread_started = (pthread_create(&p, &attr, &thread_body, a) == 0);
GPR_ASSERT(pthread_attr_destroy(&attr) == 0);
if (!thread_started) {
- gpr_free(a);
+ free(a);
}
*t = (gpr_thd_id)p;
return thread_started;
diff --git a/src/core/support/time.c b/src/core/support/time.c
index 929adac918..197fa9ad44 100644
--- a/src/core/support/time.c
+++ b/src/core/support/time.c
@@ -56,22 +56,6 @@ gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b) {
return gpr_time_cmp(a, b) > 0 ? a : b;
}
-/* There's no standard TIME_T_MIN and TIME_T_MAX, so we construct them. The
- following assumes that signed types are two's-complement and that bytes are
- 8 bits. */
-
-/* The top bit of integral type t. */
-#define TOP_BIT_OF_TYPE(t) (((gpr_uintmax)1) << ((8 * sizeof(t)) - 1))
-
-/* Return whether integral type t is signed. */
-#define TYPE_IS_SIGNED(t) (((t)1) > (t) ~(t)0)
-
-/* The minimum and maximum value of integral type t. */
-#define TYPE_MIN(t) ((t)(TYPE_IS_SIGNED(t) ? TOP_BIT_OF_TYPE(t) : 0))
-#define TYPE_MAX(t) \
- ((t)(TYPE_IS_SIGNED(t) ? (TOP_BIT_OF_TYPE(t) - 1) \
- : ((TOP_BIT_OF_TYPE(t) - 1) << 1) + 1))
-
gpr_timespec gpr_time_0(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = 0;
@@ -82,7 +66,7 @@ gpr_timespec gpr_time_0(gpr_clock_type type) {
gpr_timespec gpr_inf_future(gpr_clock_type type) {
gpr_timespec out;
- out.tv_sec = TYPE_MAX(time_t);
+ out.tv_sec = INT64_MAX;
out.tv_nsec = 0;
out.clock_type = type;
return out;
@@ -90,7 +74,7 @@ gpr_timespec gpr_inf_future(gpr_clock_type type) {
gpr_timespec gpr_inf_past(gpr_clock_type type) {
gpr_timespec out;
- out.tv_sec = TYPE_MIN(time_t);
+ out.tv_sec = INT64_MIN;
out.tv_nsec = 0;
out.clock_type = type;
return out;
@@ -108,11 +92,11 @@ gpr_timespec gpr_time_from_nanos(long ns, gpr_clock_type type) {
result = gpr_inf_past(type);
} else if (ns >= 0) {
result.tv_sec = ns / GPR_NS_PER_SEC;
- result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
+ result.tv_nsec = (gpr_int32)(ns - result.tv_sec * GPR_NS_PER_SEC);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
- result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
+ result.tv_nsec = (gpr_int32)(ns - result.tv_sec * GPR_NS_PER_SEC);
}
return result;
}
@@ -126,11 +110,11 @@ gpr_timespec gpr_time_from_micros(long us, gpr_clock_type type) {
result = gpr_inf_past(type);
} else if (us >= 0) {
result.tv_sec = us / 1000000;
- result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
+ result.tv_nsec = (gpr_int32)((us - result.tv_sec * 1000000) * 1000);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1;
- result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
+ result.tv_nsec = (gpr_int32)((us - result.tv_sec * 1000000) * 1000);
}
return result;
}
@@ -144,11 +128,11 @@ gpr_timespec gpr_time_from_millis(long ms, gpr_clock_type type) {
result = gpr_inf_past(type);
} else if (ms >= 0) {
result.tv_sec = ms / 1000;
- result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
+ result.tv_nsec = (gpr_int32)((ms - result.tv_sec * 1000) * 1000000);
} else {
/* Calculation carefully formulated to avoid any possible under/overflow. */
result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1;
- result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
+ result.tv_nsec = (gpr_int32)((ms - result.tv_sec * 1000) * 1000000);
}
return result;
}
@@ -197,7 +181,7 @@ gpr_timespec gpr_time_from_hours(long h, gpr_clock_type type) {
gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec sum;
- int inc = 0;
+ gpr_int64 inc = 0;
GPR_ASSERT(b.clock_type == GPR_TIMESPAN);
sum.clock_type = a.clock_type;
sum.tv_nsec = a.tv_nsec + b.tv_nsec;
@@ -205,17 +189,17 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
sum.tv_nsec -= GPR_NS_PER_SEC;
inc++;
}
- if (a.tv_sec == TYPE_MAX(time_t) || a.tv_sec == TYPE_MIN(time_t)) {
+ if (a.tv_sec == INT64_MAX || a.tv_sec == INT64_MIN) {
sum = a;
- } else if (b.tv_sec == TYPE_MAX(time_t) ||
- (b.tv_sec >= 0 && a.tv_sec >= TYPE_MAX(time_t) - b.tv_sec)) {
+ } else if (b.tv_sec == INT64_MAX ||
+ (b.tv_sec >= 0 && a.tv_sec >= INT64_MAX - b.tv_sec)) {
sum = gpr_inf_future(sum.clock_type);
- } else if (b.tv_sec == TYPE_MIN(time_t) ||
- (b.tv_sec <= 0 && a.tv_sec <= TYPE_MIN(time_t) - b.tv_sec)) {
+ } else if (b.tv_sec == INT64_MIN ||
+ (b.tv_sec <= 0 && a.tv_sec <= INT64_MIN - b.tv_sec)) {
sum = gpr_inf_past(sum.clock_type);
} else {
sum.tv_sec = a.tv_sec + b.tv_sec;
- if (inc != 0 && sum.tv_sec == TYPE_MAX(time_t) - 1) {
+ if (inc != 0 && sum.tv_sec == INT64_MAX - 1) {
sum = gpr_inf_future(sum.clock_type);
} else {
sum.tv_sec += inc;
@@ -226,7 +210,7 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
gpr_timespec diff;
- int dec = 0;
+ gpr_int64 dec = 0;
if (b.clock_type == GPR_TIMESPAN) {
diff.clock_type = a.clock_type;
} else {
@@ -238,17 +222,17 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
diff.tv_nsec += GPR_NS_PER_SEC;
dec++;
}
- if (a.tv_sec == TYPE_MAX(time_t) || a.tv_sec == TYPE_MIN(time_t)) {
+ if (a.tv_sec == INT64_MAX || a.tv_sec == INT64_MIN) {
diff = a;
- } else if (b.tv_sec == TYPE_MIN(time_t) ||
- (b.tv_sec <= 0 && a.tv_sec >= TYPE_MAX(time_t) + b.tv_sec)) {
+ } else if (b.tv_sec == INT64_MIN ||
+ (b.tv_sec <= 0 && a.tv_sec >= INT64_MAX + b.tv_sec)) {
diff = gpr_inf_future(GPR_CLOCK_REALTIME);
- } else if (b.tv_sec == TYPE_MAX(time_t) ||
- (b.tv_sec >= 0 && a.tv_sec <= TYPE_MIN(time_t) + b.tv_sec)) {
+ } else if (b.tv_sec == INT64_MAX ||
+ (b.tv_sec >= 0 && a.tv_sec <= INT64_MIN + b.tv_sec)) {
diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec = a.tv_sec - b.tv_sec;
- if (dec != 0 && diff.tv_sec == TYPE_MIN(time_t) + 1) {
+ if (dec != 0 && diff.tv_sec == INT64_MIN + 1) {
diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec -= dec;
@@ -297,11 +281,11 @@ gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type clock_type) {
}
if (t.tv_nsec == 0) {
- if (t.tv_sec == TYPE_MAX(time_t)) {
+ if (t.tv_sec == INT64_MAX) {
t.clock_type = clock_type;
return t;
}
- if (t.tv_sec == TYPE_MIN(time_t)) {
+ if (t.tv_sec == INT64_MIN) {
t.clock_type = clock_type;
return t;
}
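
With tv_sec widened to 64 bits, the arithmetic above saturates at INT64_MAX/INT64_MIN rather than at time_t limits. A small sketch of that saturation behaviour:

    #include <grpc/support/time.h>

    int main(void) {
      gpr_timespec week = gpr_time_from_seconds(7 * 24 * 3600, GPR_TIMESPAN);
      gpr_timespec inf = gpr_inf_future(GPR_CLOCK_MONOTONIC);
      /* adding a finite span to infinity stays at infinity instead of
         overflowing the 64-bit tv_sec */
      gpr_timespec still_inf = gpr_time_add(inf, week);
      return gpr_time_cmp(still_inf, inf) == 0 ? 0 : 1;
    }
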
diff --git a/src/core/support/time_posix.c b/src/core/support/time_posix.c
index 02cfca8555..ba72572e05 100644
--- a/src/core/support/time_posix.c
+++ b/src/core/support/time_posix.c
@@ -45,7 +45,11 @@
static struct timespec timespec_from_gpr(gpr_timespec gts) {
struct timespec rv;
- rv.tv_sec = gts.tv_sec;
+ if (sizeof(time_t) < sizeof(gpr_int64)) {
+ /* fine to assert, as this is only used in gpr_sleep_until */
+ GPR_ASSERT(gts.tv_sec <= INT32_MAX && gts.tv_sec >= INT32_MIN);
+ }
+ rv.tv_sec = (time_t)gts.tv_sec;
rv.tv_nsec = gts.tv_nsec;
return rv;
}
@@ -53,9 +57,14 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
#if _POSIX_TIMERS > 0
static gpr_timespec gpr_from_timespec(struct timespec ts,
gpr_clock_type clock_type) {
+ /*
+ * timespec.tv_sec may be narrower than gpr_timespec.tv_sec,
+ * but we are only using this function to implement gpr_now
+ * so there's no need to handle "infinity" values.
+ */
gpr_timespec rv;
rv.tv_sec = ts.tv_sec;
- rv.tv_nsec = (int)ts.tv_nsec;
+ rv.tv_nsec = (gpr_int32)ts.tv_nsec;
rv.clock_type = clock_type;
return rv;
}
@@ -110,8 +119,8 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
break;
case GPR_CLOCK_MONOTONIC:
now_dbl = (mach_absolute_time() - g_time_start) * g_time_scale;
- now.tv_sec = (time_t)(now_dbl * 1e-9);
- now.tv_nsec = (int)(now_dbl - ((double)now.tv_sec) * 1e9);
+ now.tv_sec = (gpr_int64)(now_dbl * 1e-9);
+ now.tv_nsec = (gpr_int32)(now_dbl - ((double)now.tv_sec) * 1e9);
break;
case GPR_CLOCK_PRECISE:
gpr_precise_clock_now(&now);
diff --git a/src/core/support/time_precise.c b/src/core/support/time_precise.c
index b37517e639..4de1d9b071 100644
--- a/src/core/support/time_precise.c
+++ b/src/core/support/time_precise.c
@@ -75,8 +75,8 @@ void gpr_precise_clock_now(gpr_timespec *clk) {
gpr_get_cycle_counter(&counter);
secs = (double)(counter - start_cycle) / cycles_per_second;
clk->clock_type = GPR_CLOCK_PRECISE;
- clk->tv_sec = (time_t)secs;
- clk->tv_nsec = (int)(1e9 * (secs - (double)clk->tv_sec));
+ clk->tv_sec = (gpr_int64)secs;
+ clk->tv_nsec = (gpr_int32)(1e9 * (secs - (double)clk->tv_sec));
}
#else /* GRPC_TIMERS_RDTSC */
diff --git a/src/core/support/time_win32.c b/src/core/support/time_win32.c
index 623a8d9233..7ccaaa248d 100644
--- a/src/core/support/time_win32.c
+++ b/src/core/support/time_win32.c
@@ -62,15 +62,15 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
switch (clock) {
case GPR_CLOCK_REALTIME:
_ftime_s(&now_tb);
- now_tv.tv_sec = now_tb.time;
+ now_tv.tv_sec = (gpr_int64)now_tb.time;
now_tv.tv_nsec = now_tb.millitm * 1000000;
break;
case GPR_CLOCK_MONOTONIC:
case GPR_CLOCK_PRECISE:
QueryPerformanceCounter(&timestamp);
now_dbl = (timestamp.QuadPart - g_start_time.QuadPart) * g_time_scale;
- now_tv.tv_sec = (time_t)now_dbl;
- now_tv.tv_nsec = (int)((now_dbl - (double)now_tv.tv_sec) * 1e9);
+ now_tv.tv_sec = (gpr_int64)now_dbl;
+ now_tv.tv_nsec = (gpr_int32)((now_dbl - (double)now_tv.tv_sec) * 1e9);
break;
}
return now_tv;
diff --git a/src/core/surface/byte_buffer_queue.c b/src/core/surface/byte_buffer_queue.c
deleted file mode 100644
index e47dc4f4ce..0000000000
--- a/src/core/surface/byte_buffer_queue.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/surface/byte_buffer_queue.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/useful.h>
-
-static void bba_destroy(grpc_bbq_array *array, size_t start_pos) {
- size_t i;
- for (i = start_pos; i < array->count; i++) {
- grpc_byte_buffer_destroy(array->data[i]);
- }
- gpr_free(array->data);
-}
-
-/* Append an operation to an array, expanding as needed */
-static void bba_push(grpc_bbq_array *a, grpc_byte_buffer *buffer) {
- if (a->count == a->capacity) {
- a->capacity = GPR_MAX(a->capacity * 2, 8);
- a->data = gpr_realloc(a->data, sizeof(grpc_byte_buffer *) * a->capacity);
- }
- a->data[a->count++] = buffer;
-}
-
-void grpc_bbq_destroy(grpc_byte_buffer_queue *q) {
- bba_destroy(&q->filling, 0);
- bba_destroy(&q->draining, q->drain_pos);
-}
-
-int grpc_bbq_empty(grpc_byte_buffer_queue *q) {
- return (q->drain_pos == q->draining.count && q->filling.count == 0);
-}
-
-void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *buffer) {
- q->bytes += grpc_byte_buffer_length(buffer);
- bba_push(&q->filling, buffer);
-}
-
-void grpc_bbq_flush(grpc_byte_buffer_queue *q) {
- grpc_byte_buffer *bb;
- while ((bb = grpc_bbq_pop(q))) {
- grpc_byte_buffer_destroy(bb);
- }
-}
-
-size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q) { return q->bytes; }
-
-grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
- grpc_bbq_array temp_array;
- grpc_byte_buffer *out;
-
- if (q->drain_pos == q->draining.count) {
- if (q->filling.count == 0) {
- return NULL;
- }
- q->draining.count = 0;
- q->drain_pos = 0;
- /* swap arrays */
- temp_array = q->filling;
- q->filling = q->draining;
- q->draining = temp_array;
- }
-
- out = q->draining.data[q->drain_pos++];
- q->bytes -= grpc_byte_buffer_length(out);
- return out;
-}
diff --git a/src/core/surface/call.c b/src/core/surface/call.c
index 81ff215c0c..a162d99193 100644
--- a/src/core/surface/call.c
+++ b/src/core/surface/call.c
@@ -31,6 +31,7 @@
*
*/
#include <assert.h>
+#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -42,16 +43,17 @@
#include <grpc/support/useful.h>
#include "src/core/channel/channel_stack.h"
+#include "src/core/compression/algorithm_metadata.h"
#include "src/core/iomgr/timer.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/surface/api_trace.h"
-#include "src/core/surface/byte_buffer_queue.h"
#include "src/core/surface/call.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/completion_queue.h"
+#include "src/core/transport/static_metadata.h"
-/** The maximum number of completions possible.
+/** The maximum number of concurrent batches possible.
Based upon the maximum number of individually queueable ops in the batch
api:
- initial metadata send
@@ -60,7 +62,7 @@
- initial metadata recv
- message recv
- status/close recv (depending on client/server) */
-#define MAX_CONCURRENT_COMPLETIONS 6
+#define MAX_CONCURRENT_BATCHES 6
typedef struct {
grpc_ioreq_completion_func on_complete;
@@ -68,24 +70,7 @@ typedef struct {
int success;
} completed_request;
-/* See request_set in grpc_call below for a description */
-#define REQSET_EMPTY 'X'
-#define REQSET_DONE 'Y'
-
-#define MAX_SEND_INITIAL_METADATA_COUNT 3
-
-typedef struct {
- /* Overall status of the operation: starts OK, may degrade to
- non-OK */
- gpr_uint8 success;
- /* a bit mask of which request ops are needed (1u << opid) */
- gpr_uint16 need_mask;
- /* a bit mask of which request ops are now completed */
- gpr_uint16 complete_mask;
- /* Completion function to call at the end of the operation */
- grpc_ioreq_completion_func on_complete;
- void *user_data;
-} reqinfo_master;
+#define MAX_SEND_EXTRA_METADATA_COUNT 3
/* Status data for a request can come from several sources; this
enumerates them all, and acts as a priority sorting for which
@@ -130,106 +115,63 @@ typedef enum {
WRITE_STATE_WRITE_CLOSED
} write_state;
+typedef struct batch_control {
+ grpc_call *call;
+ grpc_cq_completion cq_completion;
+ grpc_closure finish_batch;
+ void *notify_tag;
+ gpr_refcount steps_to_complete;
+
+ gpr_uint8 send_initial_metadata;
+ gpr_uint8 send_message;
+ gpr_uint8 send_final_op;
+ gpr_uint8 recv_initial_metadata;
+ gpr_uint8 recv_message;
+ gpr_uint8 recv_final_op;
+ gpr_uint8 is_notify_tag_closure;
+ gpr_uint8 success;
+} batch_control;
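The batch_control above replaces the old per-ioreq masters: steps_to_complete counts the asynchronous sub-steps still outstanding for one batch, and whichever step finishes last publishes the completion. A hedged sketch of that gating (post_batch_completion is defined further down in this file; the helper name here is illustrative):

    /* Each pending sub-step (the transport batch itself, message
       reassembly, ...) holds one count; the last one to finish posts. */
    static void one_step_done(grpc_exec_ctx *exec_ctx, batch_control *bctl) {
      if (gpr_unref(&bctl->steps_to_complete)) {
        post_batch_completion(exec_ctx, bctl);
      }
    }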
+
struct grpc_call {
grpc_completion_queue *cq;
grpc_channel *channel;
grpc_call *parent;
grpc_call *first_child;
- grpc_mdctx *metadata_context;
/* TODO(ctiller): share with cq if possible? */
gpr_mu mu;
- gpr_mu completion_mu;
- /* how far through the stream have we read? */
- read_state read_state;
- /* how far through the stream have we written? */
- write_state write_state;
/* client or server call */
gpr_uint8 is_client;
/* is the alarm set */
gpr_uint8 have_alarm;
- /* are we currently performing a send operation */
- gpr_uint8 sending;
- /* are we currently performing a recv operation */
- gpr_uint8 receiving;
- /* are we currently completing requests */
- gpr_uint8 completing;
/** has grpc_call_destroy been called */
gpr_uint8 destroy_called;
- /* pairs with completed_requests */
- gpr_uint8 num_completed_requests;
- /* are we currently reading a message? */
- gpr_uint8 reading_message;
- /* have we bound a pollset yet? */
- gpr_uint8 bound_pollset;
- /* is an error status set */
- gpr_uint8 error_status_set;
- /** bitmask of allocated completion events in completions */
- gpr_uint8 allocated_completions;
/** flag indicating that cancellation is inherited */
gpr_uint8 cancellation_is_inherited;
+ /** bitmask of live batches */
+ gpr_uint8 used_batches;
+ /** which ops are in-flight */
+ gpr_uint8 sent_initial_metadata;
+ gpr_uint8 sending_message;
+ gpr_uint8 sent_final_op;
+ gpr_uint8 received_initial_metadata;
+ gpr_uint8 receiving_message;
+ gpr_uint8 received_final_op;
+
+ batch_control active_batches[MAX_CONCURRENT_BATCHES];
+
+ /* first idx: is_receiving, second idx: is_trailing */
+ grpc_metadata_batch metadata_batch[2][2];
- /* flags with bits corresponding to write states allowing us to determine
- what was sent */
- gpr_uint16 last_send_contains;
- /* cancel with this status on the next outgoing transport op */
- grpc_status_code cancel_with_status;
-
- /* Active ioreqs.
- request_set and request_data contain one element per active ioreq
- operation.
-
- request_set[op] is an integer specifying a set of operations to which
- the request belongs:
- - if it is < GRPC_IOREQ_OP_COUNT, then this operation is pending
- completion, and the integer represents to which group of operations
- the ioreq belongs. Each group is represented by one master, and the
- integer in request_set is an index into masters to find the master
- data.
- - if it is REQSET_EMPTY, the ioreq op is inactive and available to be
- started
- - finally, if request_set[op] is REQSET_DONE, then the operation is
- complete and unavailable to be started again
-
- request_data[op] is the request data as supplied by the initiator of
- a request, and is valid iff request_set[op] <= GRPC_IOREQ_OP_COUNT.
- The set fields are as per the request type specified by op.
-
- Finally, one element of masters is set per active _set_ of ioreq
- operations. It describes work left outstanding, result status, and
- what work to perform upon operation completion. As one ioreq of each
- op type can be active at once, by convention we choose the first element
- of the group to be the master -- ie the master of in-progress operation
- op is masters[request_set[op]]. This allows constant time allocation
- and a strong upper bound of a count of masters to be calculated. */
- gpr_uint8 request_set[GRPC_IOREQ_OP_COUNT];
- grpc_ioreq_data request_data[GRPC_IOREQ_OP_COUNT];
- gpr_uint32 request_flags[GRPC_IOREQ_OP_COUNT];
- reqinfo_master masters[GRPC_IOREQ_OP_COUNT];
-
- /* Dynamic array of ioreq's that have completed: the count of
- elements is queued in num_completed_requests.
- This list is built up under lock(), and flushed entirely during
- unlock().
- We know the upper bound of the number of elements as we can only
- have one ioreq of each type active at once. */
- completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
- /* Incoming buffer of messages */
- grpc_byte_buffer_queue incoming_queue;
/* Buffered read metadata waiting to be returned to the application.
Element 0 is initial metadata, element 1 is trailing metadata. */
- grpc_metadata_array buffered_metadata[2];
- /* All metadata received - unreffed at once at the end of the call */
- grpc_mdelem **owned_metadata;
- size_t owned_metadata_count;
- size_t owned_metadata_capacity;
+ grpc_metadata_array *buffered_metadata[2];
/* Received call statuses from various sources */
received_status status[STATUS_SOURCE_COUNT];
/* Compression algorithm for the call */
grpc_compression_algorithm compression_algorithm;
-
/* Supported encodings (compression algorithms), a bitset */
gpr_uint32 encodings_accepted_by_peer;
@@ -239,35 +181,36 @@ struct grpc_call {
/* Deadline alarm - if have_alarm is non-zero */
grpc_timer alarm;
- /* Call refcount - to keep the call alive during asynchronous operations */
- gpr_refcount internal_refcount;
-
- grpc_linked_mdelem send_initial_metadata[MAX_SEND_INITIAL_METADATA_COUNT];
- grpc_linked_mdelem status_link;
- grpc_linked_mdelem details_link;
- size_t send_initial_metadata_count;
+ /* for the client, extra metadata is initial metadata; for the
+ server, it's trailing metadata */
+ grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT];
+ int send_extra_metadata_count;
gpr_timespec send_deadline;
- grpc_stream_op_buffer send_ops;
- grpc_stream_op_buffer recv_ops;
- grpc_stream_state recv_state;
-
- gpr_slice_buffer incoming_message;
- gpr_uint32 incoming_message_length;
- gpr_uint32 incoming_message_flags;
- grpc_closure destroy_closure;
- grpc_closure on_done_recv;
- grpc_closure on_done_send;
- grpc_closure on_done_bind;
-
- /** completion events - for completion queue use */
- grpc_cq_completion completions[MAX_CONCURRENT_COMPLETIONS];
-
/** siblings: children of the same parent form a list, and this list is
protected under
parent->mu */
grpc_call *sibling_next;
grpc_call *sibling_prev;
+
+ grpc_slice_buffer_stream sending_stream;
+ grpc_byte_stream *receiving_stream;
+ grpc_byte_buffer **receiving_buffer;
+ gpr_slice receiving_slice;
+ grpc_closure receiving_slice_ready;
+ grpc_closure receiving_stream_ready;
+ gpr_uint32 test_only_last_message_flags;
+
+ union {
+ struct {
+ grpc_status_code *status;
+ char **status_details;
+ size_t *status_details_capacity;
+ } client;
+ struct {
+ int *cancelled;
+ } server;
+ } final_op;
};
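A note on the new 2x2 metadata storage: the first index distinguishes send (0) from receive (1), the second initial (0) from trailing (1). Spelled out below; the accessor macro is illustrative and not part of the change:

    /* Illustrative view of call->metadata_batch[is_receiving][is_trailing]:
         [0][0]  initial metadata being sent
         [0][1]  trailing metadata being sent
         [1][0]  initial metadata received from the peer
         [1][1]  trailing metadata received from the peer */
    #define CALL_MD_BATCH(call, is_receiving, is_trailing) \
      (&(call)->metadata_batch[(is_receiving)][(is_trailing)])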
#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
@@ -279,20 +222,15 @@ struct grpc_call {
static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
gpr_timespec deadline);
-static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *call, int success);
-static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *call, int success);
-static int fill_send_ops(grpc_call *call, grpc_transport_stream_op *op);
static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_transport_stream_op *op);
-static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_metadata_batch *metadata);
-static void finish_read_ops(grpc_call *call);
-static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
const char *description);
-static void finished_loose_op(grpc_exec_ctx *exec_ctx, void *call, int success);
-
-static void lock(grpc_call *call);
-static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call);
+static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
+ int success);
+static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ int success);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
gpr_uint32 propagation_mask,
@@ -301,9 +239,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline) {
- size_t i;
- grpc_transport_stream_op initial_op;
- grpc_transport_stream_op *initial_op_ptr = NULL;
+ size_t i, j;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call;
@@ -311,51 +247,36 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
- gpr_mu_init(&call->completion_mu);
call->channel = channel;
call->cq = cq;
- if (cq != NULL) {
- GRPC_CQ_INTERNAL_REF(cq, "bind");
- }
call->parent = parent_call;
call->is_client = server_transport_data == NULL;
- for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
- call->request_set[i] = REQSET_EMPTY;
- }
if (call->is_client) {
- call->request_set[GRPC_IOREQ_SEND_TRAILING_METADATA] = REQSET_DONE;
- call->request_set[GRPC_IOREQ_SEND_STATUS] = REQSET_DONE;
+ GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
+ for (i = 0; i < add_initial_metadata_count; i++) {
+ call->send_extra_metadata[i].md = add_initial_metadata[i];
+ }
+ call->send_extra_metadata_count = (int)add_initial_metadata_count;
+ } else {
+ GPR_ASSERT(add_initial_metadata_count == 0);
+ call->send_extra_metadata_count = 0;
}
- GPR_ASSERT(add_initial_metadata_count < MAX_SEND_INITIAL_METADATA_COUNT);
- for (i = 0; i < add_initial_metadata_count; i++) {
- call->send_initial_metadata[i].md = add_initial_metadata[i];
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ }
}
- call->send_initial_metadata_count = add_initial_metadata_count;
call->send_deadline = send_deadline;
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
- call->metadata_context = grpc_channel_get_metadata_context(channel);
- grpc_sopb_init(&call->send_ops);
- grpc_sopb_init(&call->recv_ops);
- gpr_slice_buffer_init(&call->incoming_message);
- grpc_closure_init(&call->on_done_recv, call_on_done_recv, call);
- grpc_closure_init(&call->on_done_send, call_on_done_send, call);
- grpc_closure_init(&call->on_done_bind, finished_loose_op, call);
- /* dropped in destroy and when READ_STATE_STREAM_CLOSED received */
- gpr_ref_init(&call->internal_refcount, 2);
- /* server hack: start reads immediately so we can get initial metadata.
- TODO(ctiller): figure out a cleaner solution */
- if (!call->is_client) {
- memset(&initial_op, 0, sizeof(initial_op));
- initial_op.recv_ops = &call->recv_ops;
- initial_op.recv_state = &call->recv_state;
- initial_op.on_done_recv = &call->on_done_recv;
- initial_op.context = call->context;
- call->receiving = 1;
- GRPC_CALL_INTERNAL_REF(call, "receiving");
- initial_op_ptr = &initial_op;
+ /* initial refcount dropped by grpc_call_destroy */
+ grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
+ call->context, server_transport_data,
+ CALL_STACK_FROM_CALL(call));
+ if (cq != NULL) {
+ GRPC_CQ_INTERNAL_REF(cq, "bind");
+ grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
+ grpc_cq_pollset(cq));
}
- grpc_call_stack_init(&exec_ctx, channel_stack, server_transport_data,
- initial_op_ptr, CALL_STACK_FROM_CALL(call));
if (parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(parent_call, "child");
GPR_ASSERT(call->is_client);
@@ -408,90 +329,55 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq) {
- lock(call);
+ GPR_ASSERT(cq);
call->cq = cq;
- if (cq) {
- GRPC_CQ_INTERNAL_REF(cq, "bind");
- }
- unlock(exec_ctx, call);
+ GRPC_CQ_INTERNAL_REF(cq, "bind");
+ grpc_call_stack_set_pollset(exec_ctx, CALL_STACK_FROM_CALL(call),
+ grpc_cq_pollset(cq));
}
-grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) {
- return call->cq;
-}
-
-static grpc_cq_completion *allocate_completion(grpc_call *call) {
- gpr_uint8 i;
- gpr_mu_lock(&call->completion_mu);
- for (i = 0; i < GPR_ARRAY_SIZE(call->completions); i++) {
- if (call->allocated_completions & (1u << i)) {
- continue;
- }
- /* NB: the following integer arithmetic operation needs to be in its
- * expanded form due to the "integral promotion" performed (see section
- * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
- * is then required to avoid the compiler warning */
- call->allocated_completions =
- (gpr_uint8)(call->allocated_completions | (1u << i));
- gpr_mu_unlock(&call->completion_mu);
- return &call->completions[i];
- }
- GPR_UNREACHABLE_CODE(return NULL);
- return NULL;
-}
-
-static void done_completion(grpc_exec_ctx *exec_ctx, void *call,
- grpc_cq_completion *completion) {
- grpc_call *c = call;
- gpr_mu_lock(&c->completion_mu);
- c->allocated_completions &=
- (gpr_uint8) ~(1u << (completion - c->completions));
- gpr_mu_unlock(&c->completion_mu);
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, c, "completion");
-}
-
-#ifdef GRPC_CALL_REF_COUNT_DEBUG
-void grpc_call_internal_ref(grpc_call *c, const char *reason) {
- gpr_log(GPR_DEBUG, "CALL: ref %p %d -> %d [%s]", c,
- c->internal_refcount.count, c->internal_refcount.count + 1, reason);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_ARG , const char *reason
#else
-void grpc_call_internal_ref(grpc_call *c) {
+#define REF_REASON ""
+#define REF_ARG
#endif
- gpr_ref(&c->internal_refcount);
+void grpc_call_internal_ref(grpc_call *c REF_ARG) {
+ GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON);
+}
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
+ GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
-static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
+static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, int success) {
size_t i;
+ int ii;
grpc_call *c = call;
GPR_TIMER_BEGIN("destroy_call", 0);
+ for (i = 0; i < 2; i++) {
+ grpc_metadata_batch_destroy(
+        &c->metadata_batch[1 /* is_receiving */][i /* is_trailing */]);
+ }
+ if (c->receiving_stream != NULL) {
+ grpc_byte_stream_destroy(c->receiving_stream);
+ }
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
gpr_mu_destroy(&c->mu);
- gpr_mu_destroy(&c->completion_mu);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (c->status[i].details) {
GRPC_MDSTR_UNREF(c->status[i].details);
}
}
- for (i = 0; i < c->owned_metadata_count; i++) {
- GRPC_MDELEM_UNREF(c->owned_metadata[i]);
- }
- gpr_free(c->owned_metadata);
- for (i = 0; i < GPR_ARRAY_SIZE(c->buffered_metadata); i++) {
- gpr_free(c->buffered_metadata[i].metadata);
- }
- for (i = 0; i < c->send_initial_metadata_count; i++) {
- GRPC_MDELEM_UNREF(c->send_initial_metadata[i].md);
+ for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
+ GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
}
for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
if (c->context[i].destroy) {
c->context[i].destroy(c->context[i].value);
}
}
- grpc_sopb_destroy(&c->send_ops);
- grpc_sopb_destroy(&c->recv_ops);
- grpc_bbq_destroy(&c->incoming_queue);
- gpr_slice_buffer_destroy(&c->incoming_message);
if (c->cq) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
@@ -499,30 +385,14 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
GPR_TIMER_END("destroy_call", 0);
}
-#ifdef GRPC_CALL_REF_COUNT_DEBUG
-void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c,
- const char *reason) {
- gpr_log(GPR_DEBUG, "CALL: unref %p %d -> %d [%s]", c,
- c->internal_refcount.count, c->internal_refcount.count - 1, reason);
-#else
-void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c) {
-#endif
- if (gpr_unref(&c->internal_refcount)) {
- destroy_call(exec_ctx, c);
- }
-}
-
static void set_status_code(grpc_call *call, status_source source,
gpr_uint32 status) {
if (call->status[source].is_set) return;
call->status[source].is_set = 1;
call->status[source].code = (grpc_status_code)status;
- call->error_status_set = status != GRPC_STATUS_OK;
- if (status != GRPC_STATUS_OK && !grpc_bbq_empty(&call->incoming_queue)) {
- grpc_bbq_flush(&call->incoming_queue);
- }
+ /* TODO(ctiller): what to do about the flush that was previously here */
}
static void set_compression_algorithm(grpc_call *call,
@@ -539,6 +409,14 @@ grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
return algorithm;
}
+gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call) {
+ gpr_uint32 flags;
+ gpr_mu_lock(&call->mu);
+ flags = call->test_only_last_message_flags;
+ gpr_mu_unlock(&call->mu);
+ return flags;
+}
+
static void destroy_encodings_accepted_by_peer(void *p) { return; }
static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem *mdel) {
@@ -596,14 +474,6 @@ gpr_uint32 grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
return encodings_accepted_by_peer;
}
-gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call) {
- gpr_uint32 flags;
- gpr_mu_lock(&call->mu);
- flags = call->incoming_message_flags;
- gpr_mu_unlock(&call->mu);
- return flags;
-}
-
static void set_status_details(grpc_call *call, status_source source,
grpc_mdstr *status) {
if (call->status[source].details != NULL) {
@@ -612,149 +482,39 @@ static void set_status_details(grpc_call *call, status_source source,
call->status[source].details = status;
}
-static int is_op_live(grpc_call *call, grpc_ioreq_op op) {
- gpr_uint8 set = call->request_set[op];
- reqinfo_master *master;
- if (set >= GRPC_IOREQ_OP_COUNT) return 0;
- master = &call->masters[set];
- return (master->complete_mask & (1u << op)) == 0;
-}
-
-static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); }
-
-static int need_more_data(grpc_call *call) {
- if (call->read_state == READ_STATE_STREAM_CLOSED) return 0;
- /* TODO(ctiller): this needs some serious cleanup */
- return is_op_live(call, GRPC_IOREQ_RECV_INITIAL_METADATA) ||
- (is_op_live(call, GRPC_IOREQ_RECV_MESSAGE) &&
- grpc_bbq_empty(&call->incoming_queue)) ||
- is_op_live(call, GRPC_IOREQ_RECV_TRAILING_METADATA) ||
- is_op_live(call, GRPC_IOREQ_RECV_STATUS) ||
- is_op_live(call, GRPC_IOREQ_RECV_STATUS_DETAILS) ||
- (is_op_live(call, GRPC_IOREQ_RECV_CLOSE) &&
- grpc_bbq_empty(&call->incoming_queue)) ||
- (call->write_state == WRITE_STATE_INITIAL && !call->is_client) ||
- (call->cancel_with_status != GRPC_STATUS_OK) || call->destroy_called;
-}
-
-static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
- grpc_transport_stream_op op;
- completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
- int completing_requests = 0;
- int start_op = 0;
- int i;
- const size_t MAX_RECV_PEEK_AHEAD = 65536;
- size_t buffered_bytes;
-
- GPR_TIMER_BEGIN("unlock", 0);
-
- memset(&op, 0, sizeof(op));
-
- op.cancel_with_status = call->cancel_with_status;
- start_op = op.cancel_with_status != GRPC_STATUS_OK;
- call->cancel_with_status = GRPC_STATUS_OK; /* reset */
-
- if (!call->receiving && need_more_data(call)) {
- if (grpc_bbq_empty(&call->incoming_queue) && call->reading_message) {
- op.max_recv_bytes = call->incoming_message_length -
- call->incoming_message.length + MAX_RECV_PEEK_AHEAD;
- } else {
- buffered_bytes = grpc_bbq_bytes(&call->incoming_queue);
- if (buffered_bytes > MAX_RECV_PEEK_AHEAD) {
- op.max_recv_bytes = 0;
- } else {
- op.max_recv_bytes = MAX_RECV_PEEK_AHEAD - buffered_bytes;
- }
- }
- /* TODO(ctiller): 1024 is basically to cover a bug
- I don't understand yet */
- if (op.max_recv_bytes > 1024) {
- op.recv_ops = &call->recv_ops;
- op.recv_state = &call->recv_state;
- op.on_done_recv = &call->on_done_recv;
- call->receiving = 1;
- GRPC_CALL_INTERNAL_REF(call, "receiving");
- start_op = 1;
- }
- }
-
- if (!call->sending) {
- if (fill_send_ops(call, &op)) {
- call->sending = 1;
- GRPC_CALL_INTERNAL_REF(call, "sending");
- start_op = 1;
- }
- }
-
- if (!call->bound_pollset && call->cq && (!call->is_client || start_op)) {
- call->bound_pollset = 1;
- op.bind_pollset = grpc_cq_pollset(call->cq);
- start_op = 1;
- }
-
- if (!call->completing && call->num_completed_requests != 0) {
- completing_requests = call->num_completed_requests;
- memcpy(completed_requests, call->completed_requests,
- sizeof(completed_requests));
- call->num_completed_requests = 0;
- call->completing = 1;
- GRPC_CALL_INTERNAL_REF(call, "completing");
- }
-
- gpr_mu_unlock(&call->mu);
-
- if (start_op) {
- execute_op(exec_ctx, call, &op);
- }
-
- if (completing_requests > 0) {
- for (i = 0; i < completing_requests; i++) {
- completed_requests[i].on_complete(exec_ctx, call,
- completed_requests[i].success,
- completed_requests[i].user_data);
- }
- lock(call);
- call->completing = 0;
- unlock(exec_ctx, call);
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completing");
- }
-
- GPR_TIMER_END("unlock", 0);
-}
-
-static void get_final_status(grpc_call *call, grpc_ioreq_data out) {
+static void get_final_status(grpc_call *call,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data) {
int i;
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
- out.recv_status.set_value(call->status[i].code,
- out.recv_status.user_data);
+ set_value(call->status[i].code, set_value_user_data);
return;
}
}
if (call->is_client) {
- out.recv_status.set_value(GRPC_STATUS_UNKNOWN, out.recv_status.user_data);
+ set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
} else {
- out.recv_status.set_value(GRPC_STATUS_OK, out.recv_status.user_data);
+ set_value(GRPC_STATUS_OK, set_value_user_data);
}
}
-static void get_final_details(grpc_call *call, grpc_ioreq_data out) {
+static void get_final_details(grpc_call *call, char **out_details,
+ size_t *out_details_capacity) {
int i;
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
if (call->status[i].details) {
gpr_slice details = call->status[i].details->slice;
size_t len = GPR_SLICE_LENGTH(details);
- if (len + 1 > *out.recv_status_details.details_capacity) {
- *out.recv_status_details.details_capacity = GPR_MAX(
- len + 1, *out.recv_status_details.details_capacity * 3 / 2);
- *out.recv_status_details.details =
- gpr_realloc(*out.recv_status_details.details,
- *out.recv_status_details.details_capacity);
+ if (len + 1 > *out_details_capacity) {
+ *out_details_capacity =
+ GPR_MAX(len + 1, *out_details_capacity * 3 / 2);
+ *out_details = gpr_realloc(*out_details, *out_details_capacity);
}
- memcpy(*out.recv_status_details.details, GPR_SLICE_START_PTR(details),
- len);
- (*out.recv_status_details.details)[len] = 0;
+ memcpy(*out_details, GPR_SLICE_START_PTR(details), len);
+ (*out_details)[len] = 0;
} else {
goto no_details;
}
@@ -763,333 +523,45 @@ static void get_final_details(grpc_call *call, grpc_ioreq_data out) {
}
no_details:
- if (0 == *out.recv_status_details.details_capacity) {
- *out.recv_status_details.details_capacity = 8;
- *out.recv_status_details.details =
- gpr_malloc(*out.recv_status_details.details_capacity);
- }
- **out.recv_status_details.details = 0;
-}
-
-static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
- int success) {
- completed_request *cr;
- gpr_uint8 master_set = call->request_set[op];
- reqinfo_master *master;
- size_t i;
- /* ioreq is live: we need to do something */
- master = &call->masters[master_set];
- /* NB: the following integer arithmetic operation needs to be in its
- * expanded form due to the "integral promotion" performed (see section
- * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
- * is then required to avoid the compiler warning */
- master->complete_mask = (gpr_uint16)(master->complete_mask | (1u << op));
- if (!success) {
- master->success = 0;
- }
- if (master->complete_mask == master->need_mask) {
- for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
- if (call->request_set[i] != master_set) {
- continue;
- }
- call->request_set[i] = REQSET_DONE;
- switch ((grpc_ioreq_op)i) {
- case GRPC_IOREQ_RECV_MESSAGE:
- case GRPC_IOREQ_SEND_MESSAGE:
- call->request_set[i] = REQSET_EMPTY;
- if (!master->success) {
- call->write_state = WRITE_STATE_WRITE_CLOSED;
- }
- break;
- case GRPC_IOREQ_SEND_STATUS:
- if (call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details !=
- NULL) {
- GRPC_MDSTR_UNREF(
- call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details);
- call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details =
- NULL;
- }
- break;
- case GRPC_IOREQ_RECV_CLOSE:
- case GRPC_IOREQ_SEND_INITIAL_METADATA:
- case GRPC_IOREQ_SEND_TRAILING_METADATA:
- case GRPC_IOREQ_SEND_CLOSE:
- break;
- case GRPC_IOREQ_RECV_STATUS:
- get_final_status(call, call->request_data[GRPC_IOREQ_RECV_STATUS]);
- break;
- case GRPC_IOREQ_RECV_STATUS_DETAILS:
- get_final_details(call,
- call->request_data[GRPC_IOREQ_RECV_STATUS_DETAILS]);
- break;
- case GRPC_IOREQ_RECV_INITIAL_METADATA:
- GPR_SWAP(grpc_metadata_array, call->buffered_metadata[0],
- *call->request_data[GRPC_IOREQ_RECV_INITIAL_METADATA]
- .recv_metadata);
- break;
- case GRPC_IOREQ_RECV_TRAILING_METADATA:
- GPR_SWAP(grpc_metadata_array, call->buffered_metadata[1],
- *call->request_data[GRPC_IOREQ_RECV_TRAILING_METADATA]
- .recv_metadata);
- break;
- case GRPC_IOREQ_OP_COUNT:
- abort();
- break;
- }
- }
- cr = &call->completed_requests[call->num_completed_requests++];
- cr->success = master->success;
- cr->on_complete = master->on_complete;
- cr->user_data = master->user_data;
- }
-}
-
-static void finish_ioreq_op(grpc_call *call, grpc_ioreq_op op, int success) {
- if (is_op_live(call, op)) {
- finish_live_ioreq_op(call, op, success);
+ if (0 == *out_details_capacity) {
+ *out_details_capacity = 8;
+ *out_details = gpr_malloc(*out_details_capacity);
}
+ **out_details = 0;
}
-static void early_out_write_ops(grpc_call *call) {
- switch (call->write_state) {
- case WRITE_STATE_WRITE_CLOSED:
- finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, 0);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, 0);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, 0);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
- /* fallthrough */
- case WRITE_STATE_STARTED:
- finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, 0);
- /* fallthrough */
- case WRITE_STATE_INITIAL:
- /* do nothing */
- break;
- }
+static grpc_linked_mdelem *linked_from_md(grpc_metadata *md) {
+ return (grpc_linked_mdelem *)&md->internal_data;
}
-static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
- grpc_call *call = pc;
- GPR_TIMER_BEGIN("call_on_done_send", 0);
- lock(call);
- if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
- finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
- call->write_state = WRITE_STATE_STARTED;
- }
- if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_MESSAGE)) {
- finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, success);
- }
- if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_CLOSE)) {
- finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, success);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, success);
- finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
- call->write_state = WRITE_STATE_WRITE_CLOSED;
- }
- if (!success) {
- call->write_state = WRITE_STATE_WRITE_CLOSED;
- early_out_write_ops(call);
- }
- call->send_ops.nops = 0;
- call->last_send_contains = 0;
- call->sending = 0;
- unlock(exec_ctx, call);
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "sending");
- GPR_TIMER_END("call_on_done_send", 0);
-}
-
-static void finish_message(grpc_call *call) {
- GPR_TIMER_BEGIN("finish_message", 0);
- if (call->error_status_set == 0) {
- /* TODO(ctiller): this could be a lot faster if coded directly */
- grpc_byte_buffer *byte_buffer;
- /* some aliases for readability */
- gpr_slice *slices = call->incoming_message.slices;
- const size_t nslices = call->incoming_message.count;
-
- if ((call->incoming_message_flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
- (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
- byte_buffer = grpc_raw_compressed_byte_buffer_create(
- slices, nslices, call->compression_algorithm);
+static int prepare_application_metadata(grpc_call *call, int count,
+ grpc_metadata *metadata,
+ int is_trailing,
+ int prepend_extra_metadata) {
+ int i;
+ grpc_metadata_batch *batch =
+ &call->metadata_batch[0 /* is_receiving */][is_trailing];
+ if (prepend_extra_metadata) {
+ if (call->send_extra_metadata_count == 0) {
+ prepend_extra_metadata = 0;
} else {
- byte_buffer = grpc_raw_byte_buffer_create(slices, nslices);
- }
- grpc_bbq_push(&call->incoming_queue, byte_buffer);
- }
- gpr_slice_buffer_reset_and_unref(&call->incoming_message);
- GPR_ASSERT(call->incoming_message.count == 0);
- call->reading_message = 0;
- GPR_TIMER_END("finish_message", 0);
-}
-
-static int begin_message(grpc_call *call, grpc_begin_message msg) {
- /* can't begin a message when we're still reading a message */
- if (call->reading_message) {
- char *message = NULL;
- gpr_asprintf(
- &message, "Message terminated early; read %d bytes, expected %d",
- (int)call->incoming_message.length, (int)call->incoming_message_length);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
- gpr_free(message);
- return 0;
- }
- /* sanity check: if message flags indicate a compressed message, the
- * compression level should already be present in the call, as parsed off its
- * corresponding metadata. */
- if ((msg.flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
- (call->compression_algorithm == GRPC_COMPRESS_NONE)) {
- char *message = NULL;
- char *alg_name;
- if (!grpc_compression_algorithm_name(call->compression_algorithm,
- &alg_name)) {
- /* This shouldn't happen, other than due to data corruption */
- alg_name = "<unknown>";
- }
- gpr_asprintf(&message,
- "Invalid compression algorithm (%s) for compressed message.",
- alg_name);
- cancel_with_status(call, GRPC_STATUS_INTERNAL, message);
- gpr_free(message);
- return 0;
- }
- /* stash away parameters, and prepare for incoming slices */
- if (msg.length > grpc_channel_get_max_message_length(call->channel)) {
- char *message = NULL;
- gpr_asprintf(
- &message,
- "Maximum message length of %d exceeded by a message of length %d",
- grpc_channel_get_max_message_length(call->channel), msg.length);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
- gpr_free(message);
- return 0;
- } else if (msg.length > 0) {
- call->reading_message = 1;
- call->incoming_message_length = msg.length;
- call->incoming_message_flags = msg.flags;
- return 1;
- } else {
- finish_message(call);
- return 1;
- }
-}
-
-static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
- if (GPR_SLICE_LENGTH(slice) == 0) {
- gpr_slice_unref(slice);
- return 1;
- }
- /* we have to be reading a message to know what to do here */
- if (!call->reading_message) {
- gpr_slice_unref(slice);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT,
- "Received payload data while not reading a message");
- return 0;
- }
- /* append the slice to the incoming buffer */
- gpr_slice_buffer_add(&call->incoming_message, slice);
- if (call->incoming_message.length > call->incoming_message_length) {
- /* if we got too many bytes, complain */
- char *message = NULL;
- gpr_asprintf(
- &message, "Receiving message overflow; read %d bytes, expected %d",
- (int)call->incoming_message.length, (int)call->incoming_message_length);
- cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
- gpr_free(message);
- return 0;
- } else if (call->incoming_message.length == call->incoming_message_length) {
- finish_message(call);
- return 1;
- } else {
- return 1;
- }
-}
-
-static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
- grpc_call *call = pc;
- grpc_call *child_call;
- grpc_call *next_child_call;
- size_t i;
- GPR_TIMER_BEGIN("call_on_done_recv", 0);
- lock(call);
- call->receiving = 0;
- if (success) {
- for (i = 0; success && i < call->recv_ops.nops; i++) {
- grpc_stream_op *op = &call->recv_ops.ops[i];
- switch (op->type) {
- case GRPC_NO_OP:
- break;
- case GRPC_OP_METADATA:
- GPR_TIMER_BEGIN("recv_metadata", 0);
- recv_metadata(exec_ctx, call, &op->data.metadata);
- GPR_TIMER_END("recv_metadata", 0);
- break;
- case GRPC_OP_BEGIN_MESSAGE:
- GPR_TIMER_BEGIN("begin_message", 0);
- success = begin_message(call, op->data.begin_message);
- GPR_TIMER_END("begin_message", 0);
- break;
- case GRPC_OP_SLICE:
- GPR_TIMER_BEGIN("add_slice_to_message", 0);
- success = add_slice_to_message(call, op->data.slice);
- GPR_TIMER_END("add_slice_to_message", 0);
- break;
+ for (i = 0; i < call->send_extra_metadata_count; i++) {
+ GRPC_MDELEM_REF(call->send_extra_metadata[i].md);
}
- }
- if (!success) {
- grpc_stream_ops_unref_owned_objects(&call->recv_ops.ops[i],
- call->recv_ops.nops - i);
- }
- if (call->recv_state == GRPC_STREAM_RECV_CLOSED) {
- GPR_ASSERT(call->read_state <= READ_STATE_READ_CLOSED);
- call->read_state = READ_STATE_READ_CLOSED;
- }
- if (call->recv_state == GRPC_STREAM_CLOSED) {
- GPR_ASSERT(call->read_state <= READ_STATE_STREAM_CLOSED);
- call->read_state = READ_STATE_STREAM_CLOSED;
- if (call->have_alarm) {
- grpc_timer_cancel(exec_ctx, &call->alarm);
+ for (i = 1; i < call->send_extra_metadata_count; i++) {
+ call->send_extra_metadata[i].prev = &call->send_extra_metadata[i - 1];
}
- /* propagate cancellation to any interested children */
- child_call = call->first_child;
- if (child_call != NULL) {
- do {
- next_child_call = child_call->sibling_next;
- if (child_call->cancellation_is_inherited) {
- GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
- grpc_call_cancel(child_call, NULL);
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
- }
- child_call = next_child_call;
- } while (child_call != call->first_child);
+ for (i = 0; i < call->send_extra_metadata_count - 1; i++) {
+ call->send_extra_metadata[i].next = &call->send_extra_metadata[i + 1];
}
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "closed");
}
- finish_read_ops(call);
- } else {
- finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 0);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 0);
}
- call->recv_ops.nops = 0;
- unlock(exec_ctx, call);
-
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "receiving");
- GPR_TIMER_END("call_on_done_recv", 0);
-}
-
-static int prepare_application_metadata(grpc_call *call, size_t count,
- grpc_metadata *metadata) {
- size_t i;
for (i = 0; i < count; i++) {
grpc_metadata *md = &metadata[i];
- grpc_metadata *next_md = (i == count - 1) ? NULL : &metadata[i + 1];
- grpc_metadata *prev_md = (i == 0) ? NULL : &metadata[i - 1];
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
- l->md = grpc_mdelem_from_string_and_buffer(call->metadata_context, md->key,
- (const gpr_uint8 *)md->value,
- md->value_length);
+ l->md = grpc_mdelem_from_string_and_buffer(
+ md->key, (const gpr_uint8 *)md->value, md->value_length);
if (!grpc_mdstr_is_legal_header(l->md->key)) {
gpr_log(GPR_ERROR, "attempt to send invalid metadata key: %s",
grpc_mdstr_as_c_string(l->md->key));
@@ -1099,243 +571,49 @@ static int prepare_application_metadata(grpc_call *call, size_t count,
gpr_log(GPR_ERROR, "attempt to send invalid metadata value");
return 0;
}
- l->next = next_md ? (grpc_linked_mdelem *)&next_md->internal_data : NULL;
- l->prev = prev_md ? (grpc_linked_mdelem *)&prev_md->internal_data : NULL;
}
- return 1;
-}
-
-static grpc_mdelem_list chain_metadata_from_app(grpc_call *call, size_t count,
- grpc_metadata *metadata) {
- grpc_mdelem_list out;
- if (count == 0) {
- out.head = out.tail = NULL;
- return out;
+ for (i = 1; i < count; i++) {
+ linked_from_md(&metadata[i])->prev = linked_from_md(&metadata[i - 1]);
}
- out.head = (grpc_linked_mdelem *)&(metadata[0].internal_data);
- out.tail = (grpc_linked_mdelem *)&(metadata[count - 1].internal_data);
- return out;
-}
-
-/* Copy the contents of a byte buffer into stream ops */
-static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
- grpc_stream_op_buffer *sopb) {
- size_t i;
-
- switch (byte_buffer->type) {
- case GRPC_BB_RAW:
- for (i = 0; i < byte_buffer->data.raw.slice_buffer.count; i++) {
- gpr_slice slice = byte_buffer->data.raw.slice_buffer.slices[i];
- gpr_slice_ref(slice);
- grpc_sopb_add_slice(sopb, slice);
- }
- break;
+ for (i = 0; i < count - 1; i++) {
+ linked_from_md(&metadata[i])->next = linked_from_md(&metadata[i + 1]);
}
-}
-
-static int fill_send_ops(grpc_call *call, grpc_transport_stream_op *op) {
- grpc_ioreq_data data;
- gpr_uint32 flags;
- grpc_metadata_batch mdb;
- size_t i;
- GPR_ASSERT(op->send_ops == NULL);
-
- switch (call->write_state) {
- case WRITE_STATE_INITIAL:
- if (!is_op_live(call, GRPC_IOREQ_SEND_INITIAL_METADATA)) {
- break;
- }
- data = call->request_data[GRPC_IOREQ_SEND_INITIAL_METADATA];
- mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
- data.send_metadata.metadata);
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = call->send_deadline;
- for (i = 0; i < call->send_initial_metadata_count; i++) {
- grpc_metadata_batch_link_head(&mdb, &call->send_initial_metadata[i]);
- }
- grpc_sopb_add_metadata(&call->send_ops, mdb);
- op->send_ops = &call->send_ops;
- call->last_send_contains |= 1 << GRPC_IOREQ_SEND_INITIAL_METADATA;
- call->send_initial_metadata_count = 0;
- /* fall through intended */
- case WRITE_STATE_STARTED:
- if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) {
- size_t length;
- data = call->request_data[GRPC_IOREQ_SEND_MESSAGE];
- flags = call->request_flags[GRPC_IOREQ_SEND_MESSAGE];
- length = grpc_byte_buffer_length(data.send_message);
- GPR_ASSERT(length <= GPR_UINT32_MAX);
- grpc_sopb_add_begin_message(&call->send_ops, (gpr_uint32)length, flags);
- copy_byte_buffer_to_stream_ops(data.send_message, &call->send_ops);
- op->send_ops = &call->send_ops;
- call->last_send_contains |= 1 << GRPC_IOREQ_SEND_MESSAGE;
- }
- if (is_op_live(call, GRPC_IOREQ_SEND_CLOSE)) {
- op->is_last_send = 1;
- op->send_ops = &call->send_ops;
- call->last_send_contains |= 1 << GRPC_IOREQ_SEND_CLOSE;
- if (!call->is_client) {
- /* send trailing metadata */
- data = call->request_data[GRPC_IOREQ_SEND_TRAILING_METADATA];
- mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
- data.send_metadata.metadata);
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- /* send status */
- /* TODO(ctiller): cache common status values */
- data = call->request_data[GRPC_IOREQ_SEND_STATUS];
- grpc_metadata_batch_add_tail(
- &mdb, &call->status_link,
- grpc_channel_get_reffed_status_elem(call->channel,
- data.send_status.code));
- if (data.send_status.details) {
- grpc_metadata_batch_add_tail(
- &mdb, &call->details_link,
- grpc_mdelem_from_metadata_strings(
- call->metadata_context,
- GRPC_MDSTR_REF(
- grpc_channel_get_message_string(call->channel)),
- data.send_status.details));
- call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details =
- NULL;
- }
- grpc_sopb_add_metadata(&call->send_ops, mdb);
- }
- }
+ switch (prepend_extra_metadata * 2 + (count != 0)) {
+ case 0:
+ /* no prepend, no metadata => nothing to do */
+ batch->list.head = batch->list.tail = NULL;
break;
- case WRITE_STATE_WRITE_CLOSED:
+ case 1:
+ /* metadata, but no prepend */
+ batch->list.head = linked_from_md(&metadata[0]);
+ batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
break;
- }
- if (op->send_ops) {
- op->on_done_send = &call->on_done_send;
- }
- return op->send_ops != NULL;
-}
-
-static grpc_call_error start_ioreq_error(grpc_call *call,
- gpr_uint32 mutated_ops,
- grpc_call_error ret) {
- size_t i;
- for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
- if (mutated_ops & (1u << i)) {
- call->request_set[i] = REQSET_EMPTY;
- }
- }
- return ret;
-}
-
-static void finish_read_ops(grpc_call *call) {
- int empty;
-
- if (is_op_live(call, GRPC_IOREQ_RECV_MESSAGE)) {
- empty =
- (NULL == (*call->request_data[GRPC_IOREQ_RECV_MESSAGE].recv_message =
- grpc_bbq_pop(&call->incoming_queue)));
- if (!empty) {
- finish_live_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 1);
- empty = grpc_bbq_empty(&call->incoming_queue);
- }
- } else {
- empty = grpc_bbq_empty(&call->incoming_queue);
- }
-
- switch (call->read_state) {
- case READ_STATE_STREAM_CLOSED:
- if (empty && !call->have_alarm) {
- finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 1);
- }
- /* fallthrough */
- case READ_STATE_READ_CLOSED:
- if (empty) {
- finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 1);
- }
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 1);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 1);
- finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 1);
- /* fallthrough */
- case READ_STATE_GOT_INITIAL_METADATA:
- finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 1);
- /* fallthrough */
- case READ_STATE_INITIAL:
- /* do nothing */
+ case 2:
+ /* prepend, but no md */
+ batch->list.head = &call->send_extra_metadata[0];
+ batch->list.tail =
+ &call->send_extra_metadata[call->send_extra_metadata_count - 1];
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
break;
+ case 3:
+ /* prepend AND md */
+ batch->list.head = &call->send_extra_metadata[0];
+ call->send_extra_metadata[call->send_extra_metadata_count - 1].next =
+ linked_from_md(&metadata[0]);
+ linked_from_md(&metadata[0])->prev =
+ &call->send_extra_metadata[call->send_extra_metadata_count - 1];
+ batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.head->prev = NULL;
+ batch->list.tail->next = NULL;
+ break;
+ default:
+ GPR_UNREACHABLE_CODE(return 0);
}
-}
-
-static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
- size_t nreqs,
- grpc_ioreq_completion_func completion,
- void *user_data) {
- size_t i;
- gpr_uint16 have_ops = 0;
- grpc_ioreq_op op;
- reqinfo_master *master;
- grpc_ioreq_data data;
- gpr_uint8 set;
-
- if (nreqs == 0) {
- return GRPC_CALL_OK;
- }
-
- set = reqs[0].op;
-
- for (i = 0; i < nreqs; i++) {
- op = reqs[i].op;
- if (call->request_set[op] < GRPC_IOREQ_OP_COUNT) {
- return start_ioreq_error(call, have_ops,
- GRPC_CALL_ERROR_TOO_MANY_OPERATIONS);
- } else if (call->request_set[op] == REQSET_DONE) {
- return start_ioreq_error(call, have_ops, GRPC_CALL_ERROR_ALREADY_INVOKED);
- }
- data = reqs[i].data;
- if (op == GRPC_IOREQ_SEND_INITIAL_METADATA ||
- op == GRPC_IOREQ_SEND_TRAILING_METADATA) {
- if (!prepare_application_metadata(call, data.send_metadata.count,
- data.send_metadata.metadata)) {
- return start_ioreq_error(call, have_ops,
- GRPC_CALL_ERROR_INVALID_METADATA);
- }
- }
- if (op == GRPC_IOREQ_SEND_STATUS) {
- set_status_code(call, STATUS_FROM_SERVER_STATUS,
- (gpr_uint32)reqs[i].data.send_status.code);
- if (reqs[i].data.send_status.details) {
- set_status_details(call, STATUS_FROM_SERVER_STATUS,
- GRPC_MDSTR_REF(reqs[i].data.send_status.details));
- }
- }
- /* NB: the following integer arithmetic operation needs to be in its
- * expanded form due to the "integral promotion" performed (see section
- * 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
- * is then required to avoid the compiler warning */
- have_ops = (gpr_uint16)(have_ops | (1u << op));
-
- call->request_data[op] = data;
- call->request_flags[op] = reqs[i].flags;
- call->request_set[op] = set;
- }
-
- master = &call->masters[set];
- master->success = 1;
- master->need_mask = have_ops;
- master->complete_mask = 0;
- master->on_complete = completion;
- master->user_data = user_data;
-
- finish_read_ops(call);
- early_out_write_ops(call);
-
- return GRPC_CALL_OK;
-}
-grpc_call_error grpc_call_start_ioreq_and_call_back(
- grpc_exec_ctx *exec_ctx, grpc_call *call, const grpc_ioreq *reqs,
- size_t nreqs, grpc_ioreq_completion_func on_complete, void *user_data) {
- grpc_call_error err;
- lock(call);
- err = start_ioreq(call, reqs, nreqs, on_complete, user_data);
- unlock(exec_ctx, call);
- return err;
+ return 1;
}
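The switch above keys on a small encoding of two booleans: prepend_extra_metadata contributes the high bit and (count != 0) the low bit, so the four cases are exactly the four combinations of "have call-owned extra metadata to prepend" and "have application metadata". The same decision written long-hand, as an illustrative standalone sketch (the case numbers match the switch labels):

    static int classify_metadata_batch(int prepend_extra_metadata, int count) {
      if (!prepend_extra_metadata && count == 0) return 0; /* empty list */
      if (!prepend_extra_metadata) return 1; /* application metadata only */
      if (count == 0) return 2;              /* extra (call-owned) metadata only */
      return 3; /* extra metadata first, then application metadata appended */
    }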
void grpc_call_destroy(grpc_call *c) {
@@ -1343,6 +621,7 @@ void grpc_call_destroy(grpc_call *c) {
grpc_call *parent = c->parent;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_call_destroy", 0);
GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
if (parent) {
@@ -1359,17 +638,18 @@ void grpc_call_destroy(grpc_call *c) {
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
}
- lock(c);
+ gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->destroy_called);
c->destroy_called = 1;
if (c->have_alarm) {
grpc_timer_cancel(&exec_ctx, &c->alarm);
}
- cancel = c->read_state != READ_STATE_STREAM_CLOSED;
- unlock(&exec_ctx, c);
+ cancel = !c->received_final_op;
+ gpr_mu_unlock(&c->mu);
if (cancel) grpc_call_cancel(c, NULL);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_call_destroy", 0);
}
grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
@@ -1390,73 +670,80 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
"c=%p, status=%d, description=%s, reserved=%p)",
4, (c, (int)status, description, reserved));
GPR_ASSERT(reserved == NULL);
- lock(c);
- r = cancel_with_status(c, status, description);
- unlock(&exec_ctx, c);
+ gpr_mu_lock(&c->mu);
+ r = cancel_with_status(&exec_ctx, c, status, description);
+ gpr_mu_unlock(&c->mu);
grpc_exec_ctx_finish(&exec_ctx);
return r;
}
-static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
+typedef struct cancel_closure {
+ grpc_closure closure;
+ grpc_call *call;
+ grpc_status_code status;
+} cancel_closure;
+
+static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, int success) {
+ cancel_closure *cc = ccp;
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, cc->call, "cancel");
+ gpr_free(cc);
+}
+
+static void send_cancel(grpc_exec_ctx *exec_ctx, void *ccp, int success) {
+ grpc_transport_stream_op op;
+ cancel_closure *cc = ccp;
+ memset(&op, 0, sizeof(op));
+ op.cancel_with_status = cc->status;
+ /* reuse closure to catch completion */
+ grpc_closure_init(&cc->closure, done_cancel, cc);
+ op.on_complete = &cc->closure;
+ execute_op(exec_ctx, cc->call, &op);
+}
+
+static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
const char *description) {
grpc_mdstr *details =
- description ? grpc_mdstr_from_string(c->metadata_context, description)
- : NULL;
+ description ? grpc_mdstr_from_string(description) : NULL;
+ cancel_closure *cc = gpr_malloc(sizeof(*cc));
GPR_ASSERT(status != GRPC_STATUS_OK);
set_status_code(c, STATUS_FROM_API_OVERRIDE, (gpr_uint32)status);
set_status_details(c, STATUS_FROM_API_OVERRIDE, details);
- c->cancel_with_status = status;
+ grpc_closure_init(&cc->closure, send_cancel, cc);
+ cc->call = c;
+ cc->status = status;
+ GRPC_CALL_INTERNAL_REF(c, "cancel");
+ grpc_exec_ctx_enqueue(exec_ctx, &cc->closure, 1);
return GRPC_CALL_OK;
}
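Design-wise, cancellation is no longer latched in call state and flushed by unlock(); it is packaged into a heap-allocated closure and pushed onto the exec_ctx, which then issues a transport op with cancel_with_status set. The public entry point is unchanged; a minimal, illustrative caller:

    #include <grpc/grpc.h>

    /* Illustrative: abort a call from application code. */
    static void abandon_call(grpc_call *call) {
      grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED,
                                   "operation no longer needed", NULL);
    }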
-static void finished_loose_op(grpc_exec_ctx *exec_ctx, void *call,
- int success_ignored) {
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "loose-op");
-}
-
-typedef struct {
- grpc_call *call;
- grpc_closure closure;
-} finished_loose_op_allocated_args;
-
-static void finished_loose_op_allocated(grpc_exec_ctx *exec_ctx, void *alloc,
- int success) {
- finished_loose_op_allocated_args *args = alloc;
- finished_loose_op(exec_ctx, args->call, success);
- gpr_free(args);
-}
-
static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_transport_stream_op *op) {
grpc_call_element *elem;
- GPR_ASSERT(op->on_consumed == NULL);
- if (op->cancel_with_status != GRPC_STATUS_OK || op->bind_pollset) {
- GRPC_CALL_INTERNAL_REF(call, "loose-op");
- if (op->bind_pollset) {
- op->on_consumed = &call->on_done_bind;
- } else {
- finished_loose_op_allocated_args *args = gpr_malloc(sizeof(*args));
- args->call = call;
- grpc_closure_init(&args->closure, finished_loose_op_allocated, args);
- op->on_consumed = &args->closure;
- }
- }
-
+ GPR_TIMER_BEGIN("execute_op", 0);
elem = CALL_ELEM_FROM_CALL(call, 0);
op->context = call->context;
elem->filter->start_transport_stream_op(exec_ctx, elem, op);
+ GPR_TIMER_END("execute_op", 0);
}
char *grpc_call_get_peer(grpc_call *call) {
grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *result = elem->filter->get_peer(&exec_ctx, elem);
+ char *result;
GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
+ result = elem->filter->get_peer(&exec_ctx, elem);
+ if (result == NULL) {
+ result = grpc_channel_get_target(call->channel);
+ }
+ if (result == NULL) {
+ result = gpr_strdup("unknown");
+ }
grpc_exec_ctx_finish(&exec_ctx);
return result;
}
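With the added fallbacks, grpc_call_get_peer now always returns an allocated, non-NULL string, so callers can unconditionally free it (illustrative usage):

    #include <grpc/grpc.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    /* Illustrative: the result is never NULL and is owned by the caller. */
    static void log_peer(grpc_call *call) {
      char *peer = grpc_call_get_peer(call);
      gpr_log(GPR_INFO, "talking to %s", peer);
      gpr_free(peer);
    }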
@@ -1467,14 +754,13 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_call *call = arg;
- lock(call);
+ gpr_mu_lock(&call->mu);
call->have_alarm = 0;
if (success) {
- cancel_with_status(call, GRPC_STATUS_DEADLINE_EXCEEDED,
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_DEADLINE_EXCEEDED,
"Deadline Exceeded");
}
- finish_read_ops(call);
- unlock(exec_ctx, call);
+ gpr_mu_unlock(&call->mu);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "alarm");
}
@@ -1500,8 +786,12 @@ static void destroy_status(void *ignored) {}
static gpr_uint32 decode_status(grpc_mdelem *md) {
gpr_uint32 status;
- void *user_data = grpc_mdelem_get_user_data(md, destroy_status);
- if (user_data) {
+ void *user_data;
+ if (md == GRPC_MDELEM_GRPC_STATUS_0) return 0;
+ if (md == GRPC_MDELEM_GRPC_STATUS_1) return 1;
+ if (md == GRPC_MDELEM_GRPC_STATUS_2) return 2;
+ user_data = grpc_mdelem_get_user_data(md, destroy_status);
+ if (user_data != NULL) {
status = ((gpr_uint32)(gpr_intptr)user_data) - STATUS_OFFSET;
} else {
if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
@@ -1515,101 +805,77 @@ static gpr_uint32 decode_status(grpc_mdelem *md) {
return status;
}
-/* just as for status above, we need to offset: metadata userdata can't hold a
- * zero (null), which in this case is used to signal no compression */
-#define COMPRESS_OFFSET 1
-static void destroy_compression(void *ignored) {}
-
static gpr_uint32 decode_compression(grpc_mdelem *md) {
- grpc_compression_algorithm algorithm;
- void *user_data = grpc_mdelem_get_user_data(md, destroy_compression);
- if (user_data) {
- algorithm =
- ((grpc_compression_algorithm)(gpr_intptr)user_data) - COMPRESS_OFFSET;
- } else {
+ grpc_compression_algorithm algorithm =
+ grpc_compression_algorithm_from_mdstr(md->value);
+ if (algorithm == GRPC_COMPRESS_ALGORITHMS_COUNT) {
const char *md_c_str = grpc_mdstr_as_c_string(md->value);
- if (!grpc_compression_algorithm_parse(md_c_str, strlen(md_c_str),
- &algorithm)) {
- gpr_log(GPR_ERROR, "Invalid compression algorithm: '%s'", md_c_str);
- assert(0);
- }
- grpc_mdelem_set_user_data(
- md, destroy_compression,
- (void *)(gpr_intptr)(algorithm + COMPRESS_OFFSET));
+ gpr_log(GPR_ERROR, "Invalid compression algorithm: '%s'", md_c_str);
}
return algorithm;
}
-static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_metadata_batch *md) {
- grpc_linked_mdelem *l;
+static grpc_mdelem *recv_common_filter(grpc_call *call, grpc_mdelem *elem) {
+ if (elem->key == GRPC_MDSTR_GRPC_STATUS) {
+ GPR_TIMER_BEGIN("status", 0);
+ set_status_code(call, STATUS_FROM_WIRE, decode_status(elem));
+ GPR_TIMER_END("status", 0);
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_MESSAGE) {
+ GPR_TIMER_BEGIN("status-details", 0);
+ set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(elem->value));
+ GPR_TIMER_END("status-details", 0);
+ return NULL;
+ }
+ return elem;
+}
+
+static grpc_mdelem *publish_app_metadata(grpc_call *call, grpc_mdelem *elem,
+ int is_trailing) {
grpc_metadata_array *dest;
grpc_metadata *mdusr;
- int is_trailing;
-
- is_trailing = call->read_state >= READ_STATE_GOT_INITIAL_METADATA;
- for (l = md->list.head; l != NULL; l = l->next) {
- grpc_mdelem *mdel = l->md;
- grpc_mdstr *key = mdel->key;
- if (key == grpc_channel_get_status_string(call->channel)) {
- GPR_TIMER_BEGIN("status", 0);
- set_status_code(call, STATUS_FROM_WIRE, decode_status(mdel));
- GPR_TIMER_END("status", 0);
- } else if (key == grpc_channel_get_message_string(call->channel)) {
- GPR_TIMER_BEGIN("status-details", 0);
- set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(mdel->value));
- GPR_TIMER_END("status-details", 0);
- } else if (key ==
- grpc_channel_get_compression_algorithm_string(call->channel)) {
- GPR_TIMER_BEGIN("compression_algorithm", 0);
- set_compression_algorithm(call, decode_compression(mdel));
- GPR_TIMER_END("compression_algorithm", 0);
- } else if (key == grpc_channel_get_encodings_accepted_by_peer_string(
- call->channel)) {
- GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
- set_encodings_accepted_by_peer(call, mdel);
- GPR_TIMER_END("encodings_accepted_by_peer", 0);
- } else {
- GPR_TIMER_BEGIN("report_up", 0);
- dest = &call->buffered_metadata[is_trailing];
- if (dest->count == dest->capacity) {
- dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
- dest->metadata =
- gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
- }
- mdusr = &dest->metadata[dest->count++];
- mdusr->key = grpc_mdstr_as_c_string(mdel->key);
- mdusr->value = grpc_mdstr_as_c_string(mdel->value);
- mdusr->value_length = GPR_SLICE_LENGTH(mdel->value->slice);
- if (call->owned_metadata_count == call->owned_metadata_capacity) {
- call->owned_metadata_capacity =
- GPR_MAX(call->owned_metadata_capacity + 8,
- call->owned_metadata_capacity * 2);
- call->owned_metadata =
- gpr_realloc(call->owned_metadata,
- sizeof(grpc_mdelem *) * call->owned_metadata_capacity);
- }
- call->owned_metadata[call->owned_metadata_count++] = mdel;
- l->md = NULL;
- GPR_TIMER_END("report_up", 0);
- }
- }
- if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
- 0 &&
- !call->is_client) {
- GPR_TIMER_BEGIN("set_deadline_alarm", 0);
- set_deadline_alarm(exec_ctx, call, md->deadline);
- GPR_TIMER_END("set_deadline_alarm", 0);
- }
- if (!is_trailing) {
- call->read_state = READ_STATE_GOT_INITIAL_METADATA;
- }
+ GPR_TIMER_BEGIN("publish_app_metadata", 0);
+ dest = call->buffered_metadata[is_trailing];
+ if (dest->count == dest->capacity) {
+ dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
+ dest->metadata =
+ gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
+ }
+ mdusr = &dest->metadata[dest->count++];
+ mdusr->key = grpc_mdstr_as_c_string(elem->key);
+ mdusr->value = grpc_mdstr_as_c_string(elem->value);
+ mdusr->value_length = GPR_SLICE_LENGTH(elem->value->slice);
+ GPR_TIMER_END("publish_app_metadata", 0);
+ return elem;
+}
- for (l = md->list.head; l; l = l->next) {
- if (l->md) GRPC_MDELEM_UNREF(l->md);
+static grpc_mdelem *recv_initial_filter(void *callp, grpc_mdelem *elem) {
+ grpc_call *call = callp;
+ elem = recv_common_filter(call, elem);
+ if (elem == NULL) {
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_ENCODING) {
+ GPR_TIMER_BEGIN("compression_algorithm", 0);
+ set_compression_algorithm(call, decode_compression(elem));
+ GPR_TIMER_END("compression_algorithm", 0);
+ return NULL;
+ } else if (elem->key == GRPC_MDSTR_GRPC_ACCEPT_ENCODING) {
+ GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
+ set_encodings_accepted_by_peer(call, elem);
+ GPR_TIMER_END("encodings_accepted_by_peer", 0);
+ return NULL;
+ } else {
+ return publish_app_metadata(call, elem, 0);
}
- for (l = md->garbage.head; l; l = l->next) {
- GRPC_MDELEM_UNREF(l->md);
+}
+
+static grpc_mdelem *recv_trailing_filter(void *callp, grpc_mdelem *elem) {
+ grpc_call *call = callp;
+ elem = recv_common_filter(call, elem);
+ if (elem == NULL) {
+ return NULL;
+ } else {
+ return publish_app_metadata(call, elem, 1);
}
}
@@ -1626,19 +892,7 @@ static void set_status_value_directly(grpc_status_code status, void *dest) {
}
static void set_cancelled_value(grpc_status_code status, void *dest) {
- *(grpc_status_code *)dest = (status != GRPC_STATUS_OK);
-}
-
-static void finish_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, int success,
- void *tag) {
- grpc_cq_end_op(exec_ctx, call->cq, tag, success, done_completion, call,
- allocate_completion(call));
-}
-
-static void finish_batch_with_close(grpc_exec_ctx *exec_ctx, grpc_call *call,
- int success, void *tag) {
- grpc_cq_end_op(exec_ctx, call->cq, tag, 1, done_completion, call,
- allocate_completion(call));
+ *(int *)dest = (status != GRPC_STATUS_OK);
}
static int are_write_flags_valid(gpr_uint32 flags) {
@@ -1649,258 +903,504 @@ static int are_write_flags_valid(gpr_uint32 flags) {
return !(flags & invalid_positions);
}
-grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
- size_t nops, void *tag, void *reserved) {
- grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT];
- size_t in;
- size_t out;
+static batch_control *allocate_batch_control(grpc_call *call) {
+ size_t i;
+ for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
+ if ((call->used_batches & (1 << i)) == 0) {
+ call->used_batches =
+ (gpr_uint8)(call->used_batches | (gpr_uint8)(1 << i));
+ return &call->active_batches[i];
+ }
+ }
+ return NULL;
+}
+
+static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_cq_completion *storage) {
+ batch_control *bctl = user_data;
+ grpc_call *call = bctl->call;
+ gpr_mu_lock(&call->mu);
+ call->used_batches = (gpr_uint8)(
+ call->used_batches & ~(gpr_uint8)(1 << (bctl - call->active_batches)));
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+}
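
The batch_control objects are a small fixed pool on the call: used_batches is a bitmask in which bit i marks active_batches[i] as busy, claimed in allocate_batch_control and cleared here once the completion has been consumed. The same claim/release bookkeeping in isolation, with illustrative names (slot_pool and MAX_SLOTS are not from the tree):

    #include <stddef.h>

    #define MAX_SLOTS 8 /* illustrative; the call uses MAX_CONCURRENT_BATCHES */

    typedef struct {
      unsigned char used; /* bit i set => slots[i] is in use */
      int slots[MAX_SLOTS];
    } slot_pool;

    static int *claim_slot(slot_pool *p) {
      size_t i;
      for (i = 0; i < MAX_SLOTS; i++) {
        if ((p->used & (1u << i)) == 0) {
          p->used = (unsigned char)(p->used | (1u << i));
          return &p->slots[i];
        }
      }
      return NULL; /* pool exhausted, like allocate_batch_control returning NULL */
    }

    static void release_slot(slot_pool *p, int *slot) {
      /* pointer difference recovers the index, as bctl - call->active_batches does */
      p->used = (unsigned char)(p->used & ~(1u << (slot - p->slots)));
    }
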
+
+static void post_batch_completion(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ if (bctl->is_notify_tag_closure) {
+ grpc_exec_ctx_enqueue(exec_ctx, bctl->notify_tag, bctl->success);
+ gpr_mu_lock(&call->mu);
+ bctl->call->used_batches =
+ (gpr_uint8)(bctl->call->used_batches &
+ ~(gpr_uint8)(1 << (bctl - bctl->call->active_batches)));
+ gpr_mu_unlock(&call->mu);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+ } else {
+ grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->success,
+ finish_batch_completion, bctl, &bctl->cq_completion);
+ }
+}
+
+static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ for (;;) {
+ size_t remaining = call->receiving_stream->length -
+ (*call->receiving_buffer)->data.raw.slice_buffer.length;
+ if (remaining == 0) {
+ call->receiving_message = 0;
+ grpc_byte_stream_destroy(call->receiving_stream);
+ call->receiving_stream = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ return;
+ }
+ if (grpc_byte_stream_next(exec_ctx, call->receiving_stream,
+ &call->receiving_slice, remaining,
+ &call->receiving_slice_ready)) {
+ gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
+ } else {
+ return;
+ }
+ }
+}
+
+static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ int success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ GPR_ASSERT(success);
+ gpr_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
+ call->receiving_slice);
+
+ continue_receiving_slices(exec_ctx, bctl);
+}
+
+static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, int success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+ grpc_call *child_call;
+ grpc_call *next_child_call;
+
+ gpr_mu_lock(&call->mu);
+ if (bctl->send_initial_metadata) {
+ grpc_metadata_batch_destroy(
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = 0;
+ }
+ if (bctl->send_final_op) {
+ grpc_metadata_batch_destroy(
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
+ }
+ if (bctl->recv_initial_metadata) {
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
+ grpc_metadata_batch_filter(md, recv_initial_filter, call);
+
+ if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
+ 0 &&
+ !call->is_client) {
+ GPR_TIMER_BEGIN("set_deadline_alarm", 0);
+ set_deadline_alarm(exec_ctx, call, md->deadline);
+ GPR_TIMER_END("set_deadline_alarm", 0);
+ }
+ }
+ if (bctl->recv_final_op) {
+ grpc_metadata_batch *md =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+ grpc_metadata_batch_filter(md, recv_trailing_filter, call);
+
+ if (call->have_alarm) {
+ grpc_timer_cancel(exec_ctx, &call->alarm);
+ }
+ /* propagate cancellation to any interested children */
+ child_call = call->first_child;
+ if (child_call != NULL) {
+ do {
+ next_child_call = child_call->sibling_next;
+ if (child_call->cancellation_is_inherited) {
+ GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
+ grpc_call_cancel(child_call, NULL);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
+ }
+ child_call = next_child_call;
+ } while (child_call != call->first_child);
+ }
+
+ if (call->is_client) {
+ get_final_status(call, set_status_value_directly,
+ call->final_op.client.status);
+ get_final_details(call, call->final_op.client.status_details,
+ call->final_op.client.status_details_capacity);
+ } else {
+ get_final_status(call, set_cancelled_value,
+ call->final_op.server.cancelled);
+ }
+
+ success = 1;
+ }
+ bctl->success = success != 0;
+ gpr_mu_unlock(&call->mu);
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+}
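
Each batch carries a gpr_refcount, steps_to_complete, initialized below (in call_start_batch) to the number of completion callbacks expected: the transport's on_complete plus one more when a message read is outstanding. Whichever callback drops the last reference posts the batch completion. A minimal sketch of that rendezvous, assuming <grpc/support/sync.h> provides the gpr primitives used here:

    #include <grpc/support/sync.h>

    /* Illustrative rendezvous: N independent completion paths, one publisher.
       gpr_unref returns non-zero only for the caller that reaches zero. */
    static void one_step_done(gpr_refcount *steps) {
      if (gpr_unref(steps)) {
        /* last outstanding step: publish exactly once,
           equivalent to post_batch_completion above */
      }
    }
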
+
+static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
+ int success) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+
+ if (call->receiving_stream == NULL) {
+ *call->receiving_buffer = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ } else if (call->receiving_stream->length >
+ grpc_channel_get_max_message_length(call->channel)) {
+ cancel_with_status(exec_ctx, call, GRPC_STATUS_INTERNAL,
+ "Max message size exceeded");
+ grpc_byte_stream_destroy(call->receiving_stream);
+ call->receiving_stream = NULL;
+ *call->receiving_buffer = NULL;
+ if (gpr_unref(&bctl->steps_to_complete)) {
+ post_batch_completion(exec_ctx, bctl);
+ }
+ } else {
+ call->test_only_last_message_flags = call->receiving_stream->flags;
+ if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
+ (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
+ *call->receiving_buffer = grpc_raw_compressed_byte_buffer_create(
+ NULL, 0, call->compression_algorithm);
+ } else {
+ *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
+ }
+ grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
+ bctl);
+ continue_receiving_slices(exec_ctx, bctl);
+ /* early out */
+ return;
+ }
+}
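
grpc_byte_stream_next either delivers a slice synchronously (returns non-zero and fills the out-parameter) or returns 0 and schedules the supplied closure once data arrives; continue_receiving_slices loops over the synchronous case and receiving_slice_ready re-enters the loop for the asynchronous one. A condensed sketch of the same pull pattern, assuming the caller owns the stream, a gpr_slice_buffer destination, a closure wired to re-invoke this function, and includes matching the surrounding file:

    static void pull_all(grpc_exec_ctx *exec_ctx, grpc_byte_stream *stream,
                         gpr_slice_buffer *dest, grpc_closure *on_ready) {
      gpr_slice slice;
      while (dest->length < stream->length) {
        size_t remaining = stream->length - dest->length;
        if (!grpc_byte_stream_next(exec_ctx, stream, &slice, remaining,
                                   on_ready)) {
          return; /* on_ready fires later and should call pull_all again */
        }
        gpr_slice_buffer_add(dest, slice); /* slice was available immediately */
      }
      /* dest now holds the complete message */
    }
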
+
+static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, const grpc_op *ops,
+ size_t nops, void *notify_tag,
+ int is_notify_tag_closure) {
+ grpc_transport_stream_op stream_op;
+ size_t i;
const grpc_op *op;
- grpc_ioreq *req;
- void (*finish_func)(grpc_exec_ctx *, grpc_call *, int, void *) = finish_batch;
- grpc_call_error error;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ batch_control *bctl;
+ int num_completion_callbacks_needed = 1;
+ grpc_call_error error = GRPC_CALL_OK;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
- GRPC_API_TRACE(
- "grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
- 5, (call, ops, (unsigned long)nops, tag, reserved));
+ GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
- if (reserved != NULL) {
- error = GRPC_CALL_ERROR;
- goto done;
- }
+ memset(&stream_op, 0, sizeof(stream_op));
- GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);
+ /* TODO(ctiller): this feels like it could be made lock-free */
+ gpr_mu_lock(&call->mu);
+ bctl = allocate_batch_control(call);
+ memset(bctl, 0, sizeof(*bctl));
+ bctl->call = call;
+ bctl->notify_tag = notify_tag;
+ bctl->is_notify_tag_closure = (gpr_uint8)(is_notify_tag_closure != 0);
if (nops == 0) {
- grpc_cq_begin_op(call->cq);
GRPC_CALL_INTERNAL_REF(call, "completion");
- grpc_cq_end_op(&exec_ctx, call->cq, tag, 1, done_completion, call,
- allocate_completion(call));
+ bctl->success = 1;
+ if (!is_notify_tag_closure) {
+ grpc_cq_begin_op(call->cq, notify_tag);
+ }
+ gpr_mu_unlock(&call->mu);
+ post_batch_completion(exec_ctx, bctl);
error = GRPC_CALL_OK;
goto done;
}
- /* rewrite batch ops into ioreq ops */
- for (in = 0, out = 0; in < nops; in++) {
- op = &ops[in];
+ /* rewrite batch ops into a transport op */
+ for (i = 0; i < nops; i++) {
+ op = &ops[i];
if (op->reserved != NULL) {
error = GRPC_CALL_ERROR;
- goto done;
+ goto done_with_error;
}
switch (op->op) {
case GRPC_OP_SEND_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
+ }
+ if (call->sent_initial_metadata) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (op->data.send_initial_metadata.count > INT_MAX) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_SEND_INITIAL_METADATA;
- req->data.send_metadata.count = op->data.send_initial_metadata.count;
- req->data.send_metadata.metadata =
- op->data.send_initial_metadata.metadata;
- req->flags = op->flags;
+ bctl->send_initial_metadata = 1;
+ call->sent_initial_metadata = 1;
+ if (!prepare_application_metadata(
+ call, (int)op->data.send_initial_metadata.count,
+ op->data.send_initial_metadata.metadata, 0, call->is_client)) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
+ }
+ /* TODO(ctiller): just make these the same variable? */
+ call->metadata_batch[0][0].deadline = call->send_deadline;
+ stream_op.send_initial_metadata =
+ &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
}
if (op->data.send_message == NULL) {
error = GRPC_CALL_ERROR_INVALID_MESSAGE;
- goto done;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->sending_message) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_SEND_MESSAGE;
- req->data.send_message = op->data.send_message;
- req->flags = op->flags;
+ bctl->send_message = 1;
+ call->sending_message = 1;
+ grpc_slice_buffer_stream_init(
+ &call->sending_stream,
+ &op->data.send_message->data.raw.slice_buffer, op->flags);
+ stream_op.send_message = &call->sending_stream.base;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
}
if (!call->is_client) {
error = GRPC_CALL_ERROR_NOT_ON_SERVER;
- goto done;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->sent_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_SEND_CLOSE;
- req->flags = op->flags;
+ bctl->send_final_op = 1;
+ call->sent_final_op = 1;
+ stream_op.send_trailing_metadata =
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
}
if (call->is_client) {
error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
- goto done;
+ goto done_with_error;
+ }
+ if (call->sent_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (op->data.send_status_from_server.trailing_metadata_count >
+ INT_MAX) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_SEND_TRAILING_METADATA;
- req->flags = op->flags;
- req->data.send_metadata.count =
- op->data.send_status_from_server.trailing_metadata_count;
- req->data.send_metadata.metadata =
- op->data.send_status_from_server.trailing_metadata;
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ bctl->send_final_op = 1;
+ call->sent_final_op = 1;
+ call->send_extra_metadata_count = 1;
+ call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
+ call->channel, op->data.send_status_from_server.status);
+ if (op->data.send_status_from_server.status_details != NULL) {
+ call->send_extra_metadata[1].md = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_mdstr_from_string(
+ op->data.send_status_from_server.status_details));
+ call->send_extra_metadata_count++;
+ set_status_details(
+ call, STATUS_FROM_API_OVERRIDE,
+ GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
}
- req->op = GRPC_IOREQ_SEND_STATUS;
- req->data.send_status.code = op->data.send_status_from_server.status;
- req->data.send_status.details =
- op->data.send_status_from_server.status_details != NULL
- ? grpc_mdstr_from_string(
- call->metadata_context,
- op->data.send_status_from_server.status_details)
- : NULL;
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ set_status_code(call, STATUS_FROM_API_OVERRIDE,
+ (gpr_uint32)op->data.send_status_from_server.status);
+ if (!prepare_application_metadata(
+ call,
+ (int)op->data.send_status_from_server.trailing_metadata_count,
+ op->data.send_status_from_server.trailing_metadata, 1, 1)) {
+ error = GRPC_CALL_ERROR_INVALID_METADATA;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_SEND_CLOSE;
+ stream_op.send_trailing_metadata =
+ &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_RECV_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
- }
- if (!call->is_client) {
- error = GRPC_CALL_ERROR_NOT_ON_SERVER;
- goto done;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->received_initial_metadata) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
- req->data.recv_metadata = op->data.recv_initial_metadata;
- req->data.recv_metadata->count = 0;
- req->flags = op->flags;
+ call->received_initial_metadata = 1;
+ call->buffered_metadata[0] = op->data.recv_initial_metadata;
+ bctl->recv_initial_metadata = 1;
+ stream_op.recv_initial_metadata =
+ &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
break;
case GRPC_OP_RECV_MESSAGE:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->receiving_message) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_RECV_MESSAGE;
- req->data.recv_message = op->data.recv_message;
- req->flags = op->flags;
+ call->receiving_message = 1;
+ bctl->recv_message = 1;
+ call->receiving_buffer = op->data.recv_message;
+ stream_op.recv_message = &call->receiving_stream;
+ grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
+ bctl);
+ stream_op.recv_message_ready = &call->receiving_stream_ready;
+ num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
}
if (!call->is_client) {
error = GRPC_CALL_ERROR_NOT_ON_SERVER;
- goto done;
- }
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_RECV_STATUS;
- req->flags = op->flags;
- req->data.recv_status.set_value = set_status_value_directly;
- req->data.recv_status.user_data = op->data.recv_status_on_client.status;
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->received_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_RECV_STATUS_DETAILS;
- req->data.recv_status_details.details =
+ call->received_final_op = 1;
+ call->buffered_metadata[1] =
+ op->data.recv_status_on_client.trailing_metadata;
+ call->final_op.client.status = op->data.recv_status_on_client.status;
+ call->final_op.client.status_details =
op->data.recv_status_on_client.status_details;
- req->data.recv_status_details.details_capacity =
+ call->final_op.client.status_details_capacity =
op->data.recv_status_on_client.status_details_capacity;
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
- }
- req->op = GRPC_IOREQ_RECV_TRAILING_METADATA;
- req->data.recv_metadata =
- op->data.recv_status_on_client.trailing_metadata;
- req->data.recv_metadata->count = 0;
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
- }
- req->op = GRPC_IOREQ_RECV_CLOSE;
- finish_func = finish_batch_with_close;
+ bctl->recv_final_op = 1;
+ stream_op.recv_trailing_metadata =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
- goto done;
+ goto done_with_error;
}
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->is_client) {
+ error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_RECV_STATUS;
- req->flags = op->flags;
- req->data.recv_status.set_value = set_cancelled_value;
- req->data.recv_status.user_data =
- op->data.recv_close_on_server.cancelled;
- req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) {
- error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
- goto done;
+ if (call->received_final_op) {
+ error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ goto done_with_error;
}
- req->op = GRPC_IOREQ_RECV_CLOSE;
- finish_func = finish_batch_with_close;
+ call->received_final_op = 1;
+ call->final_op.server.cancelled =
+ op->data.recv_close_on_server.cancelled;
+ bctl->recv_final_op = 1;
+ stream_op.recv_trailing_metadata =
+ &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
break;
}
}
GRPC_CALL_INTERNAL_REF(call, "completion");
- grpc_cq_begin_op(call->cq);
+ if (!is_notify_tag_closure) {
+ grpc_cq_begin_op(call->cq, notify_tag);
+ }
+ gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
+
+ stream_op.context = call->context;
+ grpc_closure_init(&bctl->finish_batch, finish_batch, bctl);
+ stream_op.on_complete = &bctl->finish_batch;
+ gpr_mu_unlock(&call->mu);
+
+ execute_op(exec_ctx, call, &stream_op);
- error = grpc_call_start_ioreq_and_call_back(&exec_ctx, call, reqs, out,
- finish_func, tag);
done:
- grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_start_batch", 0);
return error;
+
+done_with_error:
+ /* reverse any mutations that occurred */
+ if (bctl->send_initial_metadata) {
+ call->sent_initial_metadata = 0;
+ grpc_metadata_batch_clear(&call->metadata_batch[0][0]);
+ }
+ if (bctl->send_message) {
+ call->sending_message = 0;
+ grpc_byte_stream_destroy(&call->sending_stream.base);
+ }
+ if (bctl->send_final_op) {
+ call->sent_final_op = 0;
+ grpc_metadata_batch_clear(&call->metadata_batch[0][1]);
+ }
+ if (bctl->recv_initial_metadata) {
+ call->received_initial_metadata = 0;
+ }
+ if (bctl->recv_message) {
+ call->receiving_message = 0;
+ }
+ if (bctl->recv_final_op) {
+ call->received_final_op = 0;
+ }
+ gpr_mu_unlock(&call->mu);
+ goto done;
+}
+
+grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
+ size_t nops, void *tag, void *reserved) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_call_error err;
+
+ GRPC_API_TRACE(
+ "grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
+ 5, (call, ops, (unsigned long)nops, tag, reserved));
+
+ if (reserved != NULL) {
+ err = GRPC_CALL_ERROR;
+ } else {
+ err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0);
+ }
+
+ grpc_exec_ctx_finish(&exec_ctx);
+ return err;
+}
+
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ const grpc_op *ops,
+ size_t nops,
+ grpc_closure *closure) {
+ return call_start_batch(exec_ctx, call, ops, nops, closure, 1);
}
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
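
At the public surface the contract is unchanged except that each op kind may now appear at most once per batch (a duplicate fails with GRPC_CALL_ERROR_TOO_MANY_OPERATIONS instead of exhausting ioreq slots), and the whole batch still completes as a single event. A minimal client-side sketch, assuming call, cq, tag, trailing_md, details and details_capacity are set up by the caller:

    grpc_op ops[3];
    grpc_op *op = ops;
    grpc_status_code status;
    memset(ops, 0, sizeof(ops)); /* flags and reserved stay zero */

    op->op = GRPC_OP_SEND_INITIAL_METADATA;
    op->data.send_initial_metadata.count = 0;
    op++;
    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
    op++;
    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
    op->data.recv_status_on_client.trailing_metadata = &trailing_md;
    op->data.recv_status_on_client.status = &status;
    op->data.recv_status_on_client.status_details = &details;
    op->data.recv_status_on_client.status_details_capacity = &details_capacity;
    op++;

    GPR_ASSERT(GRPC_CALL_OK ==
               grpc_call_start_batch(call, ops, (size_t)(op - ops), tag, NULL));
    /* one GRPC_OP_COMPLETE event for `tag` arrives on cq once all three ops finish */
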
diff --git a/src/core/surface/call.h b/src/core/surface/call.h
index 9b7c6f9bfb..b53340df8e 100644
--- a/src/core/surface/call.h
+++ b/src/core/surface/call.h
@@ -44,51 +44,6 @@
extern "C" {
#endif
-/* Primitive operation types - grpc_op's get rewritten into these */
-typedef enum {
- GRPC_IOREQ_RECV_INITIAL_METADATA,
- GRPC_IOREQ_RECV_MESSAGE,
- GRPC_IOREQ_RECV_TRAILING_METADATA,
- GRPC_IOREQ_RECV_STATUS,
- GRPC_IOREQ_RECV_STATUS_DETAILS,
- GRPC_IOREQ_RECV_CLOSE,
- GRPC_IOREQ_SEND_INITIAL_METADATA,
- GRPC_IOREQ_SEND_MESSAGE,
- GRPC_IOREQ_SEND_TRAILING_METADATA,
- GRPC_IOREQ_SEND_STATUS,
- GRPC_IOREQ_SEND_CLOSE,
- GRPC_IOREQ_OP_COUNT
-} grpc_ioreq_op;
-
-typedef union {
- grpc_metadata_array *recv_metadata;
- grpc_byte_buffer **recv_message;
- struct {
- void (*set_value)(grpc_status_code status, void *user_data);
- void *user_data;
- } recv_status;
- struct {
- char **details;
- size_t *details_capacity;
- } recv_status_details;
- struct {
- size_t count;
- grpc_metadata *metadata;
- } send_metadata;
- grpc_byte_buffer *send_message;
- struct {
- grpc_status_code code;
- grpc_mdstr *details;
- } send_status;
-} grpc_ioreq_data;
-
-typedef struct {
- grpc_ioreq_op op;
- gpr_uint32 flags;
- /**< A copy of the write flags from grpc_op */
- grpc_ioreq_data data;
-} grpc_ioreq;
-
typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
@@ -103,9 +58,8 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
-grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
-#ifdef GRPC_CALL_REF_COUNT_DEBUG
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
const char *reason);
@@ -121,12 +75,14 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call);
grpc_call_internal_unref(exec_ctx, call)
#endif
-grpc_call_error grpc_call_start_ioreq_and_call_back(
- grpc_exec_ctx *exec_ctx, grpc_call *call, const grpc_ioreq *reqs,
- size_t nreqs, grpc_ioreq_completion_func on_complete, void *user_data);
-
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ const grpc_op *ops,
+ size_t nops,
+ grpc_closure *closure);
+
/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
@@ -134,19 +90,6 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag);
-void grpc_server_log_request_call(char *file, int line,
- gpr_log_severity severity,
- grpc_server *server, grpc_call **call,
- grpc_call_details *details,
- grpc_metadata_array *initial_metadata,
- grpc_completion_queue *cq_bound_to_call,
- grpc_completion_queue *cq_for_notification,
- void *tag);
-
-void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,
- grpc_server *server, grpc_completion_queue *cq,
- void *tag);
-
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
@@ -157,16 +100,6 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
if (grpc_api_trace) grpc_call_log_batch(sev, call, ops, nops, tag)
-#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, \
- initial_metadata, cq_bound_to_call, \
- cq_for_notifications, tag) \
- if (grpc_api_trace) \
- grpc_server_log_request_call(sev, server, call, details, initial_metadata, \
- cq_bound_to_call, cq_for_notifications, tag)
-
-#define GRPC_SERVER_LOG_SHUTDOWN(sev, server, cq, tag) \
- if (grpc_api_trace) grpc_server_log_shutdown(sev, server, cq, tag)
-
gpr_uint8 grpc_call_is_client(grpc_call *call);
#ifdef __cplusplus
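
grpc_call_start_batch_and_execute is the in-core variant for callers that have no completion queue: the ops array follows the same rules, but completion runs the supplied grpc_closure on the exec_ctx. A hedged sketch of such a caller; on_done, state and storage are illustrative names:

    static void on_done(grpc_exec_ctx *exec_ctx, void *state, int success) {
      /* `success` mirrors the success bit a cq event would have carried */
    }

    static void start_internal_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
                                     const grpc_op *ops, size_t nops, void *state,
                                     grpc_closure *storage) {
      grpc_closure_init(storage, on_done, state);
      GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch_and_execute(
                                     exec_ctx, call, ops, nops, storage));
    }
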
diff --git a/src/core/surface/call_log_batch.c b/src/core/surface/call_log_batch.c
index 2dd9737cf8..46756f418b 100644
--- a/src/core/surface/call_log_batch.c
+++ b/src/core/surface/call_log_batch.c
@@ -110,36 +110,9 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
void *tag) {
char *tmp;
size_t i;
- gpr_log(file, line, severity,
- "grpc_call_start_batch(call=%p, ops=%p, nops=%d, tag=%p)", call, ops,
- nops, tag);
for (i = 0; i < nops; i++) {
tmp = grpc_op_string(&ops[i]);
gpr_log(file, line, severity, "ops[%d]: %s", i, tmp);
gpr_free(tmp);
}
}
-
-void grpc_server_log_request_call(char *file, int line,
- gpr_log_severity severity,
- grpc_server *server, grpc_call **call,
- grpc_call_details *details,
- grpc_metadata_array *initial_metadata,
- grpc_completion_queue *cq_bound_to_call,
- grpc_completion_queue *cq_for_notification,
- void *tag) {
- gpr_log(file, line, severity,
- "grpc_server_request_call(server=%p, call=%p, details=%p, "
- "initial_metadata=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
- "tag=%p)",
- server, call, details, initial_metadata, cq_bound_to_call,
- cq_for_notification, tag);
-}
-
-void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,
- grpc_server *server, grpc_completion_queue *cq,
- void *tag) {
- gpr_log(file, line, severity,
- "grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", server,
- cq, tag);
-}
diff --git a/src/core/surface/channel.c b/src/core/surface/channel.c
index a9a5f828f2..d0a8b0be09 100644
--- a/src/core/surface/channel.c
+++ b/src/core/surface/channel.c
@@ -46,6 +46,7 @@
#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/surface/init.h"
+#include "src/core/transport/static_metadata.h"
/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS.
* Avoids needing to take a metadata context lock for sending status
@@ -62,19 +63,8 @@ typedef struct registered_call {
struct grpc_channel {
int is_client;
- gpr_refcount refs;
gpr_uint32 max_message_length;
- grpc_mdctx *metadata_context;
- /** mdstr for the grpc-status key */
- grpc_mdstr *grpc_status_string;
- grpc_mdstr *grpc_compression_algorithm_string;
- grpc_mdstr *grpc_encodings_accepted_by_peer_string;
- grpc_mdstr *grpc_message_string;
- grpc_mdstr *path_string;
- grpc_mdstr *authority_string;
grpc_mdelem *default_authority;
- /** mdelem for grpc-status: 0 thru grpc-status: 2 */
- grpc_mdelem *grpc_status_elem[NUM_CACHED_STATUS_ELEMS];
gpr_mu registered_call_mu;
registered_call *registered_calls;
@@ -90,10 +80,12 @@ struct grpc_channel {
/* the protobuf library will (by default) start warning at 100megs */
#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
+static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, int success);
+
grpc_channel *grpc_channel_create_from_filters(
grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_filter **filters, size_t num_filters,
- const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client) {
+ const grpc_channel_args *args, int is_client) {
size_t i;
size_t size =
sizeof(grpc_channel) + grpc_channel_stack_size(filters, num_filters);
@@ -102,24 +94,6 @@ grpc_channel *grpc_channel_create_from_filters(
channel->target = gpr_strdup(target);
GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
channel->is_client = is_client;
- /* decremented by grpc_channel_destroy */
- gpr_ref_init(&channel->refs, 1);
- channel->metadata_context = mdctx;
- channel->grpc_status_string = grpc_mdstr_from_string(mdctx, "grpc-status");
- channel->grpc_compression_algorithm_string =
- grpc_mdstr_from_string(mdctx, "grpc-encoding");
- channel->grpc_encodings_accepted_by_peer_string =
- grpc_mdstr_from_string(mdctx, "grpc-accept-encoding");
- channel->grpc_message_string = grpc_mdstr_from_string(mdctx, "grpc-message");
- for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
- char buf[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa((long)i, buf);
- channel->grpc_status_elem[i] = grpc_mdelem_from_metadata_strings(
- mdctx, GRPC_MDSTR_REF(channel->grpc_status_string),
- grpc_mdstr_from_string(mdctx, buf));
- }
- channel->path_string = grpc_mdstr_from_string(mdctx, ":path");
- channel->authority_string = grpc_mdstr_from_string(mdctx, ":authority");
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = NULL;
@@ -138,7 +112,7 @@ grpc_channel *grpc_channel_create_from_filters(
}
} else if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
if (args->args[i].type != GRPC_ARG_STRING) {
- gpr_log(GPR_ERROR, "%s: must be an string",
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
GRPC_ARG_DEFAULT_AUTHORITY);
} else {
if (channel->default_authority) {
@@ -146,21 +120,22 @@ grpc_channel *grpc_channel_create_from_filters(
GRPC_MDELEM_UNREF(channel->default_authority);
}
channel->default_authority = grpc_mdelem_from_strings(
- mdctx, ":authority", args->args[i].value.string);
+ ":authority", args->args[i].value.string);
}
} else if (0 ==
strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
if (args->args[i].type != GRPC_ARG_STRING) {
- gpr_log(GPR_ERROR, "%s: must be an string",
+ gpr_log(GPR_ERROR, "%s ignored: it must be a string",
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
if (channel->default_authority) {
/* other ways of setting this (notably ssl) take precedence */
- gpr_log(GPR_ERROR, "%s: default host already set some other way",
- GRPC_ARG_DEFAULT_AUTHORITY);
+ gpr_log(GPR_ERROR,
+ "%s ignored: default host already set some other way",
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
channel->default_authority = grpc_mdelem_from_strings(
- mdctx, ":authority", args->args[i].value.string);
+ ":authority", args->args[i].value.string);
}
}
}
@@ -171,14 +146,15 @@ grpc_channel *grpc_channel_create_from_filters(
target != NULL) {
char *default_authority = grpc_get_default_authority(target);
if (default_authority) {
- channel->default_authority = grpc_mdelem_from_strings(
- channel->metadata_context, ":authority", default_authority);
+ channel->default_authority =
+ grpc_mdelem_from_strings(":authority", default_authority);
}
gpr_free(default_authority);
}
- grpc_channel_stack_init(exec_ctx, filters, num_filters, channel, args,
- channel->metadata_context,
+ grpc_channel_stack_init(exec_ctx, 1, destroy_channel, channel, filters,
+ num_filters, args,
+ is_client ? "CLIENT_CHANNEL" : "SERVER_CHANNEL",
CHANNEL_STACK_FROM_CHANNEL(channel));
return channel;
@@ -219,21 +195,18 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
"grpc_channel_create_call("
"channel=%p, parent_call=%p, propagation_mask=%x, cq=%p, method=%s, "
"host=%s, "
- "deadline=gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
10, (channel, parent_call, (unsigned)propagation_mask, cq, method, host,
- (long)deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
- reserved));
+ (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
channel, parent_call, propagation_mask, cq,
- grpc_mdelem_from_metadata_strings(
- channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
- grpc_mdstr_from_string(channel->metadata_context, method)),
- host ? grpc_mdelem_from_metadata_strings(
- channel->metadata_context,
- GRPC_MDSTR_REF(channel->authority_string),
- grpc_mdstr_from_string(channel->metadata_context, host))
+ grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method)),
+ host ? grpc_mdelem_from_metadata_strings(GRPC_MDSTR_AUTHORITY,
+ grpc_mdstr_from_string(host))
: NULL,
deadline);
}
@@ -245,15 +218,11 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
"grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
4, (channel, method, host, reserved));
GPR_ASSERT(!reserved);
- rc->path = grpc_mdelem_from_metadata_strings(
- channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
- grpc_mdstr_from_string(channel->metadata_context, method));
- rc->authority =
- host ? grpc_mdelem_from_metadata_strings(
- channel->metadata_context,
- GRPC_MDSTR_REF(channel->authority_string),
- grpc_mdstr_from_string(channel->metadata_context, host))
- : NULL;
+ rc->path = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method));
+ rc->authority = host ? grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_AUTHORITY, grpc_mdstr_from_string(host))
+ : NULL;
gpr_mu_lock(&channel->registered_call_mu);
rc->next = channel->registered_calls;
channel->registered_calls = rc;
@@ -270,11 +239,11 @@ grpc_call *grpc_channel_create_registered_call(
"grpc_channel_create_registered_call("
"channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
"registered_call_handle=%p, "
- "deadline=gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
- registered_call_handle, (long)deadline.tv_sec, deadline.tv_nsec,
- (int)deadline.clock_type, reserved));
+ registered_call_handle, (long long)deadline.tv_sec,
+ (int)deadline.tv_nsec, (int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
channel, parent_call, propagation_mask, completion_queue,
@@ -282,28 +251,26 @@ grpc_call *grpc_channel_create_registered_call(
rc->authority ? GRPC_MDELEM_REF(rc->authority) : NULL, deadline);
}
-#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
-void grpc_channel_internal_ref(grpc_channel *c, const char *reason) {
- gpr_log(GPR_DEBUG, "CHANNEL: ref %p %d -> %d [%s]", c, c->refs.count,
- c->refs.count + 1, reason);
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define REF_REASON reason
+#define REF_ARG , const char *reason
#else
-void grpc_channel_internal_ref(grpc_channel *c) {
+#define REF_REASON ""
+#define REF_ARG
#endif
- gpr_ref(&c->refs);
+void grpc_channel_internal_ref(grpc_channel *c REF_ARG) {
+ GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}
-static void destroy_channel(grpc_exec_ctx *exec_ctx, grpc_channel *channel) {
- size_t i;
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *c REF_ARG) {
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
+}
+
+static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
+ grpc_channel *channel = arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
- for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
- GRPC_MDELEM_UNREF(channel->grpc_status_elem[i]);
- }
- GRPC_MDSTR_UNREF(channel->grpc_status_string);
- GRPC_MDSTR_UNREF(channel->grpc_compression_algorithm_string);
- GRPC_MDSTR_UNREF(channel->grpc_encodings_accepted_by_peer_string);
- GRPC_MDSTR_UNREF(channel->grpc_message_string);
- GRPC_MDSTR_UNREF(channel->path_string);
- GRPC_MDSTR_UNREF(channel->authority_string);
while (channel->registered_calls) {
registered_call *rc = channel->registered_calls;
channel->registered_calls = rc->next;
@@ -316,26 +283,11 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, grpc_channel *channel) {
if (channel->default_authority != NULL) {
GRPC_MDELEM_UNREF(channel->default_authority);
}
- grpc_mdctx_unref(channel->metadata_context);
gpr_mu_destroy(&channel->registered_call_mu);
gpr_free(channel->target);
gpr_free(channel);
}
-#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
-void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
- const char *reason) {
- gpr_log(GPR_DEBUG, "CHANNEL: unref %p %d -> %d [%s]", channel,
- channel->refs.count, channel->refs.count - 1, reason);
-#else
-void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
- grpc_channel *channel) {
-#endif
- if (gpr_unref(&channel->refs)) {
- destroy_channel(exec_ctx, channel);
- }
-}
-
void grpc_channel_destroy(grpc_channel *channel) {
grpc_transport_op op;
grpc_channel_element *elem;
@@ -355,38 +307,19 @@ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {
return CHANNEL_STACK_FROM_CHANNEL(channel);
}
-grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel) {
- return channel->metadata_context;
-}
-
-grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel) {
- return channel->grpc_status_string;
-}
-
-grpc_mdstr *grpc_channel_get_compression_algorithm_string(
- grpc_channel *channel) {
- return channel->grpc_compression_algorithm_string;
-}
-
-grpc_mdstr *grpc_channel_get_encodings_accepted_by_peer_string(
- grpc_channel *channel) {
- return channel->grpc_encodings_accepted_by_peer_string;
-}
-
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) {
- if (i >= 0 && i < NUM_CACHED_STATUS_ELEMS) {
- return GRPC_MDELEM_REF(channel->grpc_status_elem[i]);
- } else {
- char tmp[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa(i, tmp);
- return grpc_mdelem_from_metadata_strings(
- channel->metadata_context, GRPC_MDSTR_REF(channel->grpc_status_string),
- grpc_mdstr_from_string(channel->metadata_context, tmp));
+ char tmp[GPR_LTOA_MIN_BUFSIZE];
+ switch (i) {
+ case 0:
+ return GRPC_MDELEM_GRPC_STATUS_0;
+ case 1:
+ return GRPC_MDELEM_GRPC_STATUS_1;
+ case 2:
+ return GRPC_MDELEM_GRPC_STATUS_2;
}
-}
-
-grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel) {
- return channel->grpc_message_string;
+ gpr_ltoa(i, tmp);
+ return grpc_mdelem_from_metadata_strings(GRPC_MDSTR_GRPC_STATUS,
+ grpc_mdstr_from_string(tmp));
}
gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel) {
diff --git a/src/core/surface/channel.h b/src/core/surface/channel.h
index e5030d52d2..3d2ff23542 100644
--- a/src/core/surface/channel.h
+++ b/src/core/surface/channel.h
@@ -40,29 +40,20 @@
grpc_channel *grpc_channel_create_from_filters(
grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_filter **filters, size_t count,
- const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client);
+ const grpc_channel_args *args, int is_client);
/** Get a (borrowed) pointer to this channels underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
-/** Get a (borrowed) pointer to the channel wide metadata context */
-grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel);
-
/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
status_code.
The returned elem is owned by the caller. */
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel,
int status_code);
-grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel);
-grpc_mdstr *grpc_channel_get_compression_algorithm_string(
- grpc_channel *channel);
-grpc_mdstr *grpc_channel_get_encodings_accepted_by_peer_string(
- grpc_channel *channel);
-grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel);
gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);
-#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
const char *reason);
diff --git a/src/core/surface/channel_connectivity.c b/src/core/surface/channel_connectivity.c
index df2774b527..10f5c4da4d 100644
--- a/src/core/surface/channel_connectivity.c
+++ b/src/core/surface/channel_connectivity.c
@@ -83,7 +83,6 @@ typedef struct {
gpr_mu mu;
callback_phase phase;
int success;
- int removed;
grpc_closure on_complete;
grpc_timer alarm;
grpc_connectivity_state state;
@@ -135,30 +134,15 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
int due_to_completion) {
int delete = 0;
- grpc_channel_element *client_channel_elem = NULL;
- gpr_mu_lock(&w->mu);
- if (w->removed == 0) {
- w->removed = 1;
- client_channel_elem = grpc_channel_stack_last_element(
- grpc_channel_get_channel_stack(w->channel));
- if (client_channel_elem->filter == &grpc_client_channel_filter) {
- grpc_client_channel_del_interested_party(exec_ctx, client_channel_elem,
- grpc_cq_pollset(w->cq));
- } else {
- grpc_client_uchannel_del_interested_party(exec_ctx, client_channel_elem,
- grpc_cq_pollset(w->cq));
- }
- }
- gpr_mu_unlock(&w->mu);
if (due_to_completion) {
- gpr_mu_lock(&w->mu);
- w->success = 1;
- gpr_mu_unlock(&w->mu);
grpc_timer_cancel(exec_ctx, &w->alarm);
}
gpr_mu_lock(&w->mu);
+ if (due_to_completion) {
+ w->success = 1;
+ }
switch (w->phase) {
case WAITING:
w->phase = CALLING_BACK;
@@ -200,19 +184,18 @@ void grpc_channel_watch_connectivity_state(
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
"channel=%p, last_observed_state=%d, "
- "deadline=gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"cq=%p, tag=%p)",
- 7, (channel, (int)last_observed_state, (long)deadline.tv_sec,
- deadline.tv_nsec, (int)deadline.clock_type, cq, tag));
+ 7, (channel, (int)last_observed_state, (long long)deadline.tv_sec,
+ (int)deadline.tv_nsec, (int)deadline.clock_type, cq, tag));
- grpc_cq_begin_op(cq);
+ grpc_cq_begin_op(cq, tag);
gpr_mu_init(&w->mu);
grpc_closure_init(&w->on_complete, watch_complete, w);
w->phase = WAITING;
w->state = last_observed_state;
w->success = 0;
- w->removed = 0;
w->cq = cq;
w->tag = tag;
w->channel = channel;
@@ -223,16 +206,14 @@ void grpc_channel_watch_connectivity_state(
if (client_channel_elem->filter == &grpc_client_channel_filter) {
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
- grpc_client_channel_add_interested_party(&exec_ctx, client_channel_elem,
- grpc_cq_pollset(cq));
grpc_client_channel_watch_connectivity_state(&exec_ctx, client_channel_elem,
- &w->state, &w->on_complete);
+ grpc_cq_pollset(cq), &w->state,
+ &w->on_complete);
} else if (client_channel_elem->filter == &grpc_client_uchannel_filter) {
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_uchannel_connectivity");
- grpc_client_uchannel_add_interested_party(&exec_ctx, client_channel_elem,
- grpc_cq_pollset(cq));
grpc_client_uchannel_watch_connectivity_state(
- &exec_ctx, client_channel_elem, &w->state, &w->on_complete);
+ &exec_ctx, client_channel_elem, grpc_cq_pollset(cq), &w->state,
+ &w->on_complete);
}
grpc_exec_ctx_finish(&exec_ctx);
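
The public entry points this feeds are grpc_channel_check_connectivity_state and grpc_channel_watch_connectivity_state. A short usage sketch, assuming channel, cq and tag already exist and treating the five second deadline as illustrative:

    grpc_connectivity_state state =
        grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
    grpc_channel_watch_connectivity_state(
        channel, state,
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_seconds(5, GPR_TIMESPAN)),
        cq, tag);
    /* a GRPC_OP_COMPLETE event for `tag` is delivered on cq either when the state
       leaves `state` (success) or when the deadline expires (w->success stays 0) */
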
diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c
index 51d9130b63..97ec23408f 100644
--- a/src/core/surface/channel_create.c
+++ b/src/core/surface/channel_create.c
@@ -37,6 +37,8 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
#include "src/core/census/grpc_filter.h"
#include "src/core/channel/channel_args.h"
@@ -56,11 +58,11 @@ typedef struct {
grpc_closure *notify;
grpc_connect_in_args args;
grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ gpr_slice_buffer initial_string_buffer;
grpc_endpoint *tcp;
- grpc_mdctx *mdctx;
-
grpc_closure connected;
} connector;
@@ -72,18 +74,33 @@ static void connector_ref(grpc_connector *con) {
static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
connector *c = (connector *)con;
if (gpr_unref(&c->refs)) {
- grpc_mdctx_unref(c->mdctx);
+ /* c->initial_string_buffer does not need to be destroyed */
gpr_free(c);
}
}
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
+ connector_unref(exec_ctx, arg);
+}
+
static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
connector *c = arg;
grpc_closure *notify;
grpc_endpoint *tcp = c->tcp;
if (tcp != NULL) {
- c->result->transport = grpc_create_chttp2_transport(
- exec_ctx, c->args.channel_args, tcp, c->mdctx, 1);
+ if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ gpr_slice_buffer_init(&c->initial_string_buffer);
+ gpr_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ connector_ref(arg);
+ grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
+ &c->initial_string_sent);
+ }
+ c->result->transport =
+ grpc_create_chttp2_transport(exec_ctx, c->args.channel_args, tcp, 1);
grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
0);
GPR_ASSERT(c->result->transport);
@@ -123,7 +140,6 @@ static const grpc_connector_vtable connector_vtable = {
typedef struct {
grpc_subchannel_factory base;
gpr_refcount refs;
- grpc_mdctx *mdctx;
grpc_channel_args *merge_args;
grpc_channel *master;
} subchannel_factory;
@@ -139,7 +155,6 @@ static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
if (gpr_unref(&f->refs)) {
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
grpc_channel_args_destroy(f->merge_args);
- grpc_mdctx_unref(f->mdctx);
gpr_free(f);
}
}
@@ -154,12 +169,8 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
grpc_subchannel *s;
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
- c->mdctx = f->mdctx;
- grpc_mdctx_ref(c->mdctx);
gpr_ref_init(&c->refs, 1);
- args->mdctx = f->mdctx;
args->args = final_args;
- args->master = f->master;
s = grpc_subchannel_create(&c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
@@ -182,7 +193,6 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
const grpc_channel_filter *filters[MAX_FILTERS];
grpc_resolver *resolver;
subchannel_factory *f;
- grpc_mdctx *mdctx = grpc_mdctx_create();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t n = 0;
GRPC_API_TRACE(
@@ -196,19 +206,20 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
filters[n++] = &grpc_client_channel_filter;
GPR_ASSERT(n <= MAX_FILTERS);
- channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, n,
- args, mdctx, 1);
+ channel =
+ grpc_channel_create_from_filters(&exec_ctx, target, filters, n, args, 1);
f = gpr_malloc(sizeof(*f));
f->base.vtable = &subchannel_factory_vtable;
gpr_ref_init(&f->refs, 1);
- grpc_mdctx_ref(mdctx);
- f->mdctx = mdctx;
f->merge_args = grpc_channel_args_copy(args);
f->master = channel;
GRPC_CHANNEL_INTERNAL_REF(f->master, "subchannel_factory");
resolver = grpc_resolver_create(target, &f->base);
if (!resolver) {
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, f->master, "subchannel_factory");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+ grpc_exec_ctx_finish(&exec_ctx);
return NULL;
}
diff --git a/src/core/surface/byte_buffer_queue.h b/src/core/surface/channel_ping.c
index 2c3b22d24e..b4ce282787 100644
--- a/src/core/surface/byte_buffer_queue.h
+++ b/src/core/surface/channel_ping.c
@@ -31,32 +31,49 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H
-#define GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H
+#include "src/core/surface/channel.h"
-#include <grpc/byte_buffer.h>
+#include <string.h>
-/* TODO(ctiller): inline an element or two into this struct to avoid per-call
- allocations */
-typedef struct {
- grpc_byte_buffer **data;
- size_t count;
- size_t capacity;
-} grpc_bbq_array;
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/surface/api_trace.h"
+#include "src/core/surface/completion_queue.h"
-/* should be initialized by zeroing memory */
typedef struct {
- size_t drain_pos;
- grpc_bbq_array filling;
- grpc_bbq_array draining;
- size_t bytes;
-} grpc_byte_buffer_queue;
-
-void grpc_bbq_destroy(grpc_byte_buffer_queue *q);
-grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q);
-void grpc_bbq_flush(grpc_byte_buffer_queue *q);
-int grpc_bbq_empty(grpc_byte_buffer_queue *q);
-void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *bb);
-size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q);
-
-#endif /* GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H */
+ grpc_closure closure;
+ void *tag;
+ grpc_completion_queue *cq;
+ grpc_cq_completion completion_storage;
+} ping_result;
+
+static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_cq_completion *storage) {
+ gpr_free(arg);
+}
+
+static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
+ ping_result *pr = arg;
+ grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, success, ping_destroy, pr,
+ &pr->completion_storage);
+}
+
+void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
+ void *tag, void *reserved) {
+ grpc_transport_op op;
+ ping_result *pr = gpr_malloc(sizeof(*pr));
+ grpc_channel_element *top_elem =
+ grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(reserved == NULL);
+ memset(&op, 0, sizeof(op));
+ pr->tag = tag;
+ pr->cq = cq;
+ grpc_closure_init(&pr->closure, ping_done, pr);
+ op.send_ping = &pr->closure;
+ op.bind_pollset = grpc_cq_pollset(cq);
+ grpc_cq_begin_op(cq, tag);
+ top_elem->filter->start_transport_op(&exec_ctx, top_elem, &op);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
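
Usage is a single call plus a wait on the supplied completion queue; a minimal sketch, assuming channel, cq and a caller-chosen tag:

    grpc_channel_ping(channel, cq, tag, NULL);
    grpc_event ev = grpc_completion_queue_pluck(
        cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    /* ev.success reflects whether the transport acknowledged the ping */
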
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index aa90b3f7f5..848a33adc3 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -71,9 +71,38 @@ struct grpc_completion_queue {
int is_server_cq;
int num_pluckers;
plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
- grpc_closure pollset_destroy_done;
+ grpc_closure pollset_shutdown_done;
+
+#ifndef NDEBUG
+ void **outstanding_tags;
+ size_t outstanding_tag_count;
+ size_t outstanding_tag_capacity;
+#endif
+
+ grpc_completion_queue *next_free;
};
+static gpr_mu g_freelist_mu;
+grpc_completion_queue *g_freelist;
+
+static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
+ int success);
+
+void grpc_cq_global_init(void) { gpr_mu_init(&g_freelist_mu); }
+
+void grpc_cq_global_shutdown(void) {
+ gpr_mu_destroy(&g_freelist_mu);
+ while (g_freelist) {
+ grpc_completion_queue *next = g_freelist->next_free;
+ grpc_pollset_destroy(&g_freelist->pollset);
+#ifndef NDEBUG
+ gpr_free(g_freelist->outstanding_tags);
+#endif
+ gpr_free(g_freelist);
+ g_freelist = next;
+ }
+}
+
struct grpc_cq_alarm {
grpc_timer alarm;
grpc_cq_completion completion;
@@ -83,22 +112,48 @@ struct grpc_cq_alarm {
void *tag;
};
-static void on_pollset_destroy_done(grpc_exec_ctx *exec_ctx, void *cc,
- int success);
-
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
- grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
- GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
+ grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
- memset(cc, 0, sizeof(*cc));
+
+ GPR_TIMER_BEGIN("grpc_completion_queue_create", 0);
+
+ GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
+
+ gpr_mu_lock(&g_freelist_mu);
+ if (g_freelist == NULL) {
+ gpr_mu_unlock(&g_freelist_mu);
+
+ cc = gpr_malloc(sizeof(grpc_completion_queue));
+ grpc_pollset_init(&cc->pollset);
+#ifndef NDEBUG
+ cc->outstanding_tags = NULL;
+ cc->outstanding_tag_capacity = 0;
+#endif
+ } else {
+ cc = g_freelist;
+ g_freelist = g_freelist->next_free;
+ gpr_mu_unlock(&g_freelist_mu);
+ /* pollset already initialized */
+ }
+
/* Initial ref is dropped by grpc_completion_queue_shutdown */
gpr_ref_init(&cc->pending_events, 1);
/* One for destroy(), one for pollset_shutdown */
gpr_ref_init(&cc->owning_refs, 2);
- grpc_pollset_init(&cc->pollset);
cc->completed_tail = &cc->completed_head;
cc->completed_head.next = (gpr_uintptr)cc->completed_tail;
- grpc_closure_init(&cc->pollset_destroy_done, on_pollset_destroy_done, cc);
+ cc->shutdown = 0;
+ cc->shutdown_called = 0;
+ cc->is_server_cq = 0;
+ cc->num_pluckers = 0;
+#ifndef NDEBUG
+ cc->outstanding_tag_count = 0;
+#endif
+ grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc);
+
+ GPR_TIMER_END("grpc_completion_queue_create", 0);
+
return cc;
}
@@ -113,8 +168,8 @@ void grpc_cq_internal_ref(grpc_completion_queue *cc) {
gpr_ref(&cc->owning_refs);
}
-static void on_pollset_destroy_done(grpc_exec_ctx *exec_ctx, void *arg,
- int success) {
+static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
grpc_completion_queue *cc = arg;
GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}
@@ -129,15 +184,25 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->completed_head.next == (gpr_uintptr)&cc->completed_head);
- grpc_pollset_destroy(&cc->pollset);
- gpr_free(cc);
+ grpc_pollset_reset(&cc->pollset);
+ gpr_mu_lock(&g_freelist_mu);
+ cc->next_free = g_freelist;
+ g_freelist = cc;
+ gpr_mu_unlock(&g_freelist_mu);
}
}
-void grpc_cq_begin_op(grpc_completion_queue *cc) {
+void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
#ifndef NDEBUG
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
GPR_ASSERT(!cc->shutdown_called);
+ if (cc->outstanding_tag_count == cc->outstanding_tag_capacity) {
+ cc->outstanding_tag_capacity = GPR_MAX(4, 2 * cc->outstanding_tag_capacity);
+ cc->outstanding_tags =
+ gpr_realloc(cc->outstanding_tags, sizeof(*cc->outstanding_tags) *
+ cc->outstanding_tag_capacity);
+ }
+ cc->outstanding_tags[cc->outstanding_tag_count++] = tag;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
#endif
gpr_ref(&cc->pending_events);
@@ -154,6 +219,9 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
int shutdown;
int i;
grpc_pollset_worker *pluck_worker;
+#ifndef NDEBUG
+ int found = 0;
+#endif
GPR_TIMER_BEGIN("grpc_cq_end_op", 0);
@@ -164,6 +232,18 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
((gpr_uintptr)&cc->completed_head) | ((gpr_uintptr)(success != 0));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
+#ifndef NDEBUG
+ for (i = 0; i < (int)cc->outstanding_tag_count; i++) {
+ if (cc->outstanding_tags[i] == tag) {
+ cc->outstanding_tag_count--;
+ GPR_SWAP(void *, cc->outstanding_tags[i],
+ cc->outstanding_tags[cc->outstanding_tag_count]);
+ found = 1;
+ break;
+ }
+ }
+ GPR_ASSERT(found);
+#endif
shutdown = gpr_unref(&cc->pending_events);
if (!shutdown) {
cc->completed_tail->next =
@@ -185,8 +265,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
+ grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
}
GPR_TIMER_END("grpc_cq_end_op", 0);
@@ -205,10 +285,10 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_API_TRACE(
"grpc_completion_queue_next("
"cc=%p, "
- "deadline=gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
- 5, (cc, (long)deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
- reserved));
+ 5, (cc, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -293,9 +373,9 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
GRPC_API_TRACE(
"grpc_completion_queue_pluck("
"cc=%p, tag=%p, "
- "deadline=gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
+ "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
- 6, (cc, tag, (long)deadline.tv_sec, deadline.tv_nsec,
+ 6, (cc, tag, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
(int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
@@ -365,29 +445,31 @@ done:
to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
if (cc->shutdown_called) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+ GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cc->shutdown_called = 1;
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
-
if (gpr_unref(&cc->pending_events)) {
- gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
- gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
+ grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
}
+ gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
}
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
GRPC_API_TRACE("grpc_completion_queue_destroy(cc=%p)", 1, (cc));
+ GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0);
grpc_completion_queue_shutdown(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "destroy");
+ GPR_TIMER_END("grpc_completion_queue_destroy", 0);
}
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
diff --git a/src/core/surface/completion_queue.h b/src/core/surface/completion_queue.h
index 5f8282e542..1e40c48bea 100644
--- a/src/core/surface/completion_queue.h
+++ b/src/core/surface/completion_queue.h
@@ -68,10 +68,12 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc);
#endif
/* Flag that an operation is beginning: the completion channel will not finish
- shutdown until a corrensponding grpc_cq_end_* call is made */
-void grpc_cq_begin_op(grpc_completion_queue *cc);
+   shutdown until a corresponding grpc_cq_end_* call is made.
+ \a tag is currently used only in debug builds. */
+void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag);
-/* Queue a GRPC_OP_COMPLETED operation */
+/* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to
+ grpc_cq_begin_op */
void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
void *tag, int success,
void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
@@ -83,4 +85,7 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
int grpc_cq_is_server_cq(grpc_completion_queue *cc);
+void grpc_cq_global_init(void);
+void grpc_cq_global_shutdown(void);
+
#endif /* GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H */
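Editor's note: for context, a minimal sketch of how an internal caller is expected to pair the two entry points declared above now that grpc_cq_begin_op also takes the tag. The wrapper struct and callback names are illustrative, not part of the tree; the grpc_cq_end_op argument order follows the declaration above.

    #include <grpc/support/alloc.h>
    #include "src/core/surface/completion_queue.h"

    /* illustrative storage for one pending completion */
    typedef struct { grpc_cq_completion completion; } pending_op;

    static void op_done(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_cq_completion *completion) {
      gpr_free(arg); /* release the storage once the event has been consumed */
    }

    static void start_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
                         void *tag) {
      pending_op *op = gpr_malloc(sizeof(*op));
      /* debug builds now record the tag in the cq's outstanding_tags array */
      grpc_cq_begin_op(cq, tag);
      /* ... asynchronous work happens here ... */
      /* the same tag must be handed back, or the new debug assertion fires */
      grpc_cq_end_op(exec_ctx, cq, tag, 1 /* success */, op_done, op,
                     &op->completion);
    }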
diff --git a/src/core/surface/init.c b/src/core/surface/init.c
index b2e66a830e..82027af651 100644
--- a/src/core/surface/init.c
+++ b/src/core/surface/init.c
@@ -52,11 +52,16 @@
#include "src/core/profiling/timers.h"
#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
+#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
#include "src/core/surface/surface_trace.h"
#include "src/core/transport/chttp2_transport.h"
#include "src/core/transport/connectivity_state.h"
+#ifndef GRPC_DEFAULT_NAME_PREFIX
+#define GRPC_DEFAULT_NAME_PREFIX "dns:///"
+#endif
+
#define MAX_PLUGINS 128
static gpr_once g_basic_init = GPR_ONCE_INIT;
@@ -92,10 +97,11 @@ void grpc_init(void) {
gpr_mu_lock(&g_init_mu);
if (++g_initializations == 1) {
gpr_time_init();
+ grpc_mdctx_global_init();
grpc_lb_policy_registry_init(grpc_pick_first_lb_factory_create());
grpc_register_lb_policy(grpc_pick_first_lb_factory_create());
grpc_register_lb_policy(grpc_round_robin_lb_factory_create());
- grpc_resolver_registry_init("dns:///");
+ grpc_resolver_registry_init(GRPC_DEFAULT_NAME_PREFIX);
grpc_register_resolver_type(grpc_dns_resolver_factory_create());
grpc_register_resolver_type(grpc_ipv4_resolver_factory_create());
grpc_register_resolver_type(grpc_ipv6_resolver_factory_create());
@@ -118,6 +124,7 @@ void grpc_init(void) {
}
}
gpr_timers_global_init();
+ grpc_cq_global_init();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].init != NULL) {
g_all_of_the_plugins[i].init();
@@ -133,17 +140,20 @@ void grpc_shutdown(void) {
GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
- grpc_iomgr_shutdown();
grpc_executor_shutdown();
+ grpc_cq_global_shutdown();
+ grpc_iomgr_shutdown();
census_shutdown();
gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_resolver_registry_shutdown();
+ grpc_lb_policy_registry_shutdown();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();
}
}
+ grpc_mdctx_global_shutdown();
}
gpr_mu_unlock(&g_init_mu);
}
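Editor's note: the reordered shutdown path relies on grpc_init/grpc_shutdown staying reference counted, so only the outermost pair runs the global hooks touched here. A trivial sketch using the public API:

    #include <grpc/grpc.h>

    int main(void) {
      grpc_init();     /* first call runs grpc_mdctx_global_init, grpc_cq_global_init, ... */
      grpc_init();     /* nested calls only bump the init count */
      grpc_shutdown(); /* no teardown yet */
      grpc_shutdown(); /* last call drains the executor, cq freelist, then iomgr */
      return 0;
    }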
diff --git a/src/core/surface/lame_client.c b/src/core/surface/lame_client.c
index e72264fbcd..a60e9d20da 100644
--- a/src/core/surface/lame_client.c
+++ b/src/core/surface/lame_client.c
@@ -49,49 +49,41 @@ typedef struct {
} call_data;
typedef struct {
- grpc_mdctx *mdctx;
- grpc_channel *master;
grpc_status_code error_code;
const char *error_message;
} channel_data;
+static void fill_metadata(grpc_call_element *elem, grpc_metadata_batch *mdb) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ char tmp[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(chand->error_code, tmp);
+ calld->status.md = grpc_mdelem_from_strings("grpc-status", tmp);
+ calld->details.md =
+ grpc_mdelem_from_strings("grpc-message", chand->error_message);
+ calld->status.prev = calld->details.next = NULL;
+ calld->status.next = &calld->details;
+ calld->details.prev = &calld->status;
+ mdb->list.head = &calld->status;
+ mdb->list.tail = &calld->details;
+ mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+}
+
static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- if (op->send_ops != NULL) {
- grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
- }
- if (op->recv_ops != NULL) {
- char tmp[GPR_LTOA_MIN_BUFSIZE];
- grpc_metadata_batch mdb;
- gpr_ltoa(chand->error_code, tmp);
- calld->status.md =
- grpc_mdelem_from_strings(chand->mdctx, "grpc-status", tmp);
- calld->details.md = grpc_mdelem_from_strings(chand->mdctx, "grpc-message",
- chand->error_message);
- calld->status.prev = calld->details.next = NULL;
- calld->status.next = &calld->details;
- calld->details.prev = &calld->status;
- mdb.list.head = &calld->status;
- mdb.list.tail = &calld->details;
- mdb.garbage.head = mdb.garbage.tail = NULL;
- mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- grpc_sopb_add_metadata(op->recv_ops, mdb);
- *op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
- }
- if (op->on_consumed != NULL) {
- op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
+ if (op->recv_initial_metadata != NULL) {
+ fill_metadata(elem, op->recv_initial_metadata);
+ } else if (op->recv_trailing_metadata != NULL) {
+ fill_metadata(elem, op->recv_trailing_metadata);
}
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, 0);
}
static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- return grpc_channel_get_target(chand->master);
+ return NULL;
}
static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -109,25 +101,16 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
}
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *transport_server_data,
- grpc_transport_stream_op *initial_op) {
- if (initial_op) {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, initial_op);
- }
-}
+ grpc_call_element_args *args) {}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args, grpc_mdctx *mdctx,
- int is_first, int is_last) {
- channel_data *chand = elem->channel_data;
- GPR_ASSERT(is_first);
- GPR_ASSERT(is_last);
- chand->mdctx = mdctx;
- chand->master = master;
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ GPR_ASSERT(args->is_first);
+ GPR_ASSERT(args->is_last);
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -135,8 +118,9 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static const grpc_channel_filter lame_filter = {
lame_start_transport_stream_op, lame_start_transport_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, lame_get_peer, "lame-client",
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ lame_get_peer, "lame-client",
};
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
@@ -149,8 +133,8 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
channel_data *chand;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
static const grpc_channel_filter *filters[] = {&lame_filter};
- channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, 1,
- NULL, grpc_mdctx_create(), 1);
+ channel =
+ grpc_channel_create_from_filters(&exec_ctx, target, filters, 1, NULL, 1);
elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
GRPC_API_TRACE(
"grpc_lame_client_channel_create(target=%s, error_code=%d, "
diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c
index 0dd0a31169..92bd53411d 100644
--- a/src/core/surface/secure_channel_create.c
+++ b/src/core/surface/secure_channel_create.c
@@ -37,6 +37,8 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/slice_buffer.h>
#include "src/core/census/grpc_filter.h"
#include "src/core/channel/channel_args.h"
@@ -61,14 +63,14 @@ typedef struct {
grpc_closure *notify;
grpc_connect_in_args args;
grpc_connect_out_args *result;
+ grpc_closure initial_string_sent;
+ gpr_slice_buffer initial_string_buffer;
gpr_mu mu;
grpc_endpoint *connecting_endpoint;
grpc_endpoint *newly_connecting_endpoint;
grpc_closure connected_closure;
-
- grpc_mdctx *mdctx;
} connector;
static void connector_ref(grpc_connector *con) {
@@ -79,14 +81,13 @@ static void connector_ref(grpc_connector *con) {
static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
connector *c = (connector *)con;
if (gpr_unref(&c->refs)) {
- grpc_mdctx_unref(c->mdctx);
+ /* c->initial_string_buffer does not need to be destroyed */
gpr_free(c);
}
}
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_security_status status,
- grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
connector *c = arg;
grpc_closure *notify;
@@ -95,17 +96,15 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
memset(c->result, 0, sizeof(*c->result));
gpr_mu_unlock(&c->mu);
} else if (status != GRPC_SECURITY_OK) {
- GPR_ASSERT(c->connecting_endpoint == wrapped_endpoint);
gpr_log(GPR_ERROR, "Secure handshake failed with error %d.", status);
memset(c->result, 0, sizeof(*c->result));
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
} else {
- GPR_ASSERT(c->connecting_endpoint == wrapped_endpoint);
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
c->result->transport = grpc_create_chttp2_transport(
- exec_ctx, c->args.channel_args, secure_endpoint, c->mdctx, 1);
+ exec_ctx, c->args.channel_args, secure_endpoint, 1);
grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
0);
c->result->filters = gpr_malloc(sizeof(grpc_channel_filter *) * 2);
@@ -118,6 +117,14 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
notify->cb(exec_ctx, notify->cb_arg, 1);
}
+static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
+ connector *c = arg;
+ grpc_security_connector_do_handshake(exec_ctx, &c->security_connector->base,
+ c->connecting_endpoint,
+ on_secure_handshake_done, c);
+}
+
static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
connector *c = arg;
grpc_closure *notify;
@@ -127,8 +134,19 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
GPR_ASSERT(c->connecting_endpoint == NULL);
c->connecting_endpoint = tcp;
gpr_mu_unlock(&c->mu);
- grpc_security_connector_do_handshake(exec_ctx, &c->security_connector->base,
- tcp, on_secure_handshake_done, c);
+ if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
+ grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
+ c);
+ gpr_slice_buffer_init(&c->initial_string_buffer);
+ gpr_slice_buffer_add(&c->initial_string_buffer,
+ c->args.initial_connect_string);
+ grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
+ &c->initial_string_sent);
+ } else {
+ grpc_security_connector_do_handshake(exec_ctx,
+ &c->security_connector->base, tcp,
+ on_secure_handshake_done, c);
+ }
} else {
memset(c->result, 0, sizeof(*c->result));
notify = c->notify;
@@ -174,7 +192,6 @@ static const grpc_connector_vtable connector_vtable = {
typedef struct {
grpc_subchannel_factory base;
gpr_refcount refs;
- grpc_mdctx *mdctx;
grpc_channel_args *merge_args;
grpc_channel_security_connector *security_connector;
grpc_channel *master;
@@ -193,7 +210,6 @@ static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
"subchannel_factory");
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
grpc_channel_args_destroy(f->merge_args);
- grpc_mdctx_unref(f->mdctx);
gpr_free(f);
}
}
@@ -209,13 +225,9 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
c->security_connector = f->security_connector;
- c->mdctx = f->mdctx;
gpr_mu_init(&c->mu);
- grpc_mdctx_ref(c->mdctx);
gpr_ref_init(&c->refs, 1);
args->args = final_args;
- args->master = f->master;
- args->mdctx = f->mdctx;
s = grpc_subchannel_create(&c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
@@ -239,7 +251,6 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
grpc_channel_args *args_copy;
grpc_channel_args *new_args_from_connector;
grpc_channel_security_connector *security_connector;
- grpc_mdctx *mdctx;
grpc_resolver *resolver;
subchannel_factory *f;
#define MAX_FILTERS 3
@@ -269,7 +280,6 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
target, GRPC_STATUS_INVALID_ARGUMENT,
"Failed to create security connector.");
}
- mdctx = grpc_mdctx_create();
connector_arg = grpc_security_connector_to_arg(&security_connector->base);
args_copy = grpc_channel_args_copy_and_add(
@@ -283,35 +293,33 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
GPR_ASSERT(n <= MAX_FILTERS);
channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, n,
- args_copy, mdctx, 1);
+ args_copy, 1);
f = gpr_malloc(sizeof(*f));
f->base.vtable = &subchannel_factory_vtable;
gpr_ref_init(&f->refs, 1);
- grpc_mdctx_ref(mdctx);
- f->mdctx = mdctx;
GRPC_SECURITY_CONNECTOR_REF(&security_connector->base, "subchannel_factory");
f->security_connector = security_connector;
f->merge_args = grpc_channel_args_copy(args_copy);
f->master = channel;
GRPC_CHANNEL_INTERNAL_REF(channel, "subchannel_factory");
resolver = grpc_resolver_create(target, &f->base);
- if (!resolver) {
- grpc_exec_ctx_finish(&exec_ctx);
- return NULL;
+ if (resolver) {
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
}
-
- grpc_client_channel_set_resolver(
- &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
- GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
grpc_subchannel_factory_unref(&exec_ctx, &f->base);
GRPC_SECURITY_CONNECTOR_UNREF(&security_connector->base, "channel_create");
-
grpc_channel_args_destroy(args_copy);
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(new_args_from_connector);
}
+ if (!resolver) {
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "subchannel_factory");
+ channel = NULL;
+ }
grpc_exec_ctx_finish(&exec_ctx);
return channel;
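Editor's note: the restructured tail of grpc_secure_channel_create changes what callers see on the two failure paths: a bad credential/connector still yields a lame channel earlier in the function, while a failed resolver lookup now unrefs the partially built channel and returns NULL. A hedged caller-side sketch, assuming the usual (creds, target, args, reserved) parameter order and an illustrative target:

    grpc_channel *ch =
        grpc_secure_channel_create(creds, "dns:///myservice.example", NULL, NULL);
    if (ch == NULL) {
      /* resolver creation failed; nothing to clean up here, the channel was
         already released inside grpc_secure_channel_create */
    }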
diff --git a/src/core/surface/server.c b/src/core/surface/server.c
index 819226278d..1e1cde3648 100644
--- a/src/core/surface/server.c
+++ b/src/core/surface/server.c
@@ -54,6 +54,7 @@
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
#include "src/core/transport/metadata.h"
+#include "src/core/transport/static_metadata.h"
typedef struct listener {
void *arg;
@@ -84,18 +85,18 @@ typedef struct requested_call {
grpc_completion_queue *cq_for_notification;
grpc_call **call;
grpc_cq_completion completion;
+ grpc_metadata_array *initial_metadata;
union {
struct {
grpc_call_details *details;
- grpc_metadata_array *initial_metadata;
} batch;
struct {
registered_method *registered_method;
gpr_timespec *deadline;
- grpc_metadata_array *initial_metadata;
grpc_byte_buffer **optional_payload;
} registered;
} data;
+ grpc_closure publish;
} requested_call;
typedef struct channel_registered_method {
@@ -108,8 +109,6 @@ struct channel_data {
grpc_server *server;
grpc_connectivity_state connectivity_state;
grpc_channel *channel;
- grpc_mdstr *path_key;
- grpc_mdstr *authority_key;
/* linked list of all channels on a server */
channel_data *next;
channel_data *prev;
@@ -150,16 +149,16 @@ struct call_data {
grpc_mdstr *path;
grpc_mdstr *host;
gpr_timespec deadline;
- int got_initial_metadata;
grpc_completion_queue *cq_new;
- grpc_stream_op_buffer *recv_ops;
- grpc_stream_state *recv_state;
- grpc_closure *on_done_recv;
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_array initial_metadata;
- grpc_closure server_on_recv;
+ grpc_closure got_initial_metadata;
+ grpc_closure server_on_recv_initial_metadata;
grpc_closure kill_zombie_closure;
+ grpc_closure *on_done_recv_initial_metadata;
call_data *pending_next;
};
@@ -396,7 +395,6 @@ static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
int success) {
channel_data *chand = cd;
grpc_server *server = chand->server;
- gpr_log(GPR_DEBUG, "finish_destroy_channel: %p", chand->channel);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
server_unref(exec_ctx, server);
}
@@ -559,91 +557,46 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
- channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- if (md->key == chand->path_key) {
+ if (md->key == GRPC_MDSTR_PATH) {
calld->path = GRPC_MDSTR_REF(md->value);
return NULL;
- } else if (md->key == chand->authority_key) {
+ } else if (md->key == GRPC_MDSTR_AUTHORITY) {
calld->host = GRPC_MDSTR_REF(md->value);
return NULL;
}
return md;
}
-static void server_on_recv(grpc_exec_ctx *exec_ctx, void *ptr, int success) {
+static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
+ int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
gpr_timespec op_deadline;
- if (success && !calld->got_initial_metadata) {
- size_t i;
- size_t nops = calld->recv_ops->nops;
- grpc_stream_op *ops = calld->recv_ops->ops;
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
- op_deadline = op->data.metadata.deadline;
- if (0 !=
- gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
- calld->deadline = op->data.metadata.deadline;
- }
- if (calld->host && calld->path) {
- calld->got_initial_metadata = 1;
- start_new_rpc(exec_ctx, elem);
- }
- break;
- }
+ grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, elem);
+ op_deadline = calld->recv_initial_metadata->deadline;
+ if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
+ calld->deadline = op_deadline;
}
-
- switch (*calld->recv_state) {
- case GRPC_STREAM_OPEN:
- break;
- case GRPC_STREAM_SEND_CLOSED:
- break;
- case GRPC_STREAM_RECV_CLOSED:
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
- } else {
- gpr_mu_unlock(&calld->mu_state);
- }
- break;
- case GRPC_STREAM_CLOSED:
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
- } else if (calld->state == PENDING) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
- /* zombied call will be destroyed when it's removed from the pending
- queue... later */
- } else {
- gpr_mu_unlock(&calld->mu_state);
- }
- break;
+ if (calld->host && calld->path) {
+ /* do nothing */
+ } else {
+ success = 0;
}
- calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv_initial_metadata->cb(
+ exec_ctx, calld->on_done_recv_initial_metadata->cb_arg, success);
}
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
- if (op->recv_ops) {
- /* substitute our callback for the higher callback */
- calld->recv_ops = op->recv_ops;
- calld->recv_state = op->recv_state;
- calld->on_done_recv = op->on_done_recv;
- op->on_done_recv = &calld->server_on_recv;
+ if (op->recv_initial_metadata != NULL) {
+ calld->recv_initial_metadata = op->recv_initial_metadata;
+ calld->on_done_recv_initial_metadata = op->on_complete;
+ op->on_complete = &calld->server_on_recv_initial_metadata;
}
}
@@ -655,12 +608,48 @@ static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_next_op(exec_ctx, elem, op);
}
-static void accept_stream(void *cd, grpc_transport *transport,
+static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
+ int success) {
+ grpc_call_element *elem = ptr;
+ call_data *calld = elem->call_data;
+ if (success) {
+ start_new_rpc(exec_ctx, elem);
+ } else {
+ gpr_mu_lock(&calld->mu_state);
+ if (calld->state == NOT_STARTED) {
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
+ } else if (calld->state == PENDING) {
+ calld->state = ZOMBIED;
+ gpr_mu_unlock(&calld->mu_state);
+ /* zombied call will be destroyed when it's removed from the pending
+ queue... later */
+ } else {
+ gpr_mu_unlock(&calld->mu_state);
+ }
+ }
+}
+
+static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
+ grpc_transport *transport,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
- grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data, NULL,
- 0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call *call =
+ grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data,
+ NULL, 0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call_element *elem =
+ grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
+ call_data *calld = elem->call_data;
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_RECV_INITIAL_METADATA;
+ op.data.recv_initial_metadata = &calld->initial_metadata;
+ grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem);
+ grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
+ &calld->got_initial_metadata);
}
static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
@@ -685,8 +674,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
}
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const void *server_transport_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
@@ -694,11 +682,10 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
- grpc_closure_init(&calld->server_on_recv, server_on_recv, elem);
+ grpc_closure_init(&calld->server_on_recv_initial_metadata,
+ server_on_recv_initial_metadata, elem);
server_ref(chand->server);
-
- if (initial_op) server_mutate_op(elem, initial_op);
}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
@@ -714,6 +701,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->path) {
GRPC_MDSTR_UNREF(calld->path);
}
+ grpc_metadata_array_destroy(&calld->initial_metadata);
gpr_mu_destroy(&calld->mu_state);
@@ -721,17 +709,13 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args,
- grpc_mdctx *metadata_context, int is_first,
- int is_last) {
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
- GPR_ASSERT(is_first);
- GPR_ASSERT(!is_last);
+ GPR_ASSERT(args->is_first);
+ GPR_ASSERT(!args->is_last);
chand->server = NULL;
chand->channel = NULL;
- chand->path_key = grpc_mdstr_from_string(metadata_context, ":path");
- chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority");
chand->next = chand->prev = chand;
chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE;
@@ -761,16 +745,15 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->next = chand->prev = chand;
maybe_finish_shutdown(exec_ctx, chand->server);
gpr_mu_unlock(&chand->server->mu_global);
- GRPC_MDSTR_UNREF(chand->path_key);
- GRPC_MDSTR_UNREF(chand->authority_key);
server_unref(exec_ctx, chand->server);
}
}
static const grpc_channel_filter server_surface_filter = {
server_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
- destroy_channel_elem, grpc_call_next_get_peer, "server",
+ init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
+ sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+ grpc_call_next_get_peer, "server",
};
void grpc_server_register_completion_queue(grpc_server *server,
@@ -904,7 +887,7 @@ void grpc_server_start(grpc_server *server) {
void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
grpc_transport *transport,
grpc_channel_filter const **extra_filters,
- size_t num_extra_filters, grpc_mdctx *mdctx,
+ size_t num_extra_filters,
const grpc_channel_args *args) {
size_t num_filters = s->channel_filter_count + num_extra_filters + 1;
grpc_channel_filter const **filters =
@@ -939,7 +922,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
}
channel = grpc_channel_create_from_filters(exec_ctx, NULL, filters,
- num_filters, args, mdctx, 0);
+ num_filters, args, 0);
chand = (channel_data *)grpc_channel_stack_element(
grpc_channel_get_channel_stack(channel), 0)->channel_data;
chand->server = s;
@@ -958,8 +941,8 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
chand->registered_methods = gpr_malloc(alloc);
memset(chand->registered_methods, 0, alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
- host = rm->host ? grpc_mdstr_from_string(mdctx, rm->host) : NULL;
- method = grpc_mdstr_from_string(mdctx, rm->method);
+ host = rm->host ? grpc_mdstr_from_string(rm->host) : NULL;
+ method = grpc_mdstr_from_string(rm->method);
hash = GRPC_MDSTR_KV_HASH(host ? host->hash : 0, method->hash);
for (probes = 0; chand->registered_methods[(hash + probes) % slots]
.server_registered_method != NULL;
@@ -1022,11 +1005,9 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
(server, cq, tag));
- GRPC_SERVER_LOG_SHUTDOWN(GPR_INFO, server, cq, tag);
-
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu_global);
- grpc_cq_begin_op(cq);
+ grpc_cq_begin_op(cq, tag);
if (server->shutdown_published) {
grpc_cq_end_op(&exec_ctx, cq, tag, 1, done_published_shutdown, NULL,
gpr_malloc(sizeof(grpc_cq_completion)));
@@ -1187,18 +1168,15 @@ grpc_call_error grpc_server_request_call(
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
- "cq_bound_to_call=%p, cq_for_notification=%p, tag%p)",
+ "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
7, (server, call, details, initial_metadata, cq_bound_to_call,
cq_for_notification, tag));
- GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details,
- initial_metadata, cq_bound_to_call,
- cq_for_notification, tag);
if (!grpc_cq_is_server_cq(cq_for_notification)) {
gpr_free(rc);
error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
goto done;
}
- grpc_cq_begin_op(cq_for_notification);
+ grpc_cq_begin_op(cq_for_notification, tag);
details->reserved = NULL;
rc->type = BATCH_CALL;
rc->server = server;
@@ -1207,7 +1185,7 @@ grpc_call_error grpc_server_request_call(
rc->cq_for_notification = cq_for_notification;
rc->call = call;
rc->data.batch.details = details;
- rc->data.batch.initial_metadata = initial_metadata;
+ rc->initial_metadata = initial_metadata;
error = queue_call_request(&exec_ctx, server, rc);
done:
grpc_exec_ctx_finish(&exec_ctx);
@@ -1235,7 +1213,7 @@ grpc_call_error grpc_server_request_registered_call(
error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
goto done;
}
- grpc_cq_begin_op(cq_for_notification);
+ grpc_cq_begin_op(cq_for_notification, tag);
rc->type = REGISTERED_CALL;
rc->server = server;
rc->tag = tag;
@@ -1244,7 +1222,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->call = call;
rc->data.registered.registered_method = rm;
rc->data.registered.deadline = deadline;
- rc->data.registered.initial_metadata = initial_metadata;
+ rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;
error = queue_call_request(&exec_ctx, server, rc);
done:
@@ -1253,12 +1231,7 @@ done:
}
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
- grpc_call *call, int success,
- void *tag);
-static void publish_was_not_set(grpc_exec_ctx *exec_ctx, grpc_call *call,
- int success, void *tag) {
- abort();
-}
+ void *user_data, int success);
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
gpr_slice slice = value->slice;
@@ -1273,9 +1246,10 @@ static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
call_data *calld, requested_call *rc) {
- grpc_ioreq_completion_func publish = publish_was_not_set;
- grpc_ioreq req[2];
- grpc_ioreq *r = req;
+ grpc_op ops[1];
+ grpc_op *op = ops;
+
+ memset(ops, 0, sizeof(ops));
/* called once initial metadata has been read by the call, but BEFORE
the ioreq to fetch it out of the call has been executed.
@@ -1284,8 +1258,10 @@ static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
an ioreq op, that should complete immediately. */
grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
+ grpc_closure_init(&rc->publish, publish_registered_or_batch, rc);
*rc->call = calld->call;
calld->cq_new = rc->cq_for_notification;
+ GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
switch (rc->type) {
case BATCH_CALL:
GPR_ASSERT(calld->host != NULL);
@@ -1295,31 +1271,22 @@ static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
cpstr(&rc->data.batch.details->method,
&rc->data.batch.details->method_capacity, calld->path);
rc->data.batch.details->deadline = calld->deadline;
- r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
- r->data.recv_metadata = rc->data.batch.initial_metadata;
- r->flags = 0;
- r++;
- publish = publish_registered_or_batch;
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
- r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
- r->data.recv_metadata = rc->data.registered.initial_metadata;
- r->flags = 0;
- r++;
if (rc->data.registered.optional_payload) {
- r->op = GRPC_IOREQ_RECV_MESSAGE;
- r->data.recv_message = rc->data.registered.optional_payload;
- r->flags = 0;
- r++;
+ op->op = GRPC_OP_RECV_MESSAGE;
+ op->data.recv_message = rc->data.registered.optional_payload;
+ op++;
}
- publish = publish_registered_or_batch;
break;
+ default:
+ GPR_UNREACHABLE_CODE(return );
}
GRPC_CALL_INTERNAL_REF(calld->call, "server");
- grpc_call_start_ioreq_and_call_back(exec_ctx, calld->call, req,
- (size_t)(r - req), publish, rc);
+ grpc_call_start_batch_and_execute(exec_ctx, calld->call, ops,
+ (size_t)(op - ops), &rc->publish);
}
static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
@@ -1342,25 +1309,19 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
requested_call *rc) {
*rc->call = NULL;
- switch (rc->type) {
- case BATCH_CALL:
- rc->data.batch.initial_metadata->count = 0;
- break;
- case REGISTERED_CALL:
- rc->data.registered.initial_metadata->count = 0;
- break;
- }
+ rc->initial_metadata->count = 0;
+
server_ref(server);
grpc_cq_end_op(exec_ctx, rc->cq_for_notification, rc->tag, 0,
done_request_event, rc, &rc->completion);
}
-static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
- grpc_call *call, int success,
- void *prc) {
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx, void *prc,
+ int success) {
+ requested_call *rc = prc;
+ grpc_call *call = *rc->call;
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- requested_call *rc = prc;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
server_ref(chand->server);
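Editor's note: the publication path above illustrates the closure convention this change moves the server onto: callbacks take (exec_ctx, arg, success), are bound once with grpc_closure_init, and are handed to grpc_call_start_batch_and_execute instead of the retired ioreq machinery. A stripped-down sketch of the pattern (names and surrounding state are illustrative, not a complete translation unit):

    static void on_batch_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
      /* arg is whatever was captured at grpc_closure_init time */
    }

    /* given a grpc_call *call and a grpc_op op already filled in: */
    grpc_closure done;
    grpc_closure_init(&done, on_batch_done, some_state);
    grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, &done);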
diff --git a/src/core/surface/server.h b/src/core/surface/server.h
index 4c46d07679..a957fdb360 100644
--- a/src/core/surface/server.h
+++ b/src/core/surface/server.h
@@ -57,7 +57,7 @@ void grpc_server_add_listener(
void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server,
grpc_transport *transport,
grpc_channel_filter const **extra_filters,
- size_t num_extra_filters, grpc_mdctx *mdctx,
+ size_t num_extra_filters,
const grpc_channel_args *args);
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server);
diff --git a/src/core/surface/server_chttp2.c b/src/core/surface/server_chttp2.c
index 580b91573c..5ce7c1955b 100644
--- a/src/core/surface/server_chttp2.c
+++ b/src/core/surface/server_chttp2.c
@@ -44,11 +44,11 @@
#include <grpc/support/useful.h>
static void setup_transport(grpc_exec_ctx *exec_ctx, void *server,
- grpc_transport *transport, grpc_mdctx *mdctx) {
+ grpc_transport *transport) {
static grpc_channel_filter const *extra_filters[] = {
&grpc_http_server_filter};
grpc_server_setup_transport(exec_ctx, server, transport, extra_filters,
- GPR_ARRAY_SIZE(extra_filters), mdctx,
+ GPR_ARRAY_SIZE(extra_filters),
grpc_server_get_channel_args(server));
}
@@ -61,10 +61,9 @@ static void new_transport(grpc_exec_ctx *exec_ctx, void *server,
* (as in server_secure_chttp2.c) needs to add synchronization to avoid this
* case.
*/
- grpc_mdctx *mdctx = grpc_mdctx_create();
grpc_transport *transport = grpc_create_chttp2_transport(
- exec_ctx, grpc_server_get_channel_args(server), tcp, mdctx, 0);
- setup_transport(exec_ctx, server, transport, mdctx);
+ exec_ctx, grpc_server_get_channel_args(server), tcp, 0);
+ setup_transport(exec_ctx, server, transport);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
}
@@ -102,15 +101,15 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
}
tcp = grpc_tcp_server_create();
- if (!tcp) {
- goto error;
- }
+ GPR_ASSERT(tcp);
for (i = 0; i < resolved->naddrs; i++) {
- port_temp = grpc_tcp_server_add_port(
+ grpc_tcp_listener *listener;
+ listener = grpc_tcp_server_add_port(
tcp, (struct sockaddr *)&resolved->addrs[i].addr,
resolved->addrs[i].len);
- if (port_temp >= 0) {
+ port_temp = grpc_tcp_listener_get_port(listener);
+ if (port_temp > 0) {
if (port_num == -1) {
port_num = port_temp;
} else {
diff --git a/src/core/surface/server_create.c b/src/core/surface/server_create.c
index c7811a6d88..f30093e06b 100644
--- a/src/core/surface/server_create.c
+++ b/src/core/surface/server_create.c
@@ -32,14 +32,20 @@
*/
#include <grpc/grpc.h>
+#include "src/core/census/grpc_filter.h"
+#include "src/core/channel/channel_args.h"
+#include "src/core/channel/compress_filter.h"
#include "src/core/surface/api_trace.h"
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/server.h"
-#include "src/core/channel/compress_filter.h"
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
- const grpc_channel_filter *filters[] = {&grpc_compress_filter};
+ const grpc_channel_filter *filters[3];
+ size_t num_filters = 0;
+ filters[num_filters++] = &grpc_compress_filter;
+ if (grpc_channel_args_is_census_enabled(args)) {
+ filters[num_filters++] = &grpc_server_census_filter;
+ }
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
- return grpc_server_create_from_filters(filters, GPR_ARRAY_SIZE(filters),
- args);
+ return grpc_server_create_from_filters(filters, num_filters, args);
}
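Editor's note: grpc_server_create now assembles its filter list at runtime, adding grpc_server_census_filter only when the channel args opt in via grpc_channel_args_is_census_enabled. A sketch of how an application would request that; the GRPC_ARG_ENABLE_CENSUS key name is an assumption, check grpc.h for the exact define:

    #include <grpc/grpc.h>

    grpc_server *make_census_enabled_server(void) {
      grpc_arg arg;
      grpc_channel_args args;
      arg.type = GRPC_ARG_INTEGER;
      arg.key = GRPC_ARG_ENABLE_CENSUS; /* assumed key; turns on the census filter */
      arg.value.integer = 1;
      args.num_args = 1;
      args.args = &arg;
      return grpc_server_create(&args, NULL);
    }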
diff --git a/src/core/transport/byte_stream.c b/src/core/transport/byte_stream.c
new file mode 100644
index 0000000000..81e8e77ccb
--- /dev/null
+++ b/src/core/transport/byte_stream.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/transport/byte_stream.h"
+
+#include <stdlib.h>
+
+#include <grpc/support/log.h>
+
+int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream, gpr_slice *slice,
+ size_t max_size_hint, grpc_closure *on_complete) {
+ return byte_stream->next(exec_ctx, byte_stream, slice, max_size_hint,
+ on_complete);
+}
+
+void grpc_byte_stream_destroy(grpc_byte_stream *byte_stream) {
+ byte_stream->destroy(byte_stream);
+}
+
+/* slice_buffer_stream */
+
+static int slice_buffer_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream,
+ gpr_slice *slice, size_t max_size_hint,
+ grpc_closure *on_complete) {
+ grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+ GPR_ASSERT(stream->cursor < stream->backing_buffer->count);
+ *slice = gpr_slice_ref(stream->backing_buffer->slices[stream->cursor]);
+ stream->cursor++;
+ return 1;
+}
+
+static void slice_buffer_stream_destroy(grpc_byte_stream *byte_stream) {}
+
+void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
+ gpr_slice_buffer *slice_buffer,
+ gpr_uint32 flags) {
+ GPR_ASSERT(slice_buffer->length <= GPR_UINT32_MAX);
+ stream->base.length = (gpr_uint32)slice_buffer->length;
+ stream->base.flags = flags;
+ stream->base.next = slice_buffer_stream_next;
+ stream->base.destroy = slice_buffer_stream_destroy;
+ stream->backing_buffer = slice_buffer;
+ stream->cursor = 0;
+}
diff --git a/src/core/transport/byte_stream.h b/src/core/transport/byte_stream.h
new file mode 100644
index 0000000000..c94d8ff275
--- /dev/null
+++ b/src/core/transport/byte_stream.h
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_TRANSPORT_BYTE_STREAM_H
+#define GRPC_INTERNAL_CORE_TRANSPORT_BYTE_STREAM_H
+
+#include "src/core/iomgr/exec_ctx.h"
+#include <grpc/support/slice_buffer.h>
+
+/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
+ * compression for the message */
+#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
+/** Mask of all valid internal flags. */
+#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
+
+struct grpc_byte_stream;
+typedef struct grpc_byte_stream grpc_byte_stream;
+
+struct grpc_byte_stream {
+ gpr_uint32 length;
+ gpr_uint32 flags;
+ int (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
+ gpr_slice *slice, size_t max_size_hint,
+ grpc_closure *on_complete);
+ void (*destroy)(grpc_byte_stream *byte_stream);
+};
+
+/* returns 1 if the bytes are available immediately (in which case
+ * on_complete will not be called), 0 if the bytes will be available
+ * asynchronously.
+ *
+ * on entry, max_size_hint can be set as a hint as to the maximum number
+ * of bytes that would be acceptable to read.
+ *
+ * fills *slice with the next chunk of bytes before either returning 1
+ * or calling on_complete.
+ *
+ * once a slice is returned into *slice, it is owned by the caller.
+ */
+int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream, gpr_slice *slice,
+ size_t max_size_hint, grpc_closure *on_complete);
+
+void grpc_byte_stream_destroy(grpc_byte_stream *byte_stream);
+
+/* grpc_byte_stream that wraps a slice buffer */
+typedef struct grpc_slice_buffer_stream {
+ grpc_byte_stream base;
+ gpr_slice_buffer *backing_buffer;
+ size_t cursor;
+} grpc_slice_buffer_stream;
+
+void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
+ gpr_slice_buffer *slice_buffer,
+ gpr_uint32 flags);
+
+#endif /* GRPC_INTERNAL_CORE_TRANSPORT_BYTE_STREAM_H */
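Editor's note: since both byte_stream files are new, a short usage sketch may help. grpc_slice_buffer_stream reports every slice as immediately available, so the on_complete closure is never needed when draining one synchronously (the function name here is illustrative):

    #include <grpc/support/log.h>
    #include <grpc/support/slice_buffer.h>
    #include "src/core/transport/byte_stream.h"

    static void drain_stream(grpc_exec_ctx *exec_ctx, gpr_slice_buffer *buf) {
      grpc_slice_buffer_stream stream;
      gpr_slice slice;
      size_t i;
      grpc_slice_buffer_stream_init(&stream, buf, 0 /* flags */);
      for (i = 0; i < buf->count; i++) {
        /* always returns 1 for a slice_buffer_stream, so on_complete is unused */
        GPR_ASSERT(grpc_byte_stream_next(exec_ctx, &stream.base, &slice,
                                         stream.base.length, NULL));
        /* ... consume slice ... */
        gpr_slice_unref(slice); /* each returned slice is owned by the caller */
      }
      grpc_byte_stream_destroy(&stream.base);
    }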
diff --git a/src/core/transport/chttp2/frame_data.c b/src/core/transport/chttp2/frame_data.c
index 07179a4571..732124b7c9 100644
--- a/src/core/transport/chttp2/frame_data.c
+++ b/src/core/transport/chttp2/frame_data.c
@@ -45,12 +45,20 @@
grpc_chttp2_parse_error grpc_chttp2_data_parser_init(
grpc_chttp2_data_parser *parser) {
parser->state = GRPC_CHTTP2_DATA_FH_0;
- grpc_sopb_init(&parser->incoming_sopb);
+ parser->parsing_frame = NULL;
return GRPC_CHTTP2_PARSE_OK;
}
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser *parser) {
- grpc_sopb_destroy(&parser->incoming_sopb);
+void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_data_parser *parser) {
+ grpc_byte_stream *bs;
+ if (parser->parsing_frame) {
+ grpc_chttp2_incoming_byte_stream_finished(exec_ctx, parser->parsing_frame);
+ }
+ while (
+ (bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
+ grpc_byte_stream_destroy(bs);
+ }
}
grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
@@ -69,6 +77,62 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
return GRPC_CHTTP2_PARSE_OK;
}
+void grpc_chttp2_incoming_frame_queue_merge(
+ grpc_chttp2_incoming_frame_queue *head_dst,
+ grpc_chttp2_incoming_frame_queue *tail_src) {
+ if (tail_src->head == NULL) {
+ return;
+ }
+
+ if (head_dst->head == NULL) {
+ *head_dst = *tail_src;
+ memset(tail_src, 0, sizeof(*tail_src));
+ return;
+ }
+
+ head_dst->tail->next_message = tail_src->head;
+ head_dst->tail = tail_src->tail;
+ memset(tail_src, 0, sizeof(*tail_src));
+}
+
+grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
+ grpc_chttp2_incoming_frame_queue *q) {
+ grpc_byte_stream *out;
+ if (q->head == NULL) {
+ return NULL;
+ }
+ out = &q->head->base;
+ if (q->head == q->tail) {
+ memset(q, 0, sizeof(*q));
+ } else {
+ q->head = q->head->next_message;
+ }
+ return out;
+}
+
+void grpc_chttp2_encode_data(gpr_uint32 id, gpr_slice_buffer *inbuf,
+ gpr_uint32 write_bytes, int is_eof,
+ gpr_slice_buffer *outbuf) {
+ gpr_slice hdr;
+ gpr_uint8 *p;
+
+ hdr = gpr_slice_malloc(9);
+ p = GPR_SLICE_START_PTR(hdr);
+ GPR_ASSERT(write_bytes < (1 << 24));
+ *p++ = (gpr_uint8)(write_bytes >> 16);
+ *p++ = (gpr_uint8)(write_bytes >> 8);
+ *p++ = (gpr_uint8)(write_bytes);
+ *p++ = GRPC_CHTTP2_FRAME_DATA;
+ *p++ = is_eof ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0;
+ *p++ = (gpr_uint8)(id >> 24);
+ *p++ = (gpr_uint8)(id >> 16);
+ *p++ = (gpr_uint8)(id >> 8);
+ *p++ = (gpr_uint8)(id);
+ gpr_slice_buffer_add(outbuf, hdr);
+
+ gpr_slice_buffer_move_first(inbuf, write_bytes, outbuf);
+}
+
grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
@@ -77,7 +141,8 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
gpr_uint8 *cur = beg;
grpc_chttp2_data_parser *p = parser;
- gpr_uint32 message_flags = 0;
+ gpr_uint32 message_flags;
+ grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
@@ -132,11 +197,14 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
p->frame_size |= ((gpr_uint32)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
+ message_flags = 0;
if (p->is_frame_compressed) {
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
- grpc_sopb_add_begin_message(&p->incoming_sopb, p->frame_size,
- message_flags);
+ p->parsing_frame = incoming_byte_stream =
+ grpc_chttp2_incoming_byte_stream_create(
+ exec_ctx, transport_parsing, stream_parsing, p->frame_size,
+ message_flags, &p->incoming_frames);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
if (cur == end) {
@@ -147,20 +215,25 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
if ((gpr_uint32)(end - cur) == p->frame_size) {
- grpc_sopb_add_slice(
- &p->incoming_sopb,
+ grpc_chttp2_incoming_byte_stream_push(
+ exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
+ grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame);
+ p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_CHTTP2_PARSE_OK;
} else if ((gpr_uint32)(end - cur) > p->frame_size) {
- grpc_sopb_add_slice(&p->incoming_sopb,
- gpr_slice_sub(slice, (size_t)(cur - beg),
- (size_t)(cur + p->frame_size - beg)));
+ grpc_chttp2_incoming_byte_stream_push(
+ exec_ctx, p->parsing_frame,
+ gpr_slice_sub(slice, (size_t)(cur - beg),
+ (size_t)(cur + p->frame_size - beg)));
+ grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame);
+ p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
} else {
- grpc_sopb_add_slice(
- &p->incoming_sopb,
+ grpc_chttp2_incoming_byte_stream_push(
+ exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
GPR_ASSERT((size_t)(end - cur) <= p->frame_size);
p->frame_size -= (gpr_uint32)(end - cur);
diff --git a/src/core/transport/chttp2/frame_data.h b/src/core/transport/chttp2/frame_data.h
index 6762484e5b..27d4d0043b 100644
--- a/src/core/transport/chttp2/frame_data.h
+++ b/src/core/transport/chttp2/frame_data.h
@@ -39,7 +39,7 @@
#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/byte_stream.h"
#include "src/core/transport/chttp2/frame.h"
typedef enum {
@@ -51,6 +51,14 @@ typedef enum {
GRPC_CHTTP2_DATA_FRAME
} grpc_chttp2_stream_state;
+typedef struct grpc_chttp2_incoming_byte_stream
+ grpc_chttp2_incoming_byte_stream;
+
+typedef struct grpc_chttp2_incoming_frame_queue {
+ grpc_chttp2_incoming_byte_stream *head;
+ grpc_chttp2_incoming_byte_stream *tail;
+} grpc_chttp2_incoming_frame_queue;
+
typedef struct {
grpc_chttp2_stream_state state;
gpr_uint8 is_last_frame;
@@ -58,14 +66,22 @@ typedef struct {
gpr_uint32 frame_size;
int is_frame_compressed;
- grpc_stream_op_buffer incoming_sopb;
+ grpc_chttp2_incoming_frame_queue incoming_frames;
+ grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
+void grpc_chttp2_incoming_frame_queue_merge(
+ grpc_chttp2_incoming_frame_queue *head_dst,
+ grpc_chttp2_incoming_frame_queue *tail_src);
+grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
+ grpc_chttp2_incoming_frame_queue *q);
+
/* initialize per-stream state for data frame parsing */
grpc_chttp2_parse_error grpc_chttp2_data_parser_init(
grpc_chttp2_data_parser *parser);
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser *parser);
+void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_data_parser *parser);
/* start processing a new data frame */
grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
@@ -78,7 +94,8 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
-/* create a slice with an empty data frame and is_last set */
-gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id);
+void grpc_chttp2_encode_data(gpr_uint32 id, gpr_slice_buffer *inbuf,
+ gpr_uint32 write_bytes, int is_eof,
+ gpr_slice_buffer *outbuf);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H */
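Editor's note: grpc_chttp2_encode_data, which replaces the old empty-close helper, prepends a standard 9-byte HTTP/2 DATA frame header and then moves the payload across. A small worked sketch with values chosen for illustration:

    gpr_slice_buffer inbuf, outbuf;
    gpr_slice_buffer_init(&inbuf);
    gpr_slice_buffer_init(&outbuf);
    gpr_slice_buffer_add(&inbuf, gpr_slice_from_copied_string("hello"));

    /* emits the header 00 00 05 | 00 | 00 | 00 00 00 01
       (24-bit length = 5, type = DATA, flags = 0, stream id = 1),
       then moves the 5 payload bytes from inbuf to outbuf */
    grpc_chttp2_encode_data(1, &inbuf, 5, 0 /* is_eof */, &outbuf);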
diff --git a/src/core/transport/chttp2/frame_ping.c b/src/core/transport/chttp2/frame_ping.c
index 4d2c54269d..8e763278ff 100644
--- a/src/core/transport/chttp2/frame_ping.c
+++ b/src/core/transport/chttp2/frame_ping.c
@@ -76,7 +76,6 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
gpr_uint8 *cur = beg;
grpc_chttp2_ping_parser *p = parser;
- grpc_chttp2_outstanding_ping *ping;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes[p->byte] = *cur;
@@ -87,15 +86,7 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
- for (ping = transport_parsing->pings.next;
- ping != &transport_parsing->pings; ping = ping->next) {
- if (0 == memcmp(p->opaque_8bytes, ping->id, 8)) {
- grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 1);
- }
- ping->next->prev = ping->prev;
- ping->prev->next = ping->next;
- gpr_free(ping);
- }
+ grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
diff --git a/src/core/transport/chttp2/frame_settings.c b/src/core/transport/chttp2/frame_settings.c
index 395a2da452..383b6e7f93 100644
--- a/src/core/transport/chttp2/frame_settings.c
+++ b/src/core/transport/chttp2/frame_settings.c
@@ -36,27 +36,35 @@
#include <string.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
#include "src/core/debug/trace.h"
#include "src/core/transport/chttp2/frame.h"
+#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2_transport.h"
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
+
+#define MAX_MAX_HEADER_LIST_SIZE (1024 * 1024 * 1024)
/* HTTP/2 mandated initial connection settings */
const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {
- {NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
+ {NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
+ GRPC_CHTTP2_PROTOCOL_ERROR},
{"HEADER_TABLE_SIZE", 4096, 0, 0xffffffff,
- GRPC_CHTTP2_CLAMP_INVALID_VALUE},
- {"ENABLE_PUSH", 1, 0, 1, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
+ GRPC_CHTTP2_CLAMP_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ {"ENABLE_PUSH", 1, 0, 1, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
+ GRPC_CHTTP2_PROTOCOL_ERROR},
{"MAX_CONCURRENT_STREAMS", 0xffffffffu, 0, 0xffffffffu,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
- {"INITIAL_WINDOW_SIZE", 65535, 0, 0xffffffffu,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ {"INITIAL_WINDOW_SIZE", 65535, 0, 0x7fffffffu,
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE,
+ GRPC_CHTTP2_FLOW_CONTROL_ERROR},
{"MAX_FRAME_SIZE", 16384, 16384, 16777215,
- GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE},
- {"MAX_HEADER_LIST_SIZE", 0xffffffffu, 0, 0xffffffffu,
- GRPC_CHTTP2_CLAMP_INVALID_VALUE},
+ GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
+ {"MAX_HEADER_LIST_SIZE", MAX_MAX_HEADER_LIST_SIZE, 0,
+ MAX_MAX_HEADER_LIST_SIZE, GRPC_CHTTP2_CLAMP_INVALID_VALUE,
+ GRPC_CHTTP2_PROTOCOL_ERROR},
};
static gpr_uint8 *fill_header(gpr_uint8 *out, gpr_uint32 length,
@@ -218,6 +226,10 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
GPR_CLAMP(parser->value, sp->min_value, sp->max_value);
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
+ grpc_chttp2_goaway_append(
+ transport_parsing->last_incoming_stream_id, sp->error_value,
+ gpr_slice_from_static_string("HTTP2 settings error"),
+ &transport_parsing->qbuf);
gpr_log(GPR_ERROR, "invalid value %u passed for %s",
parser->value, sp->name);
return GRPC_CHTTP2_CONNECTION_ERROR;
diff --git a/src/core/transport/chttp2/frame_settings.h b/src/core/transport/chttp2/frame_settings.h
index cf857dd602..e9c3c440b5 100644
--- a/src/core/transport/chttp2/frame_settings.h
+++ b/src/core/transport/chttp2/frame_settings.h
@@ -79,6 +79,7 @@ typedef struct {
gpr_uint32 min_value;
gpr_uint32 max_value;
grpc_chttp2_invalid_value_behavior invalid_value_behavior;
+ gpr_uint32 error_value;
} grpc_chttp2_setting_parameters;
/* HTTP/2 mandated connection setting parameters */
diff --git a/src/core/transport/chttp2/frame_window_update.c b/src/core/transport/chttp2/frame_window_update.c
index 91bbcfe2c1..74ca29baf9 100644
--- a/src/core/transport/chttp2/frame_window_update.c
+++ b/src/core/transport/chttp2/frame_window_update.c
@@ -89,7 +89,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
}
if (p->byte == 4) {
- if (p->amount == 0 || (p->amount & 0x80000000u)) {
+ gpr_uint32 received_update = p->amount;
+ if (received_update == 0 || (received_update & 0x80000000u)) {
gpr_log(GPR_ERROR, "invalid window update bytes: %d", p->amount);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
@@ -97,17 +98,15 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("update", transport_parsing,
- stream_parsing, outgoing_window_update,
- p->amount);
- stream_parsing->outgoing_window_update += p->amount;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
+ stream_parsing, outgoing_window,
+ received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
}
} else {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("update", transport_parsing,
- outgoing_window_update, p->amount);
- transport_parsing->outgoing_window_update += p->amount;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
+ outgoing_window, received_update);
}
}
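
The refactor above swaps the old outgoing_window_update accumulators for the FLOW_CREDIT macros, but the validation rule is unchanged: a WINDOW_UPDATE payload is a 4-byte big-endian increment whose high bit is reserved, and an increment of zero or one with the reserved bit set is rejected. A standalone sketch of that check (simplified; not the chttp2 parser itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns the increment, or 0 if the payload is invalid. */
    static uint32_t parse_window_update(const uint8_t payload[4]) {
      uint32_t amount = ((uint32_t)payload[0] << 24) |
                        ((uint32_t)payload[1] << 16) |
                        ((uint32_t)payload[2] << 8) | (uint32_t)payload[3];
      if (amount == 0 || (amount & 0x80000000u)) {
        fprintf(stderr, "invalid window update bytes: %u\n", amount);
        return 0;
      }
      return amount; /* credited to the stream or transport outgoing_window */
    }

    int main(void) {
      const uint8_t ok[4] = {0x00, 0x00, 0xff, 0xff};  /* +65535 */
      const uint8_t bad[4] = {0x80, 0x00, 0x00, 0x01}; /* reserved bit set */
      printf("%u\n", parse_window_update(ok));
      printf("%u\n", parse_window_update(bad));
      return 0;
    }
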
diff --git a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/hpack_encoder.c
index 24a5d958b8..6c558bc1cb 100644
--- a/src/core/transport/chttp2/stream_encoder.c
+++ b/src/core/transport/chttp2/hpack_encoder.c
@@ -31,17 +31,20 @@
*
*/
-#include "src/core/transport/chttp2/stream_encoder.h"
+#include "src/core/transport/chttp2/hpack_encoder.h"
#include <assert.h>
#include <string.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
+
#include "src/core/transport/chttp2/bin_encoder.h"
#include "src/core/transport/chttp2/hpack_table.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
#include "src/core/transport/chttp2/varint.h"
+#include "src/core/transport/static_metadata.h"
#define HASH_FRAGMENT_1(x) ((x)&255)
#define HASH_FRAGMENT_2(x) ((x >> 8) & 255)
@@ -54,18 +57,13 @@
/* don't consider adding anything bigger than this to the hpack table */
#define MAX_DECODER_SPACE_USAGE 512
-/* what kind of frame our we encoding? */
-typedef enum { HEADER, DATA, NONE } frame_type;
-
typedef struct {
- frame_type cur_frame_type;
+ int is_first_frame;
/* number of bytes in 'output' when we started the frame - used to calculate
frame length */
size_t output_length_at_start_of_frame;
/* index (in output) of the header for the current frame */
size_t header_idx;
- /* was the last frame emitted a header? (if yes, we'll need a CONTINUATION */
- gpr_uint8 last_was_header;
/* have we seen a regular (non-colon-prefixed) header yet? */
gpr_uint8 seen_regular_header;
/* output stream id */
@@ -92,58 +90,35 @@ static void fill_header(gpr_uint8 *p, gpr_uint8 type, gpr_uint32 id, size_t len,
static void finish_frame(framer_state *st, int is_header_boundary,
int is_last_in_stream) {
gpr_uint8 type = 0xff;
- switch (st->cur_frame_type) {
- case HEADER:
- type = st->last_was_header ? GRPC_CHTTP2_FRAME_CONTINUATION
- : GRPC_CHTTP2_FRAME_HEADER;
- st->last_was_header = 1;
- break;
- case DATA:
- type = GRPC_CHTTP2_FRAME_DATA;
- st->last_was_header = 0;
- is_header_boundary = 0;
- break;
- case NONE:
- return;
- }
+ type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
+ : GRPC_CHTTP2_FRAME_CONTINUATION;
fill_header(
GPR_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
st->stream_id, st->output->length - st->output_length_at_start_of_frame,
(gpr_uint8)(
(is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
(is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0)));
- st->cur_frame_type = NONE;
+ st->is_first_frame = 0;
}
/* begin a new frame: reserve off header space, remember how many bytes we'd
output before beginning */
-static void begin_frame(framer_state *st, frame_type type) {
- GPR_ASSERT(type != NONE);
- GPR_ASSERT(st->cur_frame_type == NONE);
- st->cur_frame_type = type;
+static void begin_frame(framer_state *st) {
st->header_idx =
gpr_slice_buffer_add_indexed(st->output, gpr_slice_malloc(9));
st->output_length_at_start_of_frame = st->output->length;
}
-static void begin_new_frame(framer_state *st, frame_type type) {
- finish_frame(st, 1, 0);
- st->last_was_header = 0;
- begin_frame(st, type);
-}
-
/* make sure the current frame has sufficient space to add at least
about_to_add bytes -- finishes the current frame and begins a new one if
needed */
-static void ensure_frame_type(framer_state *st, frame_type type,
- size_t need_bytes) {
- if (st->cur_frame_type == type &&
- st->output->length - st->output_length_at_start_of_frame + need_bytes <=
- GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
+static void ensure_space(framer_state *st, size_t need_bytes) {
+ if (st->output->length - st->output_length_at_start_of_frame + need_bytes <=
+ GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
return;
}
- finish_frame(st, type != HEADER, 0);
- begin_frame(st, type);
+ finish_frame(st, 0, 0);
+ begin_frame(st);
}
/* increment a filter count, halve all counts if one element reaches max */
@@ -165,54 +140,60 @@ static void add_header_data(framer_state *st, gpr_slice slice) {
size_t len = GPR_SLICE_LENGTH(slice);
size_t remaining;
if (len == 0) return;
- ensure_frame_type(st, HEADER, 1);
remaining = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
st->output_length_at_start_of_frame - st->output->length;
if (len <= remaining) {
gpr_slice_buffer_add(st->output, slice);
} else {
gpr_slice_buffer_add(st->output, gpr_slice_split_head(&slice, remaining));
+ finish_frame(st, 0, 0);
+ begin_frame(st);
add_header_data(st, slice);
}
}
static gpr_uint8 *add_tiny_header_data(framer_state *st, size_t len) {
- ensure_frame_type(st, HEADER, len);
+ ensure_space(st, len);
return gpr_slice_buffer_tiny_add(st->output, len);
}
-/* add an element to the decoder table: returns metadata element to unref */
-static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem) {
+static void evict_entry(grpc_chttp2_hpack_compressor *c) {
+ c->tail_remote_index++;
+ GPR_ASSERT(c->tail_remote_index > 0);
+ GPR_ASSERT(c->table_size >=
+ c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
+ GPR_ASSERT(c->table_elems > 0);
+ c->table_size = (gpr_uint16)(
+ c->table_size -
+ c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
+ c->table_elems--;
+}
+
+/* add an element to the decoder table */
+static void add_elem(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem) {
gpr_uint32 key_hash = elem->key->hash;
gpr_uint32 elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
gpr_uint32 new_index = c->tail_remote_index + c->table_elems + 1;
size_t elem_size = 32 + GPR_SLICE_LENGTH(elem->key->slice) +
GPR_SLICE_LENGTH(elem->value->slice);
- grpc_mdelem *elem_to_unref;
GPR_ASSERT(elem_size < 65536);
+ if (elem_size > c->max_table_size) {
+ while (c->table_size > 0) {
+ evict_entry(c);
+ }
+ return;
+ }
+
/* Reserve space for this element in the remote table: if this overflows
the current table, drop elements until it fits, matching the decompressor
algorithm */
- /* TODO(ctiller): constant */
- while (c->table_size + elem_size > 4096) {
- c->tail_remote_index++;
- GPR_ASSERT(c->tail_remote_index > 0);
- GPR_ASSERT(c->table_size >=
- c->table_elem_size[c->tail_remote_index %
- GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS]);
- GPR_ASSERT(c->table_elems > 0);
- c->table_size =
- (gpr_uint16)(c->table_size -
- c->table_elem_size[c->tail_remote_index %
- GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS]);
- c->table_elems--;
+ while (c->table_size + elem_size > c->max_table_size) {
+ evict_entry(c);
}
- GPR_ASSERT(c->table_elems < GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS);
- c->table_elem_size[new_index % GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS] =
- (gpr_uint16)elem_size;
+ GPR_ASSERT(c->table_elems < c->max_table_size);
+ c->table_elem_size[new_index % c->cap_table_elems] = (gpr_uint16)elem_size;
c->table_size = (gpr_uint16)(c->table_size + elem_size);
c->table_elems++;
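
add_elem now sizes entries against the negotiated max_table_size rather than a hard-coded 4096, flushing the whole table for entries that could never fit and otherwise evicting from the tail until the new entry does, matching the decoder's accounting. A standalone sketch of that bookkeeping, assuming the usual HPACK rule of 32 bytes of per-entry overhead (illustrative numbers, not gRPC code):

    #include <stddef.h>
    #include <stdio.h>

    int main(void) {
      size_t max_table_size = 4096;        /* negotiated HEADER_TABLE_SIZE */
      size_t table_size = 4000;            /* bytes currently accounted for */
      size_t key_len = 10, value_len = 70; /* e.g. "user-agent": "grpc-c/..." */
      size_t elem_size = 32 + key_len + value_len; /* 112 bytes */

      if (elem_size > max_table_size) {
        table_size = 0; /* can never fit: flush the table entirely */
      } else {
        while (table_size + elem_size > max_table_size) {
          /* evict_entry() would drop the oldest entry; pretend each one
             happened to account for 100 bytes */
          table_size -= 100;
        }
        table_size += elem_size;
      }
      printf("table now holds %zu bytes\n", table_size); /* 4012 */
      return 0;
    }
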
@@ -220,31 +201,27 @@ static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem) {
/* already there: update with new index */
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- elem_to_unref = elem;
} else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem) {
/* already there (cuckoo): update with new index */
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- elem_to_unref = elem;
} else if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == NULL) {
/* not there, but a free element: add */
- c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = elem;
+ c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- elem_to_unref = NULL;
} else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == NULL) {
/* not there (cuckoo), but a free element: add */
- c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = elem;
+ c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- elem_to_unref = NULL;
} else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
/* not there: replace oldest */
- elem_to_unref = c->entries_elems[HASH_FRAGMENT_2(elem_hash)];
- c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = elem;
+ GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
+ c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
} else {
/* not there: replace oldest */
- elem_to_unref = c->entries_elems[HASH_FRAGMENT_3(elem_hash)];
- c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = elem;
+ GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
+ c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
}
@@ -270,8 +247,6 @@ static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
c->entries_keys[HASH_FRAGMENT_3(key_hash)] = GRPC_MDSTR_REF(elem->key);
c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
}
-
- return elem_to_unref;
}
static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 elem_index,
@@ -364,15 +339,23 @@ static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
add_header_data(st, gpr_slice_ref(value_slice));
}
+static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor *c,
+ framer_state *st) {
+ gpr_uint32 len = GRPC_CHTTP2_VARINT_LENGTH(c->max_table_size, 3);
+ GRPC_CHTTP2_WRITE_VARINT(c->max_table_size, 3, 0x20,
+ add_tiny_header_data(st, len), len);
+ c->advertise_table_size_change = 0;
+}
+
static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c,
gpr_uint32 elem_index) {
return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
c->table_elems - elem_index;
}
-/* encode an mdelem; returns metadata element to unref */
-static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem *elem, framer_state *st) {
+/* encode an mdelem */
+static void hpack_enc(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem,
+ framer_state *st) {
gpr_uint32 key_hash = elem->key->hash;
gpr_uint32 elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
size_t decoder_space_usage;
@@ -382,10 +365,10 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
GPR_ASSERT(GPR_SLICE_LENGTH(elem->key->slice) > 0);
if (GPR_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
st->seen_regular_header = 1;
- } else if (st->seen_regular_header != 0) { /* reserved header */
- gpr_log(GPR_ERROR,
- "Reserved header (colon-prefixed) happening after regular ones.");
- abort();
+ } else {
+ GPR_ASSERT(
+ st->seen_regular_header == 0 &&
+ "Reserved header (colon-prefixed) happening after regular ones.");
}
inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
@@ -397,7 +380,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: complete element (first cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
st);
- return elem;
+ return;
}
if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem &&
@@ -405,7 +388,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: complete element (second cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
st);
- return elem;
+ return;
}
/* should this elem be in the table? */
@@ -423,12 +406,13 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
- return add_elem(c, elem);
+ add_elem(c, elem);
+ return;
} else {
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
- return elem;
+ return;
}
- GPR_UNREACHABLE_CODE(return NULL);
+ GPR_UNREACHABLE_CODE(return );
}
indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
@@ -437,24 +421,26 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: key (second cuckoo hash) */
if (should_add_elem) {
emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
- return add_elem(c, elem);
+ add_elem(c, elem);
+ return;
} else {
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
- return elem;
+ return;
}
- GPR_UNREACHABLE_CODE(return NULL);
+ GPR_UNREACHABLE_CODE(return );
}
/* no elem, key in the table... fall back to literal emission */
if (should_add_elem) {
emit_lithdr_incidx_v(c, elem, st);
- return add_elem(c, elem);
+ add_elem(c, elem);
+ return;
} else {
emit_lithdr_noidx_v(c, elem, st);
- return elem;
+ return;
}
- GPR_UNREACHABLE_CODE(return NULL);
+ GPR_UNREACHABLE_CODE(return );
}
#define STRLEN_LIT(x) (sizeof(x) - 1)
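
The entries_elems/indices_elems arrays consulted above (and refreshed in add_elem) are a small two-slot cache: each element hashes to two candidate buckets, and when both are occupied the bucket holding the older (smaller) table index is replaced. A standalone sketch of that policy with simplified stand-in types; hash value 0 doubles as "empty" here, whereas the real code stores element pointers:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SLOTS 256
    #define SLOT_A(h) ((h)&255)
    #define SLOT_B(h) (((h) >> 8) & 255)

    typedef struct {
      uint32_t keys[NUM_SLOTS];    /* 0 means empty */
      uint32_t indices[NUM_SLOTS]; /* table index the entry was added at */
    } elem_cache;

    static void cache_insert(elem_cache *c, uint32_t hash, uint32_t new_index) {
      uint32_t a = SLOT_A(hash), b = SLOT_B(hash);
      if (c->keys[a] == hash) {
        c->indices[a] = new_index; /* already cached: refresh the index */
      } else if (c->keys[b] == hash) {
        c->indices[b] = new_index;
      } else if (c->keys[a] == 0) {
        c->keys[a] = hash; c->indices[a] = new_index; /* free slot */
      } else if (c->keys[b] == 0) {
        c->keys[b] = hash; c->indices[b] = new_index;
      } else if (c->indices[a] < c->indices[b]) {
        c->keys[a] = hash; c->indices[a] = new_index; /* replace the older one */
      } else {
        c->keys[b] = hash; c->indices[b] = new_index;
      }
    }

    int main(void) {
      static elem_cache c;
      cache_insert(&c, 0x1234u, 1);
      cache_insert(&c, 0x1234u, 2); /* refreshes the cached index */
      printf("slot %u -> index %u\n", SLOT_A(0x1234u), c.indices[SLOT_A(0x1234u)]);
      return 0;
    }
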
@@ -467,23 +453,25 @@ static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
grpc_chttp2_encode_timeout(
gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
mdelem = grpc_mdelem_from_metadata_strings(
- c->mdctx, GRPC_MDSTR_REF(c->timeout_key_str),
- grpc_mdstr_from_string(c->mdctx, timeout_str));
- mdelem = hpack_enc(c, mdelem, st);
- if (mdelem) GRPC_MDELEM_UNREF(mdelem);
+ GRPC_MDSTR_GRPC_TIMEOUT, grpc_mdstr_from_string(timeout_str));
+ hpack_enc(c, mdelem, st);
+ GRPC_MDELEM_UNREF(mdelem);
}
-gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id) {
- gpr_slice slice = gpr_slice_malloc(9);
- fill_header(GPR_SLICE_START_PTR(slice), GRPC_CHTTP2_FRAME_DATA, id, 0, 1);
- return slice;
+static gpr_uint32 elems_for_bytes(gpr_uint32 bytes) {
+ return (bytes + 31) / 32;
}
-void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c,
- grpc_mdctx *ctx) {
+void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
memset(c, 0, sizeof(*c));
- c->mdctx = ctx;
- c->timeout_key_str = grpc_mdstr_from_string(ctx, "grpc-timeout");
+ c->max_table_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
+ c->cap_table_elems = elems_for_bytes(c->max_table_size);
+ c->max_table_elems = c->cap_table_elems;
+ c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
+ c->table_elem_size =
+ gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
+ memset(c->table_elem_size, 0,
+ sizeof(*c->table_elem_size) * c->cap_table_elems);
}
void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c) {
@@ -492,172 +480,88 @@ void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c) {
if (c->entries_keys[i]) GRPC_MDSTR_UNREF(c->entries_keys[i]);
if (c->entries_elems[i]) GRPC_MDELEM_UNREF(c->entries_elems[i]);
}
- GRPC_MDSTR_UNREF(c->timeout_key_str);
+ gpr_free(c->table_elem_size);
}
-gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
- gpr_uint32 max_flow_controlled_bytes,
- grpc_stream_op_buffer *outops) {
- gpr_slice slice;
- grpc_stream_op *op;
- gpr_uint32 max_take_size;
- gpr_uint32 flow_controlled_bytes_taken = 0;
- gpr_uint32 curop = 0;
- gpr_uint8 *p;
- gpr_uint8 compressed_flag_set = 0;
-
- while (curop < *inops_count) {
- GPR_ASSERT(flow_controlled_bytes_taken <= max_flow_controlled_bytes);
- op = &inops[curop];
- switch (op->type) {
- case GRPC_NO_OP:
- /* skip */
- curop++;
- break;
- case GRPC_OP_METADATA:
- grpc_metadata_batch_assert_ok(&op->data.metadata);
- /* these just get copied as they don't impact the number of flow
- controlled bytes */
- grpc_sopb_append(outops, op, 1);
- curop++;
- break;
- case GRPC_OP_BEGIN_MESSAGE:
- /* begin op: for now we just convert the op to a slice and fall
- through - this lets us reuse the slice framing code below */
- compressed_flag_set =
- (op->data.begin_message.flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
- slice = gpr_slice_malloc(5);
-
- p = GPR_SLICE_START_PTR(slice);
- p[0] = compressed_flag_set;
- p[1] = (gpr_uint8)(op->data.begin_message.length >> 24);
- p[2] = (gpr_uint8)(op->data.begin_message.length >> 16);
- p[3] = (gpr_uint8)(op->data.begin_message.length >> 8);
- p[4] = (gpr_uint8)(op->data.begin_message.length);
- op->type = GRPC_OP_SLICE;
- op->data.slice = slice;
- /* fallthrough */
- case GRPC_OP_SLICE:
- slice = op->data.slice;
- if (!GPR_SLICE_LENGTH(slice)) {
- /* skip zero length slices */
- gpr_slice_unref(slice);
- curop++;
- break;
- }
- max_take_size = max_flow_controlled_bytes - flow_controlled_bytes_taken;
- if (max_take_size == 0) {
- goto exit_loop;
- }
- if (GPR_SLICE_LENGTH(slice) > max_take_size) {
- slice = gpr_slice_split_head(&op->data.slice, max_take_size);
- grpc_sopb_add_slice(outops, slice);
- } else {
- /* consume this op immediately */
- grpc_sopb_append(outops, op, 1);
- curop++;
- }
- flow_controlled_bytes_taken += (gpr_uint32)GPR_SLICE_LENGTH(slice);
- break;
- }
+void grpc_chttp2_hpack_compressor_set_max_usable_size(
+ grpc_chttp2_hpack_compressor *c, gpr_uint32 max_table_size) {
+ c->max_usable_size = max_table_size;
+ grpc_chttp2_hpack_compressor_set_max_table_size(
+ c, GPR_MIN(c->max_table_size, max_table_size));
+}
+
+static void rebuild_elems(grpc_chttp2_hpack_compressor *c, gpr_uint32 new_cap) {
+ gpr_uint16 *table_elem_size = gpr_malloc(sizeof(*table_elem_size) * new_cap);
+ gpr_uint32 i;
+
+ memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
+ GPR_ASSERT(c->table_elems <= new_cap);
+
+ for (i = 0; i < c->table_elems; i++) {
+ gpr_uint32 ofs = c->tail_remote_index + i + 1;
+ table_elem_size[ofs % new_cap] =
+ c->table_elem_size[ofs % c->cap_table_elems];
}
-exit_loop:
- *inops_count -= curop;
- memmove(inops, inops + curop, *inops_count * sizeof(grpc_stream_op));
- for (curop = 0; curop < *inops_count; curop++) {
- if (inops[curop].type == GRPC_OP_METADATA) {
- grpc_metadata_batch_assert_ok(&inops[curop].data.metadata);
+ c->cap_table_elems = new_cap;
+ gpr_free(c->table_elem_size);
+ c->table_elem_size = table_elem_size;
+}
+
+void grpc_chttp2_hpack_compressor_set_max_table_size(
+ grpc_chttp2_hpack_compressor *c, gpr_uint32 max_table_size) {
+ max_table_size = GPR_MIN(max_table_size, c->max_usable_size);
+ if (max_table_size == c->max_table_size) {
+ return;
+ }
+ while (c->table_size > 0 && c->table_size > max_table_size) {
+ evict_entry(c);
+ }
+ c->max_table_size = max_table_size;
+ c->max_table_elems = elems_for_bytes(max_table_size);
+ if (c->max_table_elems > c->cap_table_elems) {
+ rebuild_elems(c, GPR_MAX(c->max_table_elems, 2 * c->cap_table_elems));
+ } else if (c->max_table_elems < c->cap_table_elems / 3) {
+ gpr_uint32 new_cap = GPR_MAX(c->max_table_elems, 16);
+ if (new_cap != c->cap_table_elems) {
+ rebuild_elems(c, new_cap);
}
}
-
- return flow_controlled_bytes_taken;
+ c->advertise_table_size_change = 1;
+ gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
}
-void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
- gpr_uint32 stream_id,
- grpc_chttp2_hpack_compressor *compressor,
- gpr_slice_buffer *output) {
+void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c,
+ gpr_uint32 stream_id,
+ grpc_metadata_batch *metadata, int is_eof,
+ gpr_slice_buffer *outbuf) {
framer_state st;
- gpr_slice slice;
- grpc_stream_op *op;
- size_t max_take_size;
- gpr_uint32 curop = 0;
- gpr_uint32 unref_op;
grpc_linked_mdelem *l;
- int need_unref = 0;
gpr_timespec deadline;
GPR_ASSERT(stream_id != 0);
- st.cur_frame_type = NONE;
- st.last_was_header = 0;
st.seen_regular_header = 0;
st.stream_id = stream_id;
- st.output = output;
-
- while (curop < ops_count) {
- op = &ops[curop];
- switch (op->type) {
- case GRPC_NO_OP:
- case GRPC_OP_BEGIN_MESSAGE:
- gpr_log(
- GPR_ERROR,
- "These stream ops should be filtered out by grpc_chttp2_preencode");
- abort();
- case GRPC_OP_METADATA:
- /* Encode a metadata batch; store the returned values, representing
- a metadata element that needs to be unreffed back into the metadata
- slot. THIS MAY NOT BE THE SAME ELEMENT (if a decoder table slot got
- updated). After this loop, we'll do a batch unref of elements. */
- begin_new_frame(&st, HEADER);
- need_unref |= op->data.metadata.garbage.head != NULL;
- grpc_metadata_batch_assert_ok(&op->data.metadata);
- for (l = op->data.metadata.list.head; l; l = l->next) {
- l->md = hpack_enc(compressor, l->md, &st);
- need_unref |= l->md != NULL;
- }
- deadline = op->data.metadata.deadline;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
- deadline_enc(compressor, deadline, &st);
- }
- curop++;
- break;
- case GRPC_OP_SLICE:
- slice = op->data.slice;
- if (st.cur_frame_type == DATA &&
- st.output->length - st.output_length_at_start_of_frame ==
- GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
- finish_frame(&st, 0, 0);
- }
- ensure_frame_type(&st, DATA, 1);
- max_take_size = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
- st.output_length_at_start_of_frame - st.output->length;
- if (GPR_SLICE_LENGTH(slice) > max_take_size) {
- slice = gpr_slice_split_head(&op->data.slice, max_take_size);
- } else {
- /* consume this op immediately */
- curop++;
- }
- gpr_slice_buffer_add(output, slice);
- break;
- }
+ st.output = outbuf;
+ st.is_first_frame = 1;
+
+  /* Encode the metadata batch for this stream: each element is hpack-encoded
+     directly into the output buffer. hpack_enc no longer hands back elements
+     to unref, so no post-loop cleanup is needed. */
+ begin_frame(&st);
+ if (c->advertise_table_size_change != 0) {
+ emit_advertise_table_size_change(c, &st);
}
- if (eof && st.cur_frame_type == NONE) {
- begin_frame(&st, DATA);
+ grpc_metadata_batch_assert_ok(metadata);
+ for (l = metadata->list.head; l; l = l->next) {
+ hpack_enc(c, l->md, &st);
}
- finish_frame(&st, 1, eof);
-
- if (need_unref) {
- for (unref_op = 0; unref_op < curop; unref_op++) {
- op = &ops[unref_op];
- if (op->type != GRPC_OP_METADATA) continue;
- for (l = op->data.metadata.list.head; l; l = l->next) {
- if (l->md) GRPC_MDELEM_UNREF(l->md);
- }
- for (l = op->data.metadata.garbage.head; l; l = l->next) {
- GRPC_MDELEM_UNREF(l->md);
- }
- }
+ deadline = metadata->deadline;
+ if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
+ deadline_enc(c, deadline, &st);
}
+
+ finish_frame(&st, 1, is_eof);
}
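
With data framing and stream ops gone, the encoder's public surface reduces to the handful of functions declared in the new hpack_encoder.h below. An illustrative, untested sketch of the call sequence, assuming this commit's headers; a real caller would populate the batch through the metadata_batch API rather than the memset shortcut used here:

    #include <string.h>

    #include <grpc/support/slice_buffer.h>
    #include <grpc/support/time.h>

    #include "src/core/transport/chttp2/hpack_encoder.h"

    void encode_empty_trailers(gpr_uint32 stream_id, gpr_slice_buffer *outbuf) {
      grpc_chttp2_hpack_compressor compressor;
      grpc_metadata_batch batch;

      grpc_chttp2_hpack_compressor_init(&compressor);
      /* honor a peer HEADER_TABLE_SIZE of 0: the next header block starts with
         a dynamic table size update advertising the shrink */
      grpc_chttp2_hpack_compressor_set_max_table_size(&compressor, 0);

      memset(&batch, 0, sizeof(batch)); /* no elements */
      batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);

      /* emits a single HEADERS frame flagged END_HEADERS|END_STREAM */
      grpc_chttp2_encode_header(&compressor, stream_id, &batch, 1 /* is_eof */,
                                outbuf);

      grpc_chttp2_hpack_compressor_destroy(&compressor);
    }
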
diff --git a/src/core/transport/chttp2/stream_encoder.h b/src/core/transport/chttp2/hpack_encoder.h
index db52f2a0f6..a3600436e9 100644
--- a/src/core/transport/chttp2/stream_encoder.h
+++ b/src/core/transport/chttp2/hpack_encoder.h
@@ -31,26 +31,38 @@
*
*/
-#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H
-#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H
+#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H
+#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/metadata.h"
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#define GRPC_CHTTP2_HPACKC_NUM_FILTERS 256
#define GRPC_CHTTP2_HPACKC_NUM_VALUES 256
-#define GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS (4096 / 32)
+/* initial table size, per spec */
+#define GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE 4096
+/* maximum table size we'll actually use */
+#define GRPC_CHTTP2_HPACKC_MAX_TABLE_SIZE (1024 * 1024)
typedef struct {
gpr_uint32 filter_elems_sum;
+ gpr_uint32 max_table_size;
+ gpr_uint32 max_table_elems;
+ gpr_uint32 cap_table_elems;
+ /** if non-zero, advertise to the decoder that we'll start using a table
+ of this size */
+ gpr_uint8 advertise_table_size_change;
+ /** maximum number of bytes we'll use for the decode table (to guard against
+ peers ooming us by setting decode table size high) */
+ gpr_uint32 max_usable_size;
/* one before the lowest usable table index */
gpr_uint32 tail_remote_index;
- gpr_uint16 table_size;
- gpr_uint16 table_elems;
+ gpr_uint32 table_size;
+ gpr_uint32 table_elems;
/* filter tables for elems: these tables provide an approximate
popularity count for particular hashes, and are used to determine whether
@@ -59,11 +71,6 @@ typedef struct {
been seen. When that count reaches max (255), all values are halved. */
gpr_uint8 filter_elems[GRPC_CHTTP2_HPACKC_NUM_FILTERS];
- /* metadata context */
- grpc_mdctx *mdctx;
- /* the string 'grpc-timeout' */
- grpc_mdstr *timeout_key_str;
-
/* entry tables for keys & elems: these tables track values that have been
seen and *may* be in the decompressor table */
grpc_mdstr *entries_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
@@ -71,23 +78,18 @@ typedef struct {
gpr_uint32 indices_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
gpr_uint32 indices_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
- gpr_uint16 table_elem_size[GRPC_CHTTP2_HPACKC_MAX_TABLE_ELEMS];
+ gpr_uint16 *table_elem_size;
} grpc_chttp2_hpack_compressor;
-void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c,
- grpc_mdctx *mdctx);
+void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c);
void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c);
+void grpc_chttp2_hpack_compressor_set_max_table_size(
+ grpc_chttp2_hpack_compressor *c, gpr_uint32 max_table_size);
+void grpc_chttp2_hpack_compressor_set_max_usable_size(
+ grpc_chttp2_hpack_compressor *c, gpr_uint32 max_table_size);
-/* select stream ops to be encoded, moving them from inops to outops, and
- moving subsequent ops in inops forward in the queue */
-gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
- gpr_uint32 max_flow_controlled_bytes,
- grpc_stream_op_buffer *outops);
-
-/* encode stream ops to output */
-void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
- gpr_uint32 stream_id,
- grpc_chttp2_hpack_compressor *compressor,
- gpr_slice_buffer *output);
+void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c, gpr_uint32 id,
+ grpc_metadata_batch *metadata, int is_eof,
+ gpr_slice_buffer *outbuf);
-#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H */
+#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H */
diff --git a/src/core/transport/chttp2/hpack_parser.c b/src/core/transport/chttp2/hpack_parser.c
index 20d8312d54..fea0000896 100644
--- a/src/core/transport/chttp2/hpack_parser.c
+++ b/src/core/transport/chttp2/hpack_parser.c
@@ -38,13 +38,15 @@
#include <string.h>
#include <assert.h>
-#include "src/core/transport/chttp2/bin_encoder.h"
-#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
+#include "src/core/profiling/timers.h"
+#include "src/core/support/string.h"
+#include "src/core/transport/chttp2/bin_encoder.h"
+
typedef enum {
NOT_BINARY,
B64_BYTE0,
@@ -72,6 +74,8 @@ static int parse_begin(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
const gpr_uint8 *end);
static int parse_error(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
const gpr_uint8 *end);
+static int parse_illegal_op(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
+ const gpr_uint8 *end);
static int parse_string_prefix(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end);
@@ -154,7 +158,7 @@ static const grpc_chttp2_hpack_parser_state first_byte_action[] = {
parse_lithdr_incidx_x, parse_lithdr_incidx_v, parse_lithdr_notidx,
parse_lithdr_notidx_x, parse_lithdr_notidx_v, parse_lithdr_nvridx,
parse_lithdr_nvridx_x, parse_lithdr_nvridx_v, parse_max_tbl_size,
- parse_max_tbl_size_x, parse_error};
+ parse_max_tbl_size_x, parse_illegal_op};
/* indexes the first byte to a parse state function - generated by
gen_hpack_tables.c */
@@ -167,7 +171,7 @@ static const gpr_uint8 first_byte_lut[256] = {
LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX,
LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX,
LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX, LITHDR_NVRIDX_X,
- ILLEGAL, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
+ MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE, MAX_TBL_SIZE,
@@ -620,19 +624,20 @@ static const gpr_uint8 inverse_base64[256] = {
};
/* emission helpers */
-static void on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
- int add_to_table) {
+static int on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
+ int add_to_table) {
if (add_to_table) {
- GRPC_MDELEM_REF(md);
- grpc_chttp2_hptbl_add(&p->table, md);
+ if (!grpc_chttp2_hptbl_add(&p->table, md)) {
+ return 0;
+ }
}
p->on_header(p->on_header_user_data, md);
+ return 1;
}
static grpc_mdstr *take_string(grpc_chttp2_hpack_parser *p,
grpc_chttp2_hpack_parser_string *str) {
- grpc_mdstr *s = grpc_mdstr_from_buffer(p->table.mdctx, (gpr_uint8 *)str->str,
- str->length);
+ grpc_mdstr *s = grpc_mdstr_from_buffer((gpr_uint8 *)str->str, str->length);
str->length = 0;
return s;
}
@@ -712,14 +717,18 @@ static int parse_stream_dep0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
static int finish_indexed_field(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
+ if (md == NULL) {
+ gpr_log(GPR_ERROR, "Invalid HPACK index received: %d", p->index);
+ return 0;
+ }
GRPC_MDELEM_REF(md);
- on_hdr(p, md, 0);
- return parse_begin(p, cur, end);
+ return on_hdr(p, md, 0) && parse_begin(p, cur, end);
}
/* parse an indexed field with index < 127 */
static int parse_indexed_field(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
+ p->dynamic_table_update_allowed = 0;
p->index = (*cur) & 0x7f;
return finish_indexed_field(p, cur + 1, end);
}
@@ -729,6 +738,7 @@ static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_indexed_field};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0x7f;
p->parsing.value = &p->index;
@@ -740,21 +750,20 @@ static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
static int finish_lithdr_incidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 1);
- return parse_begin(p, cur, end);
+ GPR_ASSERT(md != NULL); /* handled in string parsing */
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 1) &&
+ parse_begin(p, cur, end);
}
/* finish a literal header with incremental indexing with no index */
static int finish_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- take_string(p, &p->key),
- take_string(p, &p->value)),
- 1);
- return parse_begin(p, cur, end);
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 1) &&
+ parse_begin(p, cur, end);
}
/* parse a literal header with incremental indexing; index < 63 */
@@ -762,6 +771,7 @@ static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_incidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0x3f;
return parse_string_prefix(p, cur + 1, end);
@@ -773,6 +783,7 @@ static int parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_incidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0x3f;
p->parsing.value = &p->index;
@@ -785,6 +796,7 @@ static int parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_incidx_v};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
@@ -793,21 +805,20 @@ static int parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
static int finish_lithdr_notidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ GPR_ASSERT(md != NULL); /* handled in string parsing */
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* finish a literal header without incremental indexing with index = 0 */
static int finish_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* parse a literal header without incremental indexing; index < 15 */
@@ -815,6 +826,7 @@ static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_notidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
return parse_string_prefix(p, cur + 1, end);
@@ -826,6 +838,7 @@ static int parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_notidx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
@@ -838,6 +851,7 @@ static int parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_notidx_v};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
@@ -846,21 +860,20 @@ static int parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
static int finish_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ GPR_ASSERT(md != NULL); /* handled in string parsing */
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* finish a literal header that is never indexed with an extra value */
static int finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
- on_hdr(p, grpc_mdelem_from_metadata_strings(p->table.mdctx,
- take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- return parse_begin(p, cur, end);
+ return on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0) &&
+ parse_begin(p, cur, end);
}
/* parse a literal header that is never indexed; index < 15 */
@@ -868,6 +881,7 @@ static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_nvridx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
return parse_string_prefix(p, cur + 1, end);
@@ -879,6 +893,7 @@ static int parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_nvridx};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
@@ -891,6 +906,7 @@ static int parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
+ p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
@@ -899,14 +915,18 @@ static int parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
static int finish_max_tbl_size(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
- abort(); /* not implemented */
- return parse_begin(p, cur, end);
+ return grpc_chttp2_hptbl_set_current_table_size(&p->table, p->index) &&
+ parse_begin(p, cur, end);
}
/* parse a max table size change, max size < 15 */
static int parse_max_tbl_size(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
const gpr_uint8 *end) {
- p->index = (*cur) & 0xf;
+ if (p->dynamic_table_update_allowed == 0) {
+ return 0;
+ }
+ p->dynamic_table_update_allowed--;
+ p->index = (*cur) & 0x1f;
return finish_max_tbl_size(p, cur + 1, end);
}
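
For reference, the opcode handled here is HPACK's dynamic table size update (RFC 7541 §6.3): a first byte of the form 001xxxxx carrying a 5-bit prefix integer, hence the 0x1f mask above and the 0x20 bit set on the encode side. The parser also now only accepts such updates at the start of a header block: dynamic_table_update_allowed begins at 2 and every other opcode zeroes it. A standalone sketch of the first-byte decode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t first_byte = 0x3f;         /* 001 11111 */
      if ((first_byte & 0xe0) == 0x20) { /* dynamic table size update */
        uint32_t size = first_byte & 0x1f;
        if (size == 0x1f) {
          printf("size >= 31: read varint continuation bytes\n");
        } else {
          printf("new table size: %u bytes\n", size);
        }
      }
      return 0;
    }
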
@@ -915,8 +935,12 @@ static int parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_max_tbl_size};
+ if (p->dynamic_table_update_allowed == 0) {
+ return 0;
+ }
+ p->dynamic_table_update_allowed--;
p->next_state = and_then;
- p->index = 0xf;
+ p->index = 0x1f;
p->parsing.value = &p->index;
return parse_value0(p, cur + 1, end);
}
@@ -928,6 +952,13 @@ static int parse_error(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return 0;
}
+static int parse_illegal_op(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
+ const gpr_uint8 *end) {
+ GPR_ASSERT(cur != end);
+ gpr_log(GPR_DEBUG, "Illegal hpack op code %d", *cur);
+ return parse_error(p, cur, end);
+}
+
/* parse the 1st byte of a varint into p->parsing.value
no overflow is possible */
static int parse_value0(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
@@ -1035,7 +1066,7 @@ static int parse_value4(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
error:
gpr_log(GPR_ERROR,
"integer overflow in hpack integer decoding: have 0x%08x, "
- "got byte 0x%02x",
+ "got byte 0x%02x on byte 5",
*p->parsing.value, *cur);
return parse_error(p, cur, end);
}
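
The overflow being reported here comes from HPACK's prefix-integer continuation bytes (RFC 7541 §5.1): after an all-ones prefix, each following byte contributes 7 bits until one arrives with the high bit clear. A standalone sketch of that decode with the same 32-bit overflow guard; this is a simplified rendering, not the table-driven parse_value0..parse_value5up chain:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 on success, -1 on overflow or truncation. *value starts at the
       all-ones prefix (e.g. 0x1f) and accumulates 7 bits per continuation byte. */
    static int decode_varint_rest(uint32_t *value, const uint8_t *cur,
                                  const uint8_t *end) {
      uint32_t shift = 0;
      while (cur != end) {
        uint32_t chunk = *cur & 0x7f;
        if (shift >= 32 || chunk > ((0xffffffffu - *value) >> shift)) {
          return -1; /* integer overflow in hpack integer decoding */
        }
        *value += chunk << shift;
        if ((*cur & 0x80) == 0) return 0;
        cur++;
        shift += 7;
      }
      return -1; /* ran out of input mid-integer */
    }

    int main(void) {
      const uint8_t rest[] = {0x9a, 0x0a}; /* RFC 7541 example: decodes to 1337 */
      uint32_t value = 0x1f;               /* the 5-bit prefix was all ones */
      if (decode_varint_rest(&value, rest, rest + sizeof(rest)) == 0) {
        printf("decoded %u\n", value); /* 1337 */
      }
      return 0;
    }
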
@@ -1060,7 +1091,8 @@ static int parse_value5up(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
gpr_log(GPR_ERROR,
"integer overflow in hpack integer decoding: have 0x%08x, "
- "got byte 0x%02x sometime after byte 4");
+ "got byte 0x%02x sometime after byte 5",
+ *p->parsing.value, *cur);
return parse_error(p, cur, end);
}
@@ -1291,7 +1323,10 @@ static is_binary_header is_binary_literal_header(grpc_chttp2_hpack_parser *p) {
static is_binary_header is_binary_indexed_header(grpc_chttp2_hpack_parser *p) {
grpc_mdelem *elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
- if (!elem) return ERROR_HEADER;
+ if (!elem) {
+ gpr_log(GPR_ERROR, "Invalid HPACK index received: %d", p->index);
+ return ERROR_HEADER;
+ }
return grpc_is_binary_header(
(const char *)GPR_SLICE_START_PTR(elem->key->slice),
GPR_SLICE_LENGTH(elem->key->slice))
@@ -1329,19 +1364,10 @@ static int parse_value_string_with_literal_key(grpc_chttp2_hpack_parser *p,
/* PUBLIC INTERFACE */
static void on_header_not_set(void *user_data, grpc_mdelem *md) {
- char *keyhex = gpr_dump_slice(md->key->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- char *valuehex =
- gpr_dump_slice(md->value->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_ERROR, "on_header callback not set; key=%s value=%s", keyhex,
- valuehex);
- gpr_free(keyhex);
- gpr_free(valuehex);
- GRPC_MDELEM_UNREF(md);
- abort();
+ GPR_UNREACHABLE_CODE(return );
}
-void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
- grpc_mdctx *mdctx) {
+void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p) {
p->on_header = on_header_not_set;
p->on_header_user_data = NULL;
p->state = parse_begin;
@@ -1351,7 +1377,8 @@ void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
p->value.str = NULL;
p->value.capacity = 0;
p->value.length = 0;
- grpc_chttp2_hptbl_init(&p->table, mdctx);
+ p->dynamic_table_update_allowed = 2;
+ grpc_chttp2_hptbl_init(&p->table);
}
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p) {
@@ -1379,30 +1406,39 @@ grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
+ GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (!grpc_chttp2_hpack_parser_parse(parser, GPR_SLICE_START_PTR(slice),
GPR_SLICE_END_PTR(slice))) {
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
if (is_last) {
if (parser->is_boundary && parser->state != parse_begin) {
gpr_log(GPR_ERROR,
"end of header frame not aligned with a hpack record boundary");
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
- if (parser->is_boundary) {
- grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- &stream_parsing->incoming_metadata,
- &stream_parsing->data_parser.incoming_sopb);
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
- }
- if (parser->is_eof) {
- stream_parsing->received_close = 1;
+ /* need to check for null stream: this can occur if we receive an invalid
+ stream id on a header */
+ if (stream_parsing != NULL) {
+ if (parser->is_boundary) {
+ stream_parsing
+ ->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
+ stream_parsing->header_frames_received++;
+ grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
+ stream_parsing);
+ }
+ if (parser->is_eof) {
+ stream_parsing->received_close = 1;
+ }
}
parser->on_header = on_header_not_set;
parser->on_header_user_data = NULL;
parser->is_boundary = 0xde;
parser->is_eof = 0xde;
+ parser->dynamic_table_update_allowed = 2;
}
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_PARSE_OK;
}
diff --git a/src/core/transport/chttp2/hpack_parser.h b/src/core/transport/chttp2/hpack_parser.h
index f56867016c..bd36357124 100644
--- a/src/core/transport/chttp2/hpack_parser.h
+++ b/src/core/transport/chttp2/hpack_parser.h
@@ -85,6 +85,8 @@ struct grpc_chttp2_hpack_parser {
gpr_uint8 binary;
/* is the current string huffman encoded? */
gpr_uint8 huff;
+ /* is a dynamic table update allowed? */
+ gpr_uint8 dynamic_table_update_allowed;
/* set by higher layers, used by grpc_chttp2_header_parser_parse to signal
it should append a metadata boundary at the end of frame */
gpr_uint8 is_boundary;
@@ -95,8 +97,7 @@ struct grpc_chttp2_hpack_parser {
grpc_chttp2_hptbl table;
};
-void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
- grpc_mdctx *mdctx);
+void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
diff --git a/src/core/transport/chttp2/hpack_table.c b/src/core/transport/chttp2/hpack_table.c
index c442c2c341..59060daad3 100644
--- a/src/core/transport/chttp2/hpack_table.c
+++ b/src/core/transport/chttp2/hpack_table.c
@@ -36,7 +36,9 @@
#include <assert.h>
#include <string.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+
#include "src/core/support/murmur_hash.h"
static struct {
@@ -169,15 +171,24 @@ static struct {
{"www-authenticate", ""},
};
-void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl, grpc_mdctx *mdctx) {
+static gpr_uint32 entries_for_bytes(gpr_uint32 bytes) {
+ return (bytes + GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD - 1) /
+ GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
+}
+
+void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl) {
size_t i;
memset(tbl, 0, sizeof(*tbl));
- tbl->mdctx = mdctx;
- tbl->max_bytes = GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
+ tbl->current_table_bytes = tbl->max_bytes =
+ GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
+ tbl->max_entries = tbl->cap_entries =
+ entries_for_bytes(tbl->current_table_bytes);
+ tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
+ memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
- tbl->static_ents[i - 1] = grpc_mdelem_from_strings(
- mdctx, static_table[i].key, static_table[i].value);
+ tbl->static_ents[i - 1] =
+ grpc_mdelem_from_strings(static_table[i].key, static_table[i].value);
}
}
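
The arithmetic behind entries_for_bytes: every HPACK entry carries at least GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD (32) bytes of accounting, so a table of N bytes can never hold more than ceil(N / 32) entries, and the ents ring is allocated (and later re-sized) to match instead of using the old fixed-size array. A quick standalone check of the numbers:

    #include <stdio.h>

    static unsigned entries_for_bytes(unsigned bytes) { return (bytes + 31) / 32; }

    int main(void) {
      printf("%u\n", entries_for_bytes(4096));    /* 128: the initial cap_entries */
      printf("%u\n", entries_for_bytes(0));       /* 0: table disabled by the peer */
      printf("%u\n", entries_for_bytes(1 << 20)); /* 32768: a 1MB table */
      return 0;
    }
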
@@ -187,9 +198,9 @@ void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl *tbl) {
GRPC_MDELEM_UNREF(tbl->static_ents[i]);
}
for (i = 0; i < tbl->num_ents; i++) {
- GRPC_MDELEM_UNREF(
- tbl->ents[(tbl->first_ent + i) % GRPC_CHTTP2_MAX_TABLE_COUNT]);
+ GRPC_MDELEM_UNREF(tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]);
}
+ gpr_free(tbl->ents);
}
grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
@@ -201,8 +212,8 @@ grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
/* Otherwise, find the value in the list of valid entries */
tbl_index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
if (tbl_index < tbl->num_ents) {
- gpr_uint32 offset = (tbl->num_ents - 1u - tbl_index + tbl->first_ent) %
- GRPC_CHTTP2_MAX_TABLE_COUNT;
+ gpr_uint32 offset =
+ (tbl->num_ents - 1u - tbl_index + tbl->first_ent) % tbl->cap_entries;
return tbl->ents[offset];
}
/* Invalid entry: return error */
@@ -216,21 +227,81 @@ static void evict1(grpc_chttp2_hptbl *tbl) {
GPR_SLICE_LENGTH(first_ent->value->slice) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
GPR_ASSERT(elem_bytes <= tbl->mem_used);
- tbl->mem_used = (gpr_uint16)(tbl->mem_used - elem_bytes);
- tbl->first_ent =
- (gpr_uint16)((tbl->first_ent + 1) % GRPC_CHTTP2_MAX_TABLE_COUNT);
+ tbl->mem_used -= (gpr_uint32)elem_bytes;
+ tbl->first_ent = ((tbl->first_ent + 1) % tbl->cap_entries);
tbl->num_ents--;
GRPC_MDELEM_UNREF(first_ent);
}
-void grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
+static void rebuild_ents(grpc_chttp2_hptbl *tbl, gpr_uint32 new_cap) {
+ grpc_mdelem **ents = gpr_malloc(sizeof(*ents) * new_cap);
+ gpr_uint32 i;
+
+ for (i = 0; i < tbl->num_ents; i++) {
+ ents[i] = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries];
+ }
+ gpr_free(tbl->ents);
+ tbl->ents = ents;
+ tbl->cap_entries = new_cap;
+ tbl->first_ent = 0;
+}
+
+void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl *tbl,
+ gpr_uint32 max_bytes) {
+ if (tbl->max_bytes == max_bytes) {
+ return;
+ }
+ gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
+ while (tbl->mem_used > max_bytes) {
+ evict1(tbl);
+ }
+ tbl->max_bytes = max_bytes;
+}
+
+int grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl *tbl,
+ gpr_uint32 bytes) {
+ if (tbl->current_table_bytes == bytes) {
+ return 1;
+ }
+ if (bytes > tbl->max_bytes) {
+ gpr_log(GPR_ERROR,
+ "Attempt to make hpack table %d bytes when max is %d bytes", bytes,
+ tbl->max_bytes);
+ return 0;
+ }
+ gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
+ while (tbl->mem_used > bytes) {
+ evict1(tbl);
+ }
+ tbl->current_table_bytes = bytes;
+ tbl->max_entries = entries_for_bytes(bytes);
+ if (tbl->max_entries > tbl->cap_entries) {
+ rebuild_ents(tbl, GPR_MAX(tbl->max_entries, 2 * tbl->cap_entries));
+ } else if (tbl->max_entries < tbl->cap_entries / 3) {
+ gpr_uint32 new_cap = GPR_MAX(tbl->max_entries, 16u);
+ if (new_cap != tbl->cap_entries) {
+ rebuild_ents(tbl, new_cap);
+ }
+ }
+ return 1;
+}
+
+int grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
/* determine how many bytes of buffer this entry represents */
size_t elem_bytes = GPR_SLICE_LENGTH(md->key->slice) +
GPR_SLICE_LENGTH(md->value->slice) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
+ if (tbl->current_table_bytes > tbl->max_bytes) {
+ gpr_log(GPR_ERROR,
+ "HPACK max table size reduced to %d but not reflected by hpack "
+ "stream (still at %d)",
+ tbl->max_bytes, tbl->current_table_bytes);
+ return 0;
+ }
+
/* we can't add elements bigger than the max table size */
- if (elem_bytes > tbl->max_bytes) {
+ if (elem_bytes > tbl->current_table_bytes) {
/* HPACK draft 10 section 4.4 states:
* If the size of the new entry is less than or equal to the maximum
* size, that entry is added to the table. It is not an error to
@@ -243,44 +314,43 @@ void grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
while (tbl->num_ents) {
evict1(tbl);
}
- return;
+ return 1;
}
/* evict entries to ensure no overflow */
- while (elem_bytes > (size_t)tbl->max_bytes - tbl->mem_used) {
+ while (elem_bytes > (size_t)tbl->current_table_bytes - tbl->mem_used) {
evict1(tbl);
}
/* copy the finalized entry in */
- tbl->ents[tbl->last_ent] = md;
+ tbl->ents[(tbl->first_ent + tbl->num_ents) % tbl->cap_entries] =
+ GRPC_MDELEM_REF(md);
/* update accounting values */
- tbl->last_ent =
- (gpr_uint16)((tbl->last_ent + 1) % GRPC_CHTTP2_MAX_TABLE_COUNT);
tbl->num_ents++;
- tbl->mem_used = (gpr_uint16)(tbl->mem_used + elem_bytes);
+ tbl->mem_used += (gpr_uint32)elem_bytes;
+ return 1;
}
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
const grpc_chttp2_hptbl *tbl, grpc_mdelem *md) {
grpc_chttp2_hptbl_find_result r = {0, 0};
- gpr_uint16 i;
+ gpr_uint32 i;
/* See if the string is in the static table */
for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
grpc_mdelem *ent = tbl->static_ents[i];
if (md->key != ent->key) continue;
- r.index = (gpr_uint16)(i + 1);
+ r.index = i + 1u;
r.has_value = md->value == ent->value;
if (r.has_value) return r;
}
/* Scan the dynamic table */
for (i = 0; i < tbl->num_ents; i++) {
- gpr_uint16 idx =
- (gpr_uint16)(tbl->num_ents - i + GRPC_CHTTP2_LAST_STATIC_ENTRY);
- grpc_mdelem *ent =
- tbl->ents[(tbl->first_ent + i) % GRPC_CHTTP2_MAX_TABLE_COUNT];
+ gpr_uint32 idx =
+ (gpr_uint32)(tbl->num_ents - i + GRPC_CHTTP2_LAST_STATIC_ENTRY);
+ grpc_mdelem *ent = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries];
if (md->key != ent->key) continue;
r.index = idx;
r.has_value = md->value == ent->value;
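
Taken together, the decoder table now tracks two limits: max_bytes (the HEADER_TABLE_SIZE we advertised in SETTINGS) and current_table_bytes (the size the peer last acknowledged with a dynamic table size update), and grpc_chttp2_hptbl_add refuses to grow the table while the peer has not acknowledged a reduction. An illustrative, untested sketch of that flow against the API in this commit; it assumes the metadata system is initialized (e.g. inside a grpc_init()'d process):

    #include "src/core/transport/chttp2/hpack_table.h"

    int shrink_table_example(void) {
      grpc_chttp2_hptbl tbl;
      grpc_mdelem *md;
      int ok;

      grpc_chttp2_hptbl_init(&tbl);

      /* we advertise a smaller table via SETTINGS... */
      grpc_chttp2_hptbl_set_max_bytes(&tbl, 1024);
      /* ...so until the peer sends a dynamic table size update <= 1024,
         additions are rejected and the hpack parser reports an error */
      md = grpc_mdelem_from_strings("user-agent", "example");
      ok = grpc_chttp2_hptbl_add(&tbl, md); /* 0: reduction not yet acked */

      if (grpc_chttp2_hptbl_set_current_table_size(&tbl, 1024)) {
        ok = grpc_chttp2_hptbl_add(&tbl, md); /* 1: fits the agreed size */
      }

      GRPC_MDELEM_UNREF(md);
      grpc_chttp2_hptbl_destroy(&tbl);
      return ok;
    }
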
diff --git a/src/core/transport/chttp2/hpack_table.h b/src/core/transport/chttp2/hpack_table.h
index 4f882e2e03..a173eec30c 100644
--- a/src/core/transport/chttp2/hpack_table.h
+++ b/src/core/transport/chttp2/hpack_table.h
@@ -49,47 +49,58 @@
#define GRPC_CHTTP2_MAX_HPACK_TABLE_SIZE GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE
/* Per entry overhead bytes as per the spec */
#define GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD 32
+#if 0
/* Maximum number of entries we could possibly fit in the table, given defined
overheads */
#define GRPC_CHTTP2_MAX_TABLE_COUNT \
((GRPC_CHTTP2_MAX_HPACK_TABLE_SIZE + GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD - 1) / \
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD)
+#endif
/* hpack decoder table */
typedef struct {
- grpc_mdctx *mdctx;
/* the first used entry in ents */
- gpr_uint16 first_ent;
- /* the last used entry in ents */
- gpr_uint16 last_ent;
+ gpr_uint32 first_ent;
/* how many entries are in the table */
- gpr_uint16 num_ents;
+ gpr_uint32 num_ents;
/* the amount of memory used by the table, according to the hpack algorithm */
- gpr_uint16 mem_used;
+ gpr_uint32 mem_used;
/* the max memory allowed to be used by the table, according to the hpack
algorithm */
- gpr_uint16 max_bytes;
+ gpr_uint32 max_bytes;
+ /* the currently agreed size of the table, according to the hpack algorithm */
+ gpr_uint32 current_table_bytes;
+ /* Maximum number of entries we could possibly fit in the table, given defined
+ overheads */
+ gpr_uint32 max_entries;
+ /* Number of entries allocated in ents */
+ gpr_uint32 cap_entries;
/* a circular buffer of headers - this is stored in the opposite order to
what hpack specifies, in order to simplify table management a little...
meaning lookups need to SUBTRACT from the end position */
- grpc_mdelem *ents[GRPC_CHTTP2_MAX_TABLE_COUNT];
+ grpc_mdelem **ents;
grpc_mdelem *static_ents[GRPC_CHTTP2_LAST_STATIC_ENTRY];
} grpc_chttp2_hptbl;
/* initialize a hpack table */
-void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl, grpc_mdctx *mdctx);
+void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl);
void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl *tbl);
+void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl *tbl,
+ gpr_uint32 max_bytes);
+int grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl *tbl,
+ gpr_uint32 bytes);
/* lookup a table entry based on its hpack index */
grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
gpr_uint32 index);
/* add a table entry to the index */
-void grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl, grpc_mdelem *md);
+int grpc_chttp2_hptbl_add(grpc_chttp2_hptbl *tbl,
+ grpc_mdelem *md) GRPC_MUST_USE_RESULT;
/* Find a key/value pair in the table... returns the index in the table of the
most similar entry, or 0 if the value was not found */
typedef struct {
- gpr_uint16 index;
- gpr_uint8 has_value;
+ gpr_uint32 index;
+ int has_value;
} grpc_chttp2_hptbl_find_result;
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
const grpc_chttp2_hptbl *tbl, grpc_mdelem *md);
diff --git a/src/core/transport/chttp2/incoming_metadata.c b/src/core/transport/chttp2/incoming_metadata.c
index 10c64f3356..315bc2faa1 100644
--- a/src/core/transport/chttp2/incoming_metadata.c
+++ b/src/core/transport/chttp2/incoming_metadata.c
@@ -48,14 +48,17 @@ void grpc_chttp2_incoming_metadata_buffer_init(
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_chttp2_incoming_metadata_buffer *buffer) {
size_t i;
- for (i = 0; i < buffer->count; i++) {
- GRPC_MDELEM_UNREF(buffer->elems[i].md);
+ if (!buffer->published) {
+ for (i = 0; i < buffer->count; i++) {
+ GRPC_MDELEM_UNREF(buffer->elems[i].md);
+ }
}
gpr_free(buffer->elems);
}
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem) {
+ GPR_ASSERT(!buffer->published);
if (buffer->capacity == buffer->count) {
buffer->capacity = GPR_MAX(8, 2 * buffer->capacity);
buffer->elems =
@@ -66,117 +69,28 @@ void grpc_chttp2_incoming_metadata_buffer_add(
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
+ GPR_ASSERT(!buffer->published);
buffer->deadline = deadline;
}
-void grpc_chttp2_incoming_metadata_live_op_buffer_end(
- grpc_chttp2_incoming_metadata_live_op_buffer *buffer) {
- gpr_free(buffer->elems);
- buffer->elems = NULL;
-}
-
-void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb) {
- grpc_metadata_batch b;
-
- b.list.head = NULL;
- /* Store away the last element of the list, so that in patch_metadata_ops
- we can reconstitute the list.
- We can't do list building here as later incoming metadata may reallocate
- the underlying array. */
- b.list.tail = (void *)(gpr_intptr)buffer->count;
- b.garbage.head = b.garbage.tail = NULL;
- b.deadline = buffer->deadline;
- buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
-
- grpc_sopb_add_metadata(sopb, b);
-}
-
-void grpc_chttp2_incoming_metadata_buffer_swap(
- grpc_chttp2_incoming_metadata_buffer *a,
- grpc_chttp2_incoming_metadata_buffer *b) {
- GPR_SWAP(grpc_chttp2_incoming_metadata_buffer, *a, *b);
-}
-
-void grpc_incoming_metadata_buffer_move_to_referencing_sopb(
- grpc_chttp2_incoming_metadata_buffer *src,
- grpc_chttp2_incoming_metadata_buffer *dst, grpc_stream_op_buffer *sopb) {
- size_t delta;
- size_t i;
- dst->deadline = gpr_time_min(src->deadline, dst->deadline);
-
- if (src->count == 0) {
- return;
- }
- if (dst->count == 0) {
- grpc_chttp2_incoming_metadata_buffer_swap(src, dst);
- return;
- }
- delta = dst->count;
- if (dst->capacity < src->count + dst->count) {
- dst->capacity = GPR_MAX(dst->capacity * 2, src->count + dst->count);
- dst->elems = gpr_realloc(dst->elems, dst->capacity * sizeof(*dst->elems));
- }
- memcpy(dst->elems + dst->count, src->elems, src->count * sizeof(*src->elems));
- dst->count += src->count;
- for (i = 0; i < sopb->nops; i++) {
- if (sopb->ops[i].type != GRPC_OP_METADATA) continue;
- sopb->ops[i].data.metadata.list.tail =
- (void *)(delta + (gpr_uintptr)sopb->ops[i].data.metadata.list.tail);
- }
- src->count = 0;
-}
-
-void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb,
- grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer) {
- grpc_stream_op *ops = sopb->ops;
- size_t nops = sopb->nops;
- size_t i;
- size_t j;
- size_t mdidx = 0;
- size_t last_mdidx;
- int found_metadata = 0;
-
- /* rework the array of metadata into a linked list, making use
- of the breadcrumbs we left in metadata batches during
- add_metadata_batch */
- for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
- found_metadata = 1;
- /* we left a breadcrumb indicating where the end of this list is,
- and since we add sequentially, we know from the end of the last
- segment where this segment begins */
- last_mdidx = (size_t)(gpr_intptr)(op->data.metadata.list.tail);
- GPR_ASSERT(last_mdidx > mdidx);
- GPR_ASSERT(last_mdidx <= buffer->count);
- /* turn the array into a doubly linked list */
- op->data.metadata.list.head = &buffer->elems[mdidx];
- op->data.metadata.list.tail = &buffer->elems[last_mdidx - 1];
- for (j = mdidx + 1; j < last_mdidx; j++) {
- buffer->elems[j].prev = &buffer->elems[j - 1];
- buffer->elems[j - 1].next = &buffer->elems[j];
+void grpc_chttp2_incoming_metadata_buffer_publish(
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch) {
+ GPR_ASSERT(!buffer->published);
+ buffer->published = 1;
+ if (buffer->count > 0) {
+ size_t i;
+ for (i = 1; i < buffer->count; i++) {
+ buffer->elems[i].prev = &buffer->elems[i - 1];
}
- buffer->elems[mdidx].prev = NULL;
- buffer->elems[last_mdidx - 1].next = NULL;
- /* track where we're up to */
- mdidx = last_mdidx;
- }
- if (found_metadata) {
- live_op_buffer->elems = buffer->elems;
- if (mdidx != buffer->count) {
- /* we have a partially read metadata batch still in incoming_metadata */
- size_t new_count = buffer->count - mdidx;
- size_t copy_bytes = sizeof(*buffer->elems) * new_count;
- GPR_ASSERT(mdidx < buffer->count);
- buffer->elems = gpr_malloc(copy_bytes);
- memcpy(buffer->elems, live_op_buffer->elems + mdidx, copy_bytes);
- buffer->count = buffer->capacity = new_count;
- } else {
- buffer->elems = NULL;
- buffer->count = 0;
- buffer->capacity = 0;
+ for (i = 0; i < buffer->count - 1; i++) {
+ buffer->elems[i].next = &buffer->elems[i + 1];
}
+ buffer->elems[0].prev = NULL;
+ buffer->elems[buffer->count - 1].next = NULL;
+ batch->list.head = &buffer->elems[0];
+ batch->list.tail = &buffer->elems[buffer->count - 1];
+ } else {
+ batch->list.head = batch->list.tail = NULL;
}
+ batch->deadline = buffer->deadline;
}
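With the rewrite above, incoming metadata follows a publish-once model: elements accumulate in the buffer, grpc_chttp2_incoming_metadata_buffer_publish links them into a grpc_metadata_batch exactly once, and the destroy path only unrefs elements that were never published. A lifecycle sketch under these assumed ownership rules (illustrative only; deliver_metadata and its arguments are hypothetical):

static void deliver_metadata(grpc_chttp2_incoming_metadata_buffer *buf,
                             grpc_mdelem *elem, gpr_timespec deadline,
                             grpc_metadata_batch *batch) {
  grpc_chttp2_incoming_metadata_buffer_add(buf, elem);      /* buffer takes ownership of elem */
  grpc_chttp2_incoming_metadata_buffer_set_deadline(buf, deadline);
  grpc_chttp2_incoming_metadata_buffer_publish(buf, batch); /* links elems into batch, marks published */
  /* ... the batch is consumed by the layer above ... */
  grpc_chttp2_incoming_metadata_buffer_destroy(buf);        /* published: frees the array, no unrefs */
}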
diff --git a/src/core/transport/chttp2/incoming_metadata.h b/src/core/transport/chttp2/incoming_metadata.h
index 2f1de411ba..ea74cfc64b 100644
--- a/src/core/transport/chttp2/incoming_metadata.h
+++ b/src/core/transport/chttp2/incoming_metadata.h
@@ -41,40 +41,20 @@ typedef struct {
size_t count;
size_t capacity;
gpr_timespec deadline;
+ int published;
} grpc_chttp2_incoming_metadata_buffer;
-typedef struct {
- grpc_linked_mdelem *elems;
-} grpc_chttp2_incoming_metadata_live_op_buffer;
-
/** assumes everything initially zeroed */
void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_chttp2_incoming_metadata_buffer *buffer);
-void grpc_chttp2_incoming_metadata_buffer_reset(
- grpc_chttp2_incoming_metadata_buffer *buffer);
+void grpc_chttp2_incoming_metadata_buffer_publish(
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch);
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem);
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
-/** extend sopb with a metadata batch; this must be post-processed by
- grpc_chttp2_incoming_metadata_buffer_postprocess_sopb before being handed
- out of the transport */
-void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb);
-
-void grpc_incoming_metadata_buffer_move_to_referencing_sopb(
- grpc_chttp2_incoming_metadata_buffer *src,
- grpc_chttp2_incoming_metadata_buffer *dst, grpc_stream_op_buffer *sopb);
-
-void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb,
- grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
-
-void grpc_chttp2_incoming_metadata_live_op_buffer_end(
- grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
-
#endif /* GRPC_INTERNAL_CORE_CHTTP2_INCOMING_METADATA_H */
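The public surface of this header shrinks to a single hand-off point: the sopb and live-op-buffer helpers are gone, and the new published flag records whether the buffer has already been handed to the upper layer. Since the struct is documented as starting zeroed, a fresh buffer begins unpublished; a minimal initialization sketch (hypothetical usage, not part of the patch):

#include <string.h> /* memset */

grpc_chttp2_incoming_metadata_buffer buf;
memset(&buf, 0, sizeof(buf));                    /* "assumes everything initially zeroed" */
grpc_chttp2_incoming_metadata_buffer_init(&buf);
GPR_ASSERT(!buf.published);                      /* nothing handed off yet */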
diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h
index b35f8b5d88..43b3adb9d3 100644
--- a/src/core/transport/chttp2/internal.h
+++ b/src/core/transport/chttp2/internal.h
@@ -34,6 +34,8 @@
#ifndef GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
#define GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
+#include <assert.h>
+
#include "src/core/iomgr/endpoint.h"
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/chttp2/frame_data.h"
@@ -42,9 +44,9 @@
#include "src/core/transport/chttp2/frame_rst_stream.h"
#include "src/core/transport/chttp2/frame_settings.h"
#include "src/core/transport/chttp2/frame_window_update.h"
+#include "src/core/transport/chttp2/hpack_encoder.h"
#include "src/core/transport/chttp2/hpack_parser.h"
#include "src/core/transport/chttp2/incoming_metadata.h"
-#include "src/core/transport/chttp2/stream_encoder.h"
#include "src/core/transport/chttp2/stream_map.h"
#include "src/core/transport/connectivity_state.h"
#include "src/core/transport/transport_impl.h"
@@ -56,14 +58,15 @@ typedef struct grpc_chttp2_stream grpc_chttp2_stream;
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
- GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED,
+ GRPC_CHTTP2_LIST_CHECK_READ_OPS,
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
- GRPC_CHTTP2_LIST_CANCELLED_WAITING_FOR_WRITING,
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED,
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
@@ -113,22 +116,6 @@ typedef enum {
GRPC_DTS_FRAME
} grpc_chttp2_deframe_transport_state;
-typedef enum {
- GRPC_WRITE_STATE_OPEN,
- GRPC_WRITE_STATE_QUEUED_CLOSE,
- GRPC_WRITE_STATE_SENT_CLOSE
-} grpc_chttp2_write_state;
-
-/* flags that can be or'd into stream_global::writing_now */
-#define GRPC_CHTTP2_WRITING_DATA 1
-#define GRPC_CHTTP2_WRITING_WINDOW 2
-
-typedef enum {
- GRPC_DONT_SEND_CLOSED = 0,
- GRPC_SEND_CLOSED,
- GRPC_SEND_CLOSED_WITH_RST_STREAM
-} grpc_chttp2_send_closed;
-
typedef struct {
grpc_chttp2_stream *head;
grpc_chttp2_stream *tail;
@@ -160,14 +147,28 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
+/* forward declared in frame_data.h */
+struct grpc_chttp2_incoming_byte_stream {
+ grpc_byte_stream base;
+ gpr_refcount refs;
+ struct grpc_chttp2_incoming_byte_stream *next_message;
+
+ grpc_chttp2_transport *transport;
+ grpc_chttp2_stream *stream;
+ int is_tail;
+ gpr_slice_buffer slices;
+ grpc_closure *on_next;
+ gpr_slice *next;
+};
+
typedef struct {
/** data to write next write */
gpr_slice_buffer qbuf;
/** window available for us to send to peer */
gpr_int64 outgoing_window;
- /** window available for peer to send to us - updated after parse */
- gpr_uint32 incoming_window;
+ /** window available to announce to peer */
+ gpr_int64 announce_incoming_window;
/** how much window would we like to have for incoming_window */
gpr_uint32 connection_window_target;
@@ -191,6 +192,9 @@ typedef struct {
copied to next_stream_id in parsing when parsing commences */
gpr_uint32 next_stream_id;
+ /** how far to lookahead in a stream? */
+ gpr_uint32 stream_lookahead;
+
/** last received stream id */
gpr_uint32 last_incoming_stream_id;
@@ -209,6 +213,7 @@ typedef struct {
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
+ gpr_int64 outgoing_window;
/** is this a client? */
gpr_uint8 is_client;
/** callback for when writing is done */
@@ -226,13 +231,14 @@ struct grpc_chttp2_transport_parsing {
/** was a goaway frame received? */
gpr_uint8 goaway_received;
+ /** the last sent max_table_size setting */
+ gpr_uint32 last_sent_max_table_size;
+
/** initial window change */
gpr_int64 initial_window_update;
/** data to write later - after parsing */
gpr_slice_buffer qbuf;
- /* metadata object cache */
- grpc_mdstr *str_grpc_timeout;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -246,8 +252,7 @@ struct grpc_chttp2_transport_parsing {
grpc_chttp2_goaway_parser goaway_parser;
/** window available for peer to send to us */
- gpr_uint32 incoming_window;
- gpr_uint32 incoming_window_delta;
+ gpr_int64 incoming_window;
/** next stream id available at the time of beginning parsing */
gpr_uint32 next_stream_id;
@@ -278,16 +283,12 @@ struct grpc_chttp2_transport_parsing {
gpr_uint32 goaway_last_stream_index;
gpr_slice goaway_text;
- gpr_int64 outgoing_window_update;
-
- /** pings awaiting responses */
- grpc_chttp2_outstanding_ping pings;
+ gpr_int64 outgoing_window;
};
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
grpc_endpoint *ep;
- grpc_mdctx *metadata_context;
gpr_refcount refs;
char *peer_string;
@@ -345,8 +346,8 @@ struct grpc_chttp2_transport {
struct {
/* accept stream callback */
- void (*accept_stream)(void *user_data, grpc_transport *transport,
- const void *server_data);
+ void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
/** connectivity tracking */
@@ -358,9 +359,6 @@ typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
gpr_uint32 id;
- grpc_closure *send_done_closure;
- grpc_closure *recv_done_closure;
-
/** window available for us to send to peer */
gpr_int64 outgoing_window;
/** The number of bytes the upper layers have offered to receive.
@@ -371,54 +369,64 @@ typedef struct {
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
- gpr_uint32 unannounced_incoming_window;
- /** The number of bytes of HTTP2 flow control we have advertised.
- As we advertise incoming flow control window, this value increases.
- As bytes are read, this value decreases.
- Updated after parse. */
- gpr_uint32 incoming_window;
- /** stream ops the transport user would like to send */
- grpc_stream_op_buffer *outgoing_sopb;
+ gpr_uint32 unannounced_incoming_window_for_parse;
+ gpr_uint32 unannounced_incoming_window_for_writing;
+ /** things the upper layers would like to send */
+ grpc_metadata_batch *send_initial_metadata;
+ grpc_closure *send_initial_metadata_finished;
+ grpc_byte_stream *send_message;
+ grpc_closure *send_message_finished;
+ grpc_metadata_batch *send_trailing_metadata;
+ grpc_closure *send_trailing_metadata_finished;
+
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_closure *recv_initial_metadata_finished;
+ grpc_byte_stream **recv_message;
+ grpc_closure *recv_message_ready;
+ grpc_metadata_batch *recv_trailing_metadata;
+ grpc_closure *recv_trailing_metadata_finished;
+
/** when the application requests writes be closed, the write_closed is
'queued'; when the close is flow controlled into the send path, we are
'sending' it; when the write has been performed it is 'sent' */
- grpc_chttp2_write_state write_state;
- /** is this stream closed (boolean) */
+ gpr_uint8 write_closed;
+ /** is this stream reading half-closed (boolean) */
gpr_uint8 read_closed;
- /** has this stream been cancelled? (boolean) */
- gpr_uint8 cancelled;
- grpc_status_code cancelled_status;
- /** have we told the upper layer that this stream is cancelled? */
- gpr_uint8 published_cancelled;
/** is this stream in the stream map? (boolean) */
gpr_uint8 in_stream_map;
- /** bitmask of GRPC_CHTTP2_WRITING_xxx above */
- gpr_uint8 writing_now;
- /** has anything been written to this stream? */
- gpr_uint8 written_anything;
-
- /** stream state already published to the upper layer */
- grpc_stream_state published_state;
- /** address to publish next stream state to */
- grpc_stream_state *publish_state;
- /** pointer to sop buffer to fill in with new stream ops */
- grpc_stream_op_buffer *publish_sopb;
- grpc_stream_op_buffer incoming_sopb;
+ /** has this stream seen an error? if 1, then pending incoming frames
+ can be thrown away */
+ gpr_uint8 seen_error;
- /** incoming metadata */
- grpc_chttp2_incoming_metadata_buffer incoming_metadata;
- grpc_chttp2_incoming_metadata_live_op_buffer outstanding_metadata;
+ gpr_uint8 published_initial_metadata;
+ gpr_uint8 published_trailing_metadata;
+ gpr_uint8 faked_trailing_metadata;
+
+ grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
+ grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
+
+ grpc_chttp2_incoming_frame_queue incoming_frames;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
gpr_uint32 id;
- /** sops that have passed flow control to be written */
- grpc_stream_op_buffer sopb;
- /** how strongly should we indicate closure with the next write */
- grpc_chttp2_send_closed send_closed;
+ gpr_uint8 fetching;
+ gpr_uint8 sent_initial_metadata;
+ gpr_uint8 sent_message;
+ gpr_uint8 sent_trailing_metadata;
+ gpr_uint8 read_closed;
+ /** send this initial metadata */
+ grpc_metadata_batch *send_initial_metadata;
+ grpc_byte_stream *send_message;
+ grpc_metadata_batch *send_trailing_metadata;
+ gpr_int64 outgoing_window;
/** how much window should we announce? */
gpr_uint32 announce_window;
+ gpr_slice_buffer flow_controlled_buffer;
+ gpr_slice fetching_slice;
+ size_t stream_fetched;
+ grpc_closure finished_fetch;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
@@ -428,22 +436,29 @@ struct grpc_chttp2_stream_parsing {
gpr_uint8 received_close;
/** saw a rst_stream */
gpr_uint8 saw_rst_stream;
- /** incoming_window has been reduced by this much during parsing */
- gpr_uint32 incoming_window_delta;
+ /** how many header frames have we received? */
+ gpr_uint8 header_frames_received;
+ /** which metadata did we get (on this parse) */
+ gpr_uint8 got_metadata_on_parse[2];
+ /** should we raise the seen_error flag in transport_global */
+ gpr_uint8 seen_error;
/** window available for peer to send to us */
- gpr_uint32 incoming_window;
+ gpr_int64 incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
/** reason given to rst_stream */
gpr_uint32 rst_stream_reason;
- /* amount of window given */
- gpr_uint64 outgoing_window_update;
+ /** amount of window given */
+ gpr_int64 outgoing_window;
+ /** number of bytes received - reset at end of parse thread execution */
+ gpr_int64 received_bytes;
/** incoming metadata */
- grpc_chttp2_incoming_metadata_buffer incoming_metadata;
+ grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
+ grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
@@ -466,7 +481,8 @@ struct grpc_chttp2_stream {
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
int grpc_chttp2_unlocking_check_writes(grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_writing *writing);
+ grpc_chttp2_transport_writing *writing,
+ int is_parsing);
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint);
@@ -492,9 +508,6 @@ void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
-void grpc_chttp2_list_add_first_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
@@ -504,21 +517,10 @@ void grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
-void grpc_chttp2_list_add_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing);
-void grpc_chttp2_list_remove_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-
-void grpc_chttp2_list_add_writing_stream(
+/* returns 1 if stream added, 0 if it was already present */
+int grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
+ grpc_chttp2_stream_writing *stream_writing) GRPC_MUST_USE_RESULT;
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_list_pop_writing_stream(
@@ -550,31 +552,51 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
+void grpc_chttp2_list_add_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
+int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_add_cancelled_waiting_for_writing(
+void grpc_chttp2_list_add_stalled_by_transport(
+ grpc_chttp2_transport_writing *transport_writing,
+ grpc_chttp2_stream_writing *stream_writing);
+int grpc_chttp2_list_pop_stalled_by_transport(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global **stream_global);
+
+void grpc_chttp2_list_add_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+void grpc_chttp2_list_remove_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global);
+int grpc_chttp2_list_pop_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_global **stream_global,
+ grpc_chttp2_stream_parsing **stream_parsing);
+
+void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_cancelled_waiting_for_writing(
+int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_add_read_write_state_changed(
+void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_read_write_state_changed(
+int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ gpr_uint32 id);
void grpc_chttp2_add_incoming_goaway(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
@@ -592,7 +614,10 @@ void grpc_chttp2_for_all_streams(
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_closure **pclosure, int success);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -607,26 +632,126 @@ extern int grpc_flowctl_trace;
else \
stmt
-#define GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(reason, transport, context, var, \
- delta) \
- if (!(grpc_flowctl_trace)) { \
- } else { \
- grpc_chttp2_flowctl_trace(__FILE__, __LINE__, reason, #context, #var, \
- transport->is_client, context->id, \
- (gpr_int64)(context->var), (gpr_int64)(delta)); \
- }
-
-#define GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(reason, context, var, delta) \
- if (!(grpc_flowctl_trace)) { \
- } else { \
- grpc_chttp2_flowctl_trace(__FILE__, __LINE__, reason, #context, #var, \
- context->is_client, 0, \
- (gpr_int64)(context->var), (gpr_int64)(delta)); \
- }
-
-void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
- const char *context, const char *var,
- int is_client, gpr_uint32 stream_id,
- gpr_int64 current_value, gpr_int64 delta);
+typedef enum {
+ GRPC_CHTTP2_FLOWCTL_MOVE,
+ GRPC_CHTTP2_FLOWCTL_CREDIT,
+ GRPC_CHTTP2_FLOWCTL_DEBIT
+} grpc_chttp2_flowctl_op;
+
+#define GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, transport, id1, id2, dst_context, \
+ dst_var, src_context, src_var) \
+ do { \
+ assert(id1 == id2); \
+ if (grpc_flowctl_trace) { \
+ grpc_chttp2_flowctl_trace( \
+ __FILE__, __LINE__, phase, GRPC_CHTTP2_FLOWCTL_MOVE, #dst_context, \
+ #dst_var, #src_context, #src_var, transport->is_client, id1, \
+ dst_context->dst_var, src_context->src_var); \
+ } \
+ dst_context->dst_var += src_context->src_var; \
+ src_context->src_var = 0; \
+ } while (0)
+
+#define GRPC_CHTTP2_FLOW_MOVE_STREAM(phase, transport, dst_context, dst_var, \
+ src_context, src_var) \
+ GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, transport, dst_context->id, \
+ src_context->id, dst_context, dst_var, \
+ src_context, src_var)
+#define GRPC_CHTTP2_FLOW_MOVE_TRANSPORT(phase, dst_context, dst_var, \
+ src_context, src_var) \
+ GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, dst_context, 0, 0, dst_context, dst_var, \
+ src_context, src_var)
+
+#define GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, id, dst_context, \
+ dst_var, amount) \
+ do { \
+ if (grpc_flowctl_trace) { \
+ grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
+ GRPC_CHTTP2_FLOWCTL_CREDIT, #dst_context, \
+ #dst_var, NULL, #amount, transport->is_client, \
+ id, dst_context->dst_var, amount); \
+ } \
+ dst_context->dst_var += amount; \
+ } while (0)
+
+#define GRPC_CHTTP2_FLOW_CREDIT_STREAM(phase, transport, dst_context, dst_var, \
+ amount) \
+ GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, dst_context->id, \
+ dst_context, dst_var, amount)
+#define GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT(phase, dst_context, dst_var, amount) \
+ GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
+ amount)
+
+#define GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, id, dst_context, \
+ dst_var, amount) \
+ do { \
+ if (grpc_flowctl_trace) { \
+ grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
+ GRPC_CHTTP2_FLOWCTL_DEBIT, #dst_context, \
+ #dst_var, NULL, #amount, transport->is_client, \
+ id, dst_context->dst_var, amount); \
+ } \
+ dst_context->dst_var -= amount; \
+ } while (0)
+
+#define GRPC_CHTTP2_FLOW_DEBIT_STREAM(phase, transport, dst_context, dst_var, \
+ amount) \
+ GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, dst_context->id, \
+ dst_context, dst_var, amount)
+#define GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT(phase, dst_context, dst_var, amount) \
+ GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
+ amount)
+
+void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
+ grpc_chttp2_flowctl_op op, const char *context1,
+ const char *var1, const char *context2,
+ const char *var2, int is_client,
+ gpr_uint32 stream_id, gpr_int64 val1,
+ gpr_int64 val2);
+
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream,
+ grpc_status_code status, gpr_slice *details);
+void grpc_chttp2_mark_stream_closed(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, int close_reads,
+ int close_writes);
+void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global);
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
+ grpc_chttp2_stream_ref(stream_global, reason)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
+ const char *reason);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global,
+ const char *reason);
+#else
+#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
+ grpc_chttp2_stream_ref(stream_global)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream_global)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global);
+#endif
+
+grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_parsing *stream_parsing, gpr_uint32 frame_size,
+ gpr_uint32 flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
+void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_incoming_byte_stream *bs,
+ gpr_slice slice);
+void grpc_chttp2_incoming_byte_stream_finished(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs);
+
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *parsing,
+ const gpr_uint8 *opaque_8bytes);
#endif
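internal.h replaces the trace-only GRPC_CHTTP2_FLOWCTL_TRACE_* macros with MOVE/CREDIT/DEBIT operations that both log (when grpc_flowctl_trace is set) and mutate the named counter. A rough sketch of what one debit expands to, using the call site in parsing.c below and assuming the variable names at that site (approximate expansion, for orientation only):

/* GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
                                 incoming_window, incoming_frame_size);
   expands, roughly, to: */
do {
  if (grpc_flowctl_trace) {
    grpc_chttp2_flowctl_trace(__FILE__, __LINE__, "parse",
                              GRPC_CHTTP2_FLOWCTL_DEBIT, "stream_parsing",
                              "incoming_window", NULL, "incoming_frame_size",
                              transport_parsing->is_client, stream_parsing->id,
                              stream_parsing->incoming_window,
                              incoming_frame_size);
  }
  stream_parsing->incoming_window -= incoming_frame_size;
} while (0);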
diff --git a/src/core/transport/chttp2/parsing.c b/src/core/transport/chttp2/parsing.c
index 5d4d8e70c4..7604e7b681 100644
--- a/src/core/transport/chttp2/parsing.c
+++ b/src/core/transport/chttp2/parsing.c
@@ -35,29 +35,36 @@
#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
+#include "src/core/transport/static_metadata.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing);
+static int init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing);
static int init_header_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_continuation);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_continuation);
static int init_data_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_rst_stream_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_settings_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_window_update_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing);
-static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing);
-static int init_goaway_parser(grpc_chttp2_transport_parsing *transport_parsing);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
+static int init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing);
+static int init_goaway_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing);
static int init_skip_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_header);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_header);
static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing,
@@ -72,25 +79,16 @@ void grpc_chttp2_prepare_to_read(
GPR_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
transport_parsing->next_stream_id = transport_global->next_stream_id;
+ transport_parsing->last_sent_max_table_size =
+ transport_global->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE];
/* update the parsing view of incoming window */
- if (transport_parsing->incoming_window != transport_global->incoming_window) {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parse", transport_parsing, incoming_window,
- (gpr_int64)transport_global->incoming_window -
- (gpr_int64)transport_parsing->incoming_window);
- transport_parsing->incoming_window = transport_global->incoming_window;
- }
- while (grpc_chttp2_list_pop_incoming_window_updated(
+ while (grpc_chttp2_list_pop_unannounced_incoming_window_available(
transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- stream_parsing->id = stream_global->id;
- if (stream_parsing->incoming_window != stream_global->incoming_window) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parse", transport_parsing, stream_parsing, incoming_window,
- (gpr_int64)stream_global->incoming_window -
- (gpr_int64)stream_parsing->incoming_window);
- stream_parsing->incoming_window = stream_global->incoming_window;
- }
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("parse", transport_parsing, stream_parsing,
+ incoming_window, stream_global,
+ unannounced_incoming_window_for_parse);
}
GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
@@ -101,6 +99,8 @@ void grpc_chttp2_publish_reads(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
+ int was_zero;
+ int is_zero;
/* transport_parsing->last_incoming_stream_id is used as
last-grpc_chttp2_stream-id when
@@ -115,9 +115,6 @@ void grpc_chttp2_publish_reads(
transport_parsing->incoming_stream_id;
}
- /* copy parsing qbuf to global qbuf */
- gpr_slice_buffer_move_into(&transport_parsing->qbuf, &transport_global->qbuf);
-
/* update global settings */
if (transport_parsing->settings_updated) {
memcpy(transport_global->settings[GRPC_PEER_SETTINGS],
@@ -131,6 +128,7 @@ void grpc_chttp2_publish_reads(
transport_global->settings[GRPC_SENT_SETTINGS],
GRPC_CHTTP2_NUM_SETTINGS * sizeof(gpr_uint32));
transport_parsing->settings_ack_received = 0;
+ transport_global->sent_local_settings = 0;
}
/* move goaway to the global state if we received one (it will be
@@ -144,98 +142,102 @@ void grpc_chttp2_publish_reads(
}
/* propagate flow control tokens to global state */
- if (transport_parsing->outgoing_window_update) {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_global, outgoing_window,
- transport_parsing->outgoing_window_update);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_parsing, outgoing_window_update,
- -(gpr_int64)transport_parsing->outgoing_window_update);
- transport_global->outgoing_window +=
- transport_parsing->outgoing_window_update;
- transport_parsing->outgoing_window_update = 0;
- }
-
- if (transport_parsing->incoming_window_delta) {
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_global, incoming_window,
- -(gpr_int64)transport_parsing->incoming_window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "parsed", transport_parsing, incoming_window_delta,
- -(gpr_int64)transport_parsing->incoming_window_delta);
- transport_global->incoming_window -=
- transport_parsing->incoming_window_delta;
- transport_parsing->incoming_window_delta = 0;
+ was_zero = transport_global->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("parsed", transport_global, outgoing_window,
+ transport_parsing, outgoing_window);
+ is_zero = transport_global->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
+ &stream_global)) {
+ grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
+ }
+ }
+
+ if (transport_parsing->incoming_window <
+ transport_global->connection_window_target * 3 / 4) {
+ gpr_int64 announce_bytes = transport_global->connection_window_target -
+ transport_parsing->incoming_window;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_global,
+ announce_incoming_window, announce_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing,
+ incoming_window, announce_bytes);
}
/* for each stream that saw an update, fixup global state */
while (grpc_chttp2_list_pop_parsing_seen_stream(
transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- /* update incoming flow control window */
- if (stream_parsing->incoming_window_delta) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_global, incoming_window,
- -(gpr_int64)stream_parsing->incoming_window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_parsing, incoming_window_delta,
- -(gpr_int64)stream_parsing->incoming_window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_global, max_recv_bytes,
- -(gpr_int64)stream_parsing->incoming_window_delta);
- stream_global->incoming_window -= stream_parsing->incoming_window_delta;
- GPR_ASSERT(stream_global->max_recv_bytes >=
- stream_parsing->incoming_window_delta);
- stream_global->max_recv_bytes -= stream_parsing->incoming_window_delta;
- stream_parsing->incoming_window_delta = 0;
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
+ if (stream_parsing->seen_error) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
/* update outgoing flow control window */
- if (stream_parsing->outgoing_window_update) {
- int was_zero = stream_global->outgoing_window <= 0;
- int is_zero;
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("parsed", transport_parsing,
- stream_global, outgoing_window,
- stream_parsing->outgoing_window_update);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "parsed", transport_parsing, stream_parsing, outgoing_window_update,
- -(gpr_int64)stream_parsing->outgoing_window_update);
- GPR_ASSERT(stream_parsing->outgoing_window_update <= GPR_UINT32_MAX);
- stream_global->outgoing_window +=
- (gpr_uint32)stream_parsing->outgoing_window_update;
- stream_parsing->outgoing_window_update = 0;
- is_zero = stream_global->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
- }
+ was_zero = stream_global->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("parsed", transport_global, stream_global,
+ outgoing_window, stream_parsing,
+ outgoing_window);
+ is_zero = stream_global->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
- /* updating closed status */
- if (stream_parsing->received_close) {
- stream_global->read_closed = 1;
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ stream_global->max_recv_bytes -= (gpr_uint32)GPR_MIN(
+ stream_global->max_recv_bytes, stream_parsing->received_bytes);
+ stream_parsing->received_bytes = 0;
+
+ /* publish incoming stream ops */
+ if (stream_global->incoming_frames.tail != NULL) {
+ stream_global->incoming_frames.tail->is_tail = 0;
+ }
+ if (stream_parsing->data_parser.incoming_frames.head != NULL) {
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ grpc_chttp2_incoming_frame_queue_merge(
+ &stream_global->incoming_frames,
+ &stream_parsing->data_parser.incoming_frames);
+ if (stream_global->incoming_frames.tail != NULL) {
+ stream_global->incoming_frames.tail->is_tail = 1;
+ }
+
+ if (!stream_global->published_initial_metadata &&
+ stream_parsing->got_metadata_on_parse[0]) {
+ stream_parsing->got_metadata_on_parse[0] = 0;
+ stream_global->published_initial_metadata = 1;
+ GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
+ stream_parsing->metadata_buffer[0],
+ stream_global->received_initial_metadata);
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
+ if (!stream_global->published_trailing_metadata &&
+ stream_parsing->got_metadata_on_parse[1]) {
+ stream_parsing->got_metadata_on_parse[1] = 0;
+ stream_global->published_trailing_metadata = 1;
+ GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
+ stream_parsing->metadata_buffer[1],
+ stream_global->received_trailing_metadata);
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+
if (stream_parsing->saw_rst_stream) {
- stream_global->cancelled = 1;
- stream_global->cancelled_status = grpc_chttp2_http2_error_to_grpc_status(
- (grpc_chttp2_error_code)stream_parsing->rst_stream_reason);
- if (stream_parsing->rst_stream_reason == GRPC_CHTTP2_NO_ERROR) {
- stream_global->published_cancelled = 1;
+ if (stream_parsing->rst_stream_reason != GRPC_CHTTP2_NO_ERROR) {
+ grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
+ (grpc_chttp2_error_code)stream_parsing->rst_stream_reason);
+ char *status_details;
+ gpr_slice slice_details;
+ gpr_asprintf(&status_details, "Received RST_STREAM err=%d",
+ stream_parsing->rst_stream_reason);
+ slice_details = gpr_slice_from_copied_string(status_details);
+ gpr_free(status_details);
+ grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
+ status_code, &slice_details);
}
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
+ 1, 1);
}
- /* publish incoming stream ops */
- if (stream_parsing->data_parser.incoming_sopb.nops > 0) {
- grpc_incoming_metadata_buffer_move_to_referencing_sopb(
- &stream_parsing->incoming_metadata, &stream_global->incoming_metadata,
- &stream_parsing->data_parser.incoming_sopb);
- grpc_sopb_move_to(&stream_parsing->data_parser.incoming_sopb,
- &stream_global->incoming_sopb);
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ if (stream_parsing->received_close) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
+ 1, 0);
}
}
}
@@ -363,7 +365,7 @@ int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(cur < end);
transport_parsing->incoming_stream_id |= ((gpr_uint32)*cur);
transport_parsing->deframe_state = GRPC_DTS_FRAME;
- if (!init_frame_parser(transport_parsing)) {
+ if (!init_frame_parser(exec_ctx, transport_parsing)) {
return 0;
}
if (transport_parsing->incoming_stream_id) {
@@ -428,7 +430,8 @@ int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GPR_UNREACHABLE_CODE(return 0);
}
-static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
+static int init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing) {
if (transport_parsing->expect_continuation_stream_id != 0) {
if (transport_parsing->incoming_frame_type !=
GRPC_CHTTP2_FRAME_CONTINUATION) {
@@ -445,30 +448,30 @@ static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
transport_parsing->incoming_stream_id);
return 0;
}
- return init_header_frame_parser(transport_parsing, 1);
+ return init_header_frame_parser(exec_ctx, transport_parsing, 1);
}
switch (transport_parsing->incoming_frame_type) {
case GRPC_CHTTP2_FRAME_DATA:
- return init_data_frame_parser(transport_parsing);
+ return init_data_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_HEADER:
- return init_header_frame_parser(transport_parsing, 0);
+ return init_header_frame_parser(exec_ctx, transport_parsing, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
gpr_log(GPR_ERROR, "Unexpected CONTINUATION frame");
return 0;
case GRPC_CHTTP2_FRAME_RST_STREAM:
- return init_rst_stream_parser(transport_parsing);
+ return init_rst_stream_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_SETTINGS:
- return init_settings_frame_parser(transport_parsing);
+ return init_settings_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
- return init_window_update_frame_parser(transport_parsing);
+ return init_window_update_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_PING:
- return init_ping_parser(transport_parsing);
+ return init_ping_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_GOAWAY:
- return init_goaway_parser(transport_parsing);
+ return init_goaway_parser(exec_ctx, transport_parsing);
default:
gpr_log(GPR_ERROR, "Unknown frame type %02x",
transport_parsing->incoming_frame_type);
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
}
}
@@ -482,7 +485,8 @@ static grpc_chttp2_parse_error skip_parser(
static void skip_header(void *tp, grpc_mdelem *md) { GRPC_MDELEM_UNREF(md); }
static int init_skip_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_header) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_header) {
if (is_header) {
gpr_uint8 is_eoh = transport_parsing->expect_continuation_stream_id != 0;
transport_parsing->parser = grpc_chttp2_header_parser_parse;
@@ -499,65 +503,51 @@ static int init_skip_frame_parser(
}
void grpc_chttp2_parsing_become_skip_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
init_skip_frame_parser(
- transport_parsing,
+ exec_ctx, transport_parsing,
transport_parsing->parser == grpc_chttp2_header_parser_parse);
}
static grpc_chttp2_parse_error update_incoming_window(
- grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
- if (transport_parsing->incoming_frame_size >
- transport_parsing->incoming_window) {
+ gpr_uint32 incoming_frame_size = transport_parsing->incoming_frame_size;
+ if (incoming_frame_size > transport_parsing->incoming_window) {
gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
transport_parsing->incoming_frame_size,
transport_parsing->incoming_window);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
- if (transport_parsing->incoming_frame_size >
- stream_parsing->incoming_window) {
+ if (incoming_frame_size > stream_parsing->incoming_window) {
gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
transport_parsing->incoming_frame_size,
stream_parsing->incoming_window);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "data", transport_parsing, incoming_window,
- -(gpr_int64)transport_parsing->incoming_frame_size);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("data", transport_parsing,
- incoming_window_delta,
- transport_parsing->incoming_frame_size);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "data", transport_parsing, stream_parsing, incoming_window,
- -(gpr_int64)transport_parsing->incoming_frame_size);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("data", transport_parsing, stream_parsing,
- incoming_window_delta,
- transport_parsing->incoming_frame_size);
-
- transport_parsing->incoming_window -= transport_parsing->incoming_frame_size;
- transport_parsing->incoming_window_delta +=
- transport_parsing->incoming_frame_size;
- stream_parsing->incoming_window -= transport_parsing->incoming_frame_size;
- stream_parsing->incoming_window_delta +=
- transport_parsing->incoming_frame_size;
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", transport_parsing, incoming_window,
+ incoming_frame_size);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
+ incoming_window, incoming_frame_size);
+ stream_parsing->received_bytes += incoming_frame_size;
+
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
return GRPC_CHTTP2_PARSE_OK;
}
static int init_data_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_parsing *stream_parsing =
grpc_chttp2_parsing_lookup_stream(transport_parsing,
transport_parsing->incoming_stream_id);
grpc_chttp2_parse_error err = GRPC_CHTTP2_PARSE_OK;
if (!stream_parsing || stream_parsing->received_close)
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
if (err == GRPC_CHTTP2_PARSE_OK) {
- err = update_incoming_window(transport_parsing, stream_parsing);
+ err = update_incoming_window(exec_ctx, transport_parsing, stream_parsing);
}
if (err == GRPC_CHTTP2_PARSE_OK) {
err = grpc_chttp2_data_parser_begin_frame(
@@ -577,7 +567,7 @@ static int init_data_frame_parser(
&transport_parsing->qbuf,
grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
GRPC_CHTTP2_PROTOCOL_ERROR));
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
@@ -586,11 +576,13 @@ static int init_data_frame_parser(
static void free_timeout(void *p) { gpr_free(p); }
-static void on_header(void *tp, grpc_mdelem *md) {
+static void on_initial_header(void *tp, grpc_mdelem *md) {
grpc_chttp2_transport_parsing *transport_parsing = tp;
grpc_chttp2_stream_parsing *stream_parsing =
transport_parsing->incoming_stream;
+ GPR_TIMER_BEGIN("on_initial_header", 0);
+
GPR_ASSERT(stream_parsing);
GRPC_CHTTP2_IF_TRACING(gpr_log(
@@ -598,7 +590,12 @@ static void on_header(void *tp, grpc_mdelem *md) {
transport_parsing->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
- if (md->key == transport_parsing->str_grpc_timeout) {
+ if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
+ /* TODO(ctiller): check for a status like " 0" */
+ stream_parsing->seen_error = 1;
+ }
+
+ if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
if (!cached_timeout) {
/* not already parsed: parse it now, and store the result away */
@@ -607,29 +604,61 @@ static void on_header(void *tp, grpc_mdelem *md) {
cached_timeout)) {
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
grpc_mdstr_as_c_string(md->value));
- *cached_timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
+ *cached_timeout = gpr_inf_future(GPR_TIMESPAN);
}
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
- &stream_parsing->incoming_metadata,
+ &stream_parsing->metadata_buffer[0],
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
GRPC_MDELEM_UNREF(md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(&stream_parsing->incoming_metadata,
- md);
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &stream_parsing->metadata_buffer[0], md);
}
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
+
+ GPR_TIMER_END("on_initial_header", 0);
+}
+
+static void on_trailing_header(void *tp, grpc_mdelem *md) {
+ grpc_chttp2_transport_parsing *transport_parsing = tp;
+ grpc_chttp2_stream_parsing *stream_parsing =
+ transport_parsing->incoming_stream;
+
+ GPR_TIMER_BEGIN("on_trailing_header", 0);
+
+ GPR_ASSERT(stream_parsing);
+
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", stream_parsing->id,
+ transport_parsing->is_client ? "CLI" : "SVR",
+ grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
+
+ if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
+ /* TODO(ctiller): check for a status like " 0" */
+ stream_parsing->seen_error = 1;
+ }
+
+ grpc_chttp2_incoming_metadata_buffer_add(&stream_parsing->metadata_buffer[1],
+ md);
+
+ grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
+
+ GPR_TIMER_END("on_trailing_header", 0);
}
static int init_header_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing, int is_continuation) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ int is_continuation) {
gpr_uint8 is_eoh = (transport_parsing->incoming_frame_flags &
GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
int via_accept = 0;
grpc_chttp2_stream_parsing *stream_parsing;
+ /* TODO(ctiller): when to increment header_frames_received? */
+
if (is_eoh) {
transport_parsing->expect_continuation_stream_id = 0;
} else {
@@ -649,7 +678,7 @@ static int init_header_frame_parser(
if (is_continuation) {
gpr_log(GPR_ERROR,
"grpc_chttp2_stream disbanded before CONTINUATION received");
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
if (transport_parsing->is_client) {
if ((transport_parsing->incoming_stream_id & 1) &&
@@ -660,7 +689,7 @@ static int init_header_frame_parser(
gpr_log(GPR_ERROR,
"ignoring new grpc_chttp2_stream creation on client");
}
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
} else if (transport_parsing->last_incoming_stream_id >
transport_parsing->incoming_stream_id) {
gpr_log(GPR_ERROR,
@@ -669,19 +698,19 @@ static int init_header_frame_parser(
"id=%d, new grpc_chttp2_stream id=%d",
transport_parsing->last_incoming_stream_id,
transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
} else if ((transport_parsing->incoming_stream_id & 1) == 0) {
gpr_log(GPR_ERROR,
"ignoring grpc_chttp2_stream with non-client generated index %d",
transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
stream_parsing = transport_parsing->incoming_stream =
grpc_chttp2_parsing_accept_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
+ exec_ctx, transport_parsing, transport_parsing->incoming_stream_id);
if (stream_parsing == NULL) {
gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted");
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
via_accept = 1;
} else {
@@ -691,11 +720,21 @@ static int init_header_frame_parser(
if (stream_parsing->received_close) {
gpr_log(GPR_ERROR, "skipping already closed grpc_chttp2_stream header");
transport_parsing->incoming_stream = NULL;
- return init_skip_frame_parser(transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
transport_parsing->parser = grpc_chttp2_header_parser_parse;
transport_parsing->parser_data = &transport_parsing->hpack_parser;
- transport_parsing->hpack_parser.on_header = on_header;
+ switch (stream_parsing->header_frames_received) {
+ case 0:
+ transport_parsing->hpack_parser.on_header = on_initial_header;
+ break;
+ case 1:
+ transport_parsing->hpack_parser.on_header = on_trailing_header;
+ break;
+ case 2:
+ gpr_log(GPR_ERROR, "too many header frames received");
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ }
transport_parsing->hpack_parser.on_header_user_data = transport_parsing;
transport_parsing->hpack_parser.is_boundary = is_eoh;
transport_parsing->hpack_parser.is_eof =
@@ -708,7 +747,7 @@ static int init_header_frame_parser(
}
static int init_window_update_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_window_update_parser_begin_frame(
&transport_parsing->simple.window_update,
transport_parsing->incoming_frame_size,
@@ -722,7 +761,8 @@ static int init_window_update_frame_parser(
return ok;
}
-static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing) {
+static int init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_ping_parser_begin_frame(
&transport_parsing->simple.ping,
transport_parsing->incoming_frame_size,
@@ -733,7 +773,7 @@ static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing) {
}
static int init_rst_stream_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_rst_stream_parser_begin_frame(
&transport_parsing->simple.rst_stream,
transport_parsing->incoming_frame_size,
@@ -741,7 +781,7 @@ static int init_rst_stream_parser(
transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
transport_parsing, transport_parsing->incoming_stream_id);
if (!transport_parsing->incoming_stream) {
- return init_skip_frame_parser(transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
}
transport_parsing->parser = grpc_chttp2_rst_stream_parser_parse;
transport_parsing->parser_data = &transport_parsing->simple.rst_stream;
@@ -749,7 +789,7 @@ static int init_rst_stream_parser(
}
static int init_goaway_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_goaway_parser_begin_frame(
&transport_parsing->goaway_parser,
transport_parsing->incoming_frame_size,
@@ -760,7 +800,7 @@ static int init_goaway_parser(
}
static int init_settings_frame_parser(
- grpc_chttp2_transport_parsing *transport_parsing) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok;
if (transport_parsing->incoming_stream_id != 0) {
@@ -779,6 +819,9 @@ static int init_settings_frame_parser(
}
if (transport_parsing->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
transport_parsing->settings_ack_received = 1;
+ grpc_chttp2_hptbl_set_max_bytes(
+ &transport_parsing->hpack_parser.table,
+ transport_parsing->last_sent_max_table_size);
}
transport_parsing->parser = grpc_chttp2_settings_parser_parse;
transport_parsing->parser_data = &transport_parsing->simple.settings;
@@ -806,7 +849,7 @@ static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
}
return 1;
case GRPC_CHTTP2_STREAM_ERROR:
- grpc_chttp2_parsing_become_skip_parser(transport_parsing);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, transport_parsing);
if (stream_parsing) {
stream_parsing->saw_rst_stream = 1;
stream_parsing->rst_stream_reason = GRPC_CHTTP2_PROTOCOL_ERROR;
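Two behaviours worth noting in the parsing.c changes above: headers are now routed by count (the first HEADERS frame feeds initial metadata, the second feeds trailing metadata, and a third switches the stream to the skip parser), and the connection-level window is topped up in publish_reads whenever the parsed-down window falls below three quarters of connection_window_target. A small worked example of the latter, with assumed numbers (not taken from the patch):

/* Hypothetical figures: target 65536 bytes, parsing-side window drained
   to 40960. Since 40960 < 49152 (3/4 of the target), announce
   65536 - 40960 = 24576 bytes: the same credit is applied to
   transport_global->announce_incoming_window (to be advertised to the
   peer) and to transport_parsing->incoming_window (our local view). */
gpr_int64 connection_window_target = 65536;
gpr_int64 incoming_window = 40960;
if (incoming_window < connection_window_target * 3 / 4) {
  gpr_int64 announce_bytes = connection_window_target - incoming_window; /* 24576 */
  (void)announce_bytes;
}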
diff --git a/src/core/transport/chttp2/stream_lists.c b/src/core/transport/chttp2/stream_lists.c
index 781db7b0d6..49f951d08b 100644
--- a/src/core/transport/chttp2/stream_lists.c
+++ b/src/core/transport/chttp2/stream_lists.c
@@ -108,23 +108,6 @@ static void stream_list_maybe_remove(grpc_chttp2_transport *t,
}
}
-static void stream_list_add_head(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- grpc_chttp2_stream_list_id id) {
- grpc_chttp2_stream *old_head;
- GPR_ASSERT(!s->included[id]);
- old_head = t->lists[id].head;
- s->links[id].next = old_head;
- s->links[id].prev = NULL;
- if (old_head) {
- old_head->links[id].prev = s;
- } else {
- t->lists[id].tail = s;
- }
- t->lists[id].head = s;
- s->included[id] = 1;
-}
-
static void stream_list_add_tail(grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_chttp2_stream_list_id id) {
@@ -142,12 +125,13 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
s->included[id] = 1;
}
-static void stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_chttp2_stream_list_id id) {
+static int stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ grpc_chttp2_stream_list_id id) {
if (s->included[id]) {
- return;
+ return 0;
}
stream_list_add_tail(t, s, id);
+ return 1;
}
/* wrappers for specializations */
@@ -160,15 +144,6 @@ void grpc_chttp2_list_add_writable_stream(
STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE);
}
-void grpc_chttp2_list_add_first_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- GPR_ASSERT(stream_global->id != 0);
- stream_list_add_head(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WRITABLE);
-}
-
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
@@ -192,12 +167,12 @@ void grpc_chttp2_list_remove_writable_stream(
GRPC_CHTTP2_LIST_WRITABLE);
}
-void grpc_chttp2_list_add_writing_stream(
+int grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_WRITING);
+ return stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
+ STREAM_FROM_WRITING(stream_writing),
+ GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_have_writing_streams(
@@ -241,6 +216,40 @@ int grpc_chttp2_list_pop_written_stream(
return r;
}
+void grpc_chttp2_list_add_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global) {
+ GPR_ASSERT(stream_global->id != 0);
+ stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
+ STREAM_FROM_GLOBAL(stream_global),
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
+}
+
+void grpc_chttp2_list_remove_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global) {
+ stream_list_maybe_remove(
+ TRANSPORT_FROM_GLOBAL(transport_global),
+ STREAM_FROM_GLOBAL(stream_global),
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
+}
+
+int grpc_chttp2_list_pop_unannounced_incoming_window_available(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_global **stream_global,
+ grpc_chttp2_stream_parsing **stream_parsing) {
+ grpc_chttp2_stream *stream;
+ int r =
+ stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
+ GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
+ if (r != 0) {
+ *stream_global = &stream->global;
+ *stream_parsing = &stream->parsing;
+ }
+ return r;
+}
+
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
@@ -284,91 +293,80 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(
return r;
}
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
+void grpc_chttp2_list_add_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
+ GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
+int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
+ GRPC_CHTTP2_LIST_CHECK_READ_OPS);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
-void grpc_chttp2_list_add_cancelled_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CANCELLED_WAITING_FOR_WRITING);
+void grpc_chttp2_list_add_stalled_by_transport(
+ grpc_chttp2_transport_writing *transport_writing,
+ grpc_chttp2_stream_writing *stream_writing) {
+ stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
+ STREAM_FROM_WRITING(stream_writing),
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-int grpc_chttp2_list_pop_cancelled_waiting_for_writing(
+int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CANCELLED_WAITING_FOR_WRITING);
+ GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
-void grpc_chttp2_list_add_incoming_window_updated(
+void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
}
-int grpc_chttp2_list_pop_incoming_window_updated(
+int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing) {
+ grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
if (r != 0) {
*stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
}
return r;
}
-void grpc_chttp2_list_remove_incoming_window_updated(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
-}
-
-void grpc_chttp2_list_add_read_write_state_changed(
+void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED);
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
}
-int grpc_chttp2_list_pop_read_write_state_changed(
+int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED);
+ GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
if (r != 0) {
*stream_global = &stream->global;
}
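Two themes run through this file: stream_list_add (and the writing-list wrapper) now reports whether the stream was actually inserted, and the head-insertion/first-writable machinery is deleted. The new return value is what lets writing.c take exactly one "chttp2_writing" stream ref per list membership. A standalone sketch of that ref-on-first-insert pattern, using made-up names, follows.

#include <stdbool.h>

/* Illustrative only — fake_stream and list_add stand in for the chttp2 stream
   and stream_list_add; the point is that a reference is taken exactly when
   the stream transitions from "not queued" to "queued for writing". */
typedef struct {
  bool included; /* already on the writing list? */
  int refs;
} fake_stream;

static bool list_add(fake_stream *s) {
  if (s->included) return false; /* duplicate add: caller takes no new ref */
  s->included = true;
  return true;
}

static void mark_stream_writing(fake_stream *s) {
  if (list_add(s)) {
    s->refs++; /* corresponds to GRPC_CHTTP2_STREAM_REF(..., "chttp2_writing") */
  }
}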
diff --git a/src/core/transport/chttp2/timeout_encoding.c b/src/core/transport/chttp2/timeout_encoding.c
index 8a9b290ecb..7ec8b4e8bf 100644
--- a/src/core/transport/chttp2/timeout_encoding.c
+++ b/src/core/transport/chttp2/timeout_encoding.c
@@ -36,14 +36,15 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/support/port_platform.h>
#include "src/core/support/string.h"
-static int round_up(int x, int divisor) {
+static gpr_int64 round_up(gpr_int64 x, gpr_int64 divisor) {
return (x / divisor + (x % divisor != 0)) * divisor;
}
/* round an integer up to the next value with three significant figures */
-static int round_up_to_three_sig_figs(int x) {
+static gpr_int64 round_up_to_three_sig_figs(gpr_int64 x) {
if (x < 1000) return x;
if (x < 10000) return round_up(x, 10);
if (x < 100000) return round_up(x, 100);
@@ -57,13 +58,13 @@ static int round_up_to_three_sig_figs(int x) {
/* encode our minimum viable timeout value */
static void enc_tiny(char *buffer) { memcpy(buffer, "1n", 3); }
-static void enc_ext(char *buffer, long value, char ext) {
- int n = gpr_ltoa(value, buffer);
+static void enc_ext(char *buffer, gpr_int64 value, char ext) {
+ int n = gpr_int64toa(value, buffer);
buffer[n] = ext;
buffer[n + 1] = 0;
}
-static void enc_seconds(char *buffer, long sec) {
+static void enc_seconds(char *buffer, gpr_int64 sec) {
if (sec % 3600 == 0) {
enc_ext(buffer, sec / 3600, 'H');
} else if (sec % 60 == 0) {
@@ -73,7 +74,7 @@ static void enc_seconds(char *buffer, long sec) {
}
}
-static void enc_nanos(char *buffer, int x) {
+static void enc_nanos(char *buffer, gpr_int64 x) {
x = round_up_to_three_sig_figs(x);
if (x < 100000) {
if (x % 1000 == 0) {
@@ -97,7 +98,7 @@ static void enc_nanos(char *buffer, int x) {
}
}
-static void enc_micros(char *buffer, int x) {
+static void enc_micros(char *buffer, gpr_int64 x) {
x = round_up_to_three_sig_figs(x);
if (x < 100000) {
if (x % 1000 == 0) {
@@ -123,7 +124,7 @@ void grpc_chttp2_encode_timeout(gpr_timespec timeout, char *buffer) {
enc_nanos(buffer, timeout.tv_nsec);
} else if (timeout.tv_sec < 1000 && timeout.tv_nsec != 0) {
enc_micros(buffer,
- (int)(timeout.tv_sec * 1000000) +
+ (gpr_int64)(timeout.tv_sec * 1000000) +
(timeout.tv_nsec / 1000 + (timeout.tv_nsec % 1000 != 0)));
} else {
enc_seconds(buffer, timeout.tv_sec + (timeout.tv_nsec != 0));
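The encoding helpers are widened from int/long to gpr_int64, presumably so large deadlines survive the intermediate arithmetic on platforms where long is 32 bits. The rounding itself is unchanged; the standalone snippet below reproduces the two helpers with portable int64_t (an assumption made only so it compiles on its own — the tail of round_up_to_three_sig_figs is truncated here just as it is in the hunk) and checks one worked example.

#include <assert.h>
#include <stdint.h>

static int64_t round_up(int64_t x, int64_t divisor) {
  return (x / divisor + (x % divisor != 0)) * divisor;
}

static int64_t round_up_to_three_sig_figs(int64_t x) {
  if (x < 1000) return x;
  if (x < 10000) return round_up(x, 10);
  if (x < 100000) return round_up(x, 100);
  return round_up(x, 1000); /* the real code keeps extending this ladder */
}

int main(void) {
  assert(round_up_to_three_sig_figs(999) == 999);     /* under 1000: unchanged */
  assert(round_up_to_three_sig_figs(12345) == 12400); /* (123 + 1) * 100 */
  return 0;
}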
diff --git a/src/core/transport/chttp2/varint.h b/src/core/transport/chttp2/varint.h
index 4dfcc76773..5acb15d032 100644
--- a/src/core/transport/chttp2/varint.h
+++ b/src/core/transport/chttp2/varint.h
@@ -50,7 +50,8 @@ void grpc_chttp2_hpack_write_varint_tail(gpr_uint32 tail_value,
/* maximum value that can be bitpacked with the opcode if the opcode has a
prefix
of length prefix_bits */
-#define GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits) ((1 << (8 - (prefix_bits))) - 1)
+#define GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits) \
+ ((gpr_uint32)((1 << (8 - (prefix_bits))) - 1))
/* length required to bitpack a value */
#define GRPC_CHTTP2_VARINT_LENGTH(n, prefix_bits) \
@@ -65,7 +66,8 @@ void grpc_chttp2_hpack_write_varint_tail(gpr_uint32 tail_value,
if ((length) == 1u) { \
(tgt)[0] = (gpr_uint8)((prefix_or) | (n)); \
} else { \
- (tgt)[0] = (prefix_or) | GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits); \
+ (tgt)[0] = \
+ (prefix_or) | (gpr_uint8)GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits); \
grpc_chttp2_hpack_write_varint_tail( \
(n)-GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits), (tgt) + 1, (length)-1); \
} \
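The casts added to GRPC_CHTTP2_MAX_IN_PREFIX and its use in the write macro pin the expression to gpr_uint32/gpr_uint8, which reads as a warning-hygiene change rather than a behavioural one. The arithmetic itself is the standard HPACK prefix-integer rule: with prefix_bits of opcode, 8 - prefix_bits value bits remain in the first octet. The standalone check below works one example through; max_in_prefix is an illustrative stand-in for the macro.

#include <assert.h>
#include <stdint.h>

static uint32_t max_in_prefix(int prefix_bits) {
  return (uint32_t)((1 << (8 - prefix_bits)) - 1);
}

int main(void) {
  assert(max_in_prefix(1) == 127); /* 7 value bits after a 1-bit opcode */
  assert(max_in_prefix(4) == 15);  /* 4 value bits after a 4-bit opcode */
  /* Encoding 130 with a 1-bit opcode: the first octet is saturated at 127
     and the varint tail carries 130 - 127 = 3, matching the
     (n) - GRPC_CHTTP2_MAX_IN_PREFIX(prefix_bits) argument in the macro. */
  assert(130 - max_in_prefix(1) == 3);
  return 0;
}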
diff --git a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c
index 69ad8854ba..b5ca42d69c 100644
--- a/src/core/transport/chttp2/writing.c
+++ b/src/core/transport/chttp2/writing.c
@@ -40,22 +40,28 @@
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
-static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing);
+static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_unlocking_check_writes(
grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing) {
+ grpc_chttp2_transport_writing *transport_writing, int is_parsing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
- grpc_chttp2_stream_global *first_reinserted_stream = NULL;
- gpr_uint32 window_delta;
+
+ GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_swap(&transport_global->qbuf, &transport_writing->outbuf);
GPR_ASSERT(transport_global->qbuf.count == 0);
+ grpc_chttp2_hpack_compressor_set_max_table_size(
+ &transport_writing->hpack_compressor,
+ transport_global->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+
if (transport_global->dirtied_local_settings &&
- !transport_global->sent_local_settings) {
+ !transport_global->sent_local_settings && !is_parsing) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_settings_create(
@@ -67,98 +73,103 @@ int grpc_chttp2_unlocking_check_writes(
transport_global->sent_local_settings = 1;
}
+ GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
+ transport_global, outgoing_window);
+
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
- if (stream_global == first_reinserted_stream) {
- /* prevent infinite loop */
- grpc_chttp2_list_add_first_writable_stream(transport_global,
- stream_global);
- break;
- }
+ gpr_uint8 sent_initial_metadata;
stream_writing->id = stream_global->id;
- stream_writing->send_closed = GRPC_DONT_SEND_CLOSED;
-
- if (stream_global->outgoing_sopb) {
- window_delta = grpc_chttp2_preencode(
- stream_global->outgoing_sopb->ops,
- &stream_global->outgoing_sopb->nops,
- (gpr_uint32)GPR_MIN(GPR_MIN(transport_global->outgoing_window,
- stream_global->outgoing_window),
- GPR_UINT32_MAX),
- &stream_writing->sopb);
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
- "write", transport_global, outgoing_window, -(gpr_int64)window_delta);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
- outgoing_window,
- -(gpr_int64)window_delta);
- transport_global->outgoing_window -= window_delta;
- stream_global->outgoing_window -= window_delta;
-
- if (stream_global->write_state == GRPC_WRITE_STATE_QUEUED_CLOSE &&
- stream_global->outgoing_sopb->nops == 0) {
- if (!transport_global->is_client && !stream_global->read_closed) {
- stream_writing->send_closed = GRPC_SEND_CLOSED_WITH_RST_STREAM;
+ stream_writing->read_closed = stream_global->read_closed;
+
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
+ outgoing_window, stream_global,
+ outgoing_window);
+
+ sent_initial_metadata = stream_writing->sent_initial_metadata;
+ if (!sent_initial_metadata && stream_global->send_initial_metadata) {
+ stream_writing->send_initial_metadata =
+ stream_global->send_initial_metadata;
+ stream_global->send_initial_metadata = NULL;
+ if (grpc_chttp2_list_add_writing_stream(transport_writing,
+ stream_writing)) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
+ }
+ sent_initial_metadata = 1;
+ }
+ if (sent_initial_metadata) {
+ if (stream_global->send_message != NULL) {
+ gpr_slice hdr = gpr_slice_malloc(5);
+ gpr_uint8 *p = GPR_SLICE_START_PTR(hdr);
+ gpr_uint32 len = stream_global->send_message->length;
+ GPR_ASSERT(stream_writing->send_message == NULL);
+ p[0] = (stream_global->send_message->flags &
+ GRPC_WRITE_INTERNAL_COMPRESS) != 0;
+ p[1] = (gpr_uint8)(len >> 24);
+ p[2] = (gpr_uint8)(len >> 16);
+ p[3] = (gpr_uint8)(len >> 8);
+ p[4] = (gpr_uint8)(len);
+ gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
+ if (stream_global->send_message->length > 0) {
+ stream_writing->send_message = stream_global->send_message;
} else {
- stream_writing->send_closed = GRPC_SEND_CLOSED;
+ stream_writing->send_message = NULL;
}
+ stream_writing->stream_fetched = 0;
+ stream_global->send_message = NULL;
}
-
- if (stream_global->outgoing_window > 0 &&
- stream_global->outgoing_sopb->nops != 0) {
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
- if (first_reinserted_stream == NULL &&
- transport_global->outgoing_window == 0) {
- first_reinserted_stream = stream_global;
+ if ((stream_writing->send_message != NULL ||
+ stream_writing->flow_controlled_buffer.length > 0) &&
+ stream_writing->outgoing_window > 0) {
+ if (transport_writing->outgoing_window > 0) {
+ if (grpc_chttp2_list_add_writing_stream(transport_writing,
+ stream_writing)) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
+ }
+ } else {
+ grpc_chttp2_list_add_stalled_by_transport(transport_writing,
+ stream_writing);
+ }
+ }
+ if (stream_global->send_trailing_metadata) {
+ stream_writing->send_trailing_metadata =
+ stream_global->send_trailing_metadata;
+ stream_global->send_trailing_metadata = NULL;
+ if (grpc_chttp2_list_add_writing_stream(transport_writing,
+ stream_writing)) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}
}
}
if (!stream_global->read_closed &&
- stream_global->unannounced_incoming_window > 0) {
- GPR_ASSERT(stream_writing->announce_window == 0);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "write", transport_writing, stream_writing, announce_window,
- stream_global->unannounced_incoming_window);
- stream_writing->announce_window =
- stream_global->unannounced_incoming_window;
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "write", transport_global, stream_global, incoming_window,
- stream_global->unannounced_incoming_window);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "write", transport_global, stream_global, unannounced_incoming_window,
- -(gpr_int64)stream_global->unannounced_incoming_window);
- stream_global->incoming_window +=
- stream_global->unannounced_incoming_window;
- stream_global->unannounced_incoming_window = 0;
- grpc_chttp2_list_add_incoming_window_updated(transport_global,
- stream_global);
- stream_global->writing_now |= GRPC_CHTTP2_WRITING_WINDOW;
- }
- if (stream_writing->sopb.nops > 0 ||
- stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
- stream_global->writing_now |= GRPC_CHTTP2_WRITING_DATA;
- }
- if (stream_global->writing_now != 0) {
- grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
+ stream_global->unannounced_incoming_window_for_writing > 1024) {
+ GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
+ announce_window, stream_global,
+ unannounced_incoming_window_for_writing);
+ if (grpc_chttp2_list_add_writing_stream(transport_writing,
+ stream_writing)) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
+ }
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
- if (transport_global->incoming_window <
- transport_global->connection_window_target * 3 / 4) {
- window_delta = transport_global->connection_window_target -
- transport_global->incoming_window;
+ if (transport_global->announce_incoming_window > 0) {
+ gpr_uint32 announced = (gpr_uint32)GPR_MIN(
+ transport_global->announce_incoming_window, GPR_UINT32_MAX);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
+ announce_incoming_window, announced);
gpr_slice_buffer_add(&transport_writing->outbuf,
- grpc_chttp2_window_update_create(0, window_delta));
- GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("write", transport_global,
- incoming_window, window_delta);
- transport_global->incoming_window += window_delta;
+ grpc_chttp2_window_update_create(0, announced));
}
+ GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
+
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
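The rewritten check pass classifies each writable stream by two windows: its own outgoing window and the transport's. A stream with pending bytes and an open stream window joins the writing list only while the transport window is also open; otherwise it parks on the new stalled-by-transport list until a connection-level WINDOW_UPDATE reopens it. A minimal standalone model of that decision, with invented names, is sketched below.

/* Illustrative classification only; queue_t and classify are not gRPC API. */
typedef enum {
  QUEUE_NONE,                 /* nothing to send, or stream window closed */
  QUEUE_WRITING,              /* both windows open: frame data now */
  QUEUE_STALLED_BY_TRANSPORT  /* stream can send, connection cannot */
} queue_t;

static queue_t classify(long pending_bytes, long stream_window,
                        long transport_window) {
  if (pending_bytes <= 0 || stream_window <= 0) return QUEUE_NONE;
  return transport_window > 0 ? QUEUE_WRITING : QUEUE_STALLED_BY_TRANSPORT;
}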
@@ -169,47 +180,145 @@ void grpc_chttp2_perform_writes(
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
- finalize_outbuf(transport_writing);
+ finalize_outbuf(exec_ctx, transport_writing);
- GPR_ASSERT(transport_writing->outbuf.count > 0);
GPR_ASSERT(endpoint);
- grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
- &transport_writing->done_cb);
+ if (transport_writing->outbuf.count > 0) {
+ grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
+ &transport_writing->done_cb);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, &transport_writing->done_cb, 1);
+ }
}
-static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
+static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_BEGIN("finalize_outbuf", 0);
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
- if (stream_writing->sopb.nops > 0 ||
- stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
- grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
- stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
- stream_writing->id,
- &transport_writing->hpack_compressor,
- &transport_writing->outbuf);
- stream_writing->sopb.nops = 0;
+ gpr_uint32 max_outgoing =
+ (gpr_uint32)GPR_MIN(GRPC_CHTTP2_MAX_PAYLOAD_LENGTH,
+ GPR_MIN(stream_writing->outgoing_window,
+ transport_writing->outgoing_window));
+ /* send initial metadata if it's available */
+ if (stream_writing->send_initial_metadata != NULL) {
+ grpc_chttp2_encode_header(
+ &transport_writing->hpack_compressor, stream_writing->id,
+ stream_writing->send_initial_metadata, 0, &transport_writing->outbuf);
+ stream_writing->send_initial_metadata = NULL;
+ stream_writing->sent_initial_metadata = 1;
}
- if (stream_writing->announce_window > 0) {
+ /* send any window updates */
+ if (stream_writing->announce_window > 0 &&
+ stream_writing->send_initial_metadata == NULL) {
+ gpr_uint32 announce = stream_writing->announce_window;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_writing->id,
stream_writing->announce_window));
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "write", transport_writing, stream_writing, announce_window,
- -(gpr_int64)stream_writing->announce_window);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
+ announce_window, announce);
stream_writing->announce_window = 0;
}
- if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {
- gpr_slice_buffer_add(&transport_writing->outbuf,
- grpc_chttp2_rst_stream_create(stream_writing->id,
- GRPC_CHTTP2_NO_ERROR));
+ /* fetch any body bytes */
+ while (!stream_writing->fetching && stream_writing->send_message &&
+ stream_writing->flow_controlled_buffer.length < max_outgoing &&
+ stream_writing->stream_fetched <
+ stream_writing->send_message->length) {
+ if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
+ &stream_writing->fetching_slice, max_outgoing,
+ &stream_writing->finished_fetch)) {
+ stream_writing->stream_fetched +=
+ GPR_SLICE_LENGTH(stream_writing->fetching_slice);
+ if (stream_writing->stream_fetched ==
+ stream_writing->send_message->length) {
+ stream_writing->send_message = NULL;
+ }
+ gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
+ stream_writing->fetching_slice);
+ } else {
+ stream_writing->fetching = 1;
+ }
+ }
+ /* send any body bytes */
+ if (stream_writing->flow_controlled_buffer.length > 0) {
+ if (max_outgoing > 0) {
+ gpr_uint32 send_bytes = (gpr_uint32)GPR_MIN(
+ max_outgoing, stream_writing->flow_controlled_buffer.length);
+ int is_last_data_frame =
+ stream_writing->send_message == NULL &&
+ send_bytes == stream_writing->flow_controlled_buffer.length;
+ int is_last_frame = is_last_data_frame &&
+ stream_writing->send_trailing_metadata != NULL &&
+ grpc_metadata_batch_is_empty(
+ stream_writing->send_trailing_metadata);
+ grpc_chttp2_encode_data(
+ stream_writing->id, &stream_writing->flow_controlled_buffer,
+ send_bytes, is_last_frame, &transport_writing->outbuf);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
+ stream_writing, outgoing_window,
+ send_bytes);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
+ outgoing_window, send_bytes);
+ if (is_last_frame) {
+ stream_writing->send_trailing_metadata = NULL;
+ stream_writing->sent_trailing_metadata = 1;
+ }
+ if (is_last_data_frame) {
+ GPR_ASSERT(stream_writing->send_message == NULL);
+ stream_writing->sent_message = 1;
+ }
+ } else if (transport_writing->outgoing_window == 0) {
+ grpc_chttp2_list_add_stalled_by_transport(transport_writing,
+ stream_writing);
+ grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
+ }
+ }
+ /* send trailing metadata if it's available and we're ready for it */
+ if (stream_writing->send_message == NULL &&
+ stream_writing->flow_controlled_buffer.length == 0 &&
+ stream_writing->send_trailing_metadata != NULL) {
+ if (grpc_metadata_batch_is_empty(
+ stream_writing->send_trailing_metadata)) {
+ grpc_chttp2_encode_data(stream_writing->id,
+ &stream_writing->flow_controlled_buffer, 0, 1,
+ &transport_writing->outbuf);
+ } else {
+ grpc_chttp2_encode_header(&transport_writing->hpack_compressor,
+ stream_writing->id,
+ stream_writing->send_trailing_metadata, 1,
+ &transport_writing->outbuf);
+ }
+ if (!transport_writing->is_client && !stream_writing->read_closed) {
+ gpr_slice_buffer_add(&transport_writing->outbuf,
+ grpc_chttp2_rst_stream_create(
+ stream_writing->id, GRPC_CHTTP2_NO_ERROR));
+ }
+ stream_writing->send_trailing_metadata = NULL;
+ stream_writing->sent_trailing_metadata = 1;
+ }
+ /* if there's more to write, then loop, otherwise prepare to finish the
+ * write */
+ if ((stream_writing->flow_controlled_buffer.length > 0 ||
+ (stream_writing->send_message && !stream_writing->fetching)) &&
+ stream_writing->outgoing_window > 0) {
+ if (transport_writing->outgoing_window > 0) {
+ if (grpc_chttp2_list_add_writing_stream(transport_writing,
+ stream_writing)) {
+ /* do nothing - already reffed */
+ }
+ } else {
+ grpc_chttp2_list_add_stalled_by_transport(transport_writing,
+ stream_writing);
+ grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
+ }
+ } else {
+ grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
GPR_TIMER_END("finalize_outbuf", 0);
@@ -223,24 +332,25 @@ void grpc_chttp2_cleanup_writing(
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
- GPR_ASSERT(stream_global->writing_now != 0);
- if (stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
- stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
- if (!transport_global->is_client) {
- stream_global->read_closed = 1;
- }
+ if (stream_writing->sent_initial_metadata) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_initial_metadata_finished, 1);
}
- if (stream_global->writing_now & GRPC_CHTTP2_WRITING_DATA) {
- if (stream_global->outgoing_sopb != NULL &&
- stream_global->outgoing_sopb->nops == 0) {
- GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_QUEUED_CLOSE);
- stream_global->outgoing_sopb = NULL;
- grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 1);
- }
+ if (stream_writing->sent_message) {
+ GPR_ASSERT(stream_writing->send_message == NULL);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_message_finished, 1);
+ stream_writing->sent_message = 0;
+ }
+ if (stream_writing->sent_trailing_metadata) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_trailing_metadata_finished, 1);
+ }
+ if (stream_writing->sent_trailing_metadata) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
+ !transport_global->is_client, 1);
}
- stream_global->writing_now = 0;
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
}
gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
}
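The new send path frames messages itself instead of going through the old sopb machinery: each grpc_byte_stream is prefixed with the 5-byte gRPC message header (one flags byte whose low bit is the compressed flag, then the length as a 4-byte big-endian integer), and the resulting bytes are cut into DATA frames capped by GPR_MIN of the stream window, the transport window and GRPC_CHTTP2_MAX_PAYLOAD_LENGTH. Only the header layout in the sketch below is taken from the diff; the helper itself is an illustrative stand-in.

#include <assert.h>
#include <stdint.h>

static void write_grpc_message_prefix(uint8_t *p, int compressed, uint32_t len) {
  p[0] = (uint8_t)(compressed != 0); /* GRPC_WRITE_INTERNAL_COMPRESS bit */
  p[1] = (uint8_t)(len >> 24);
  p[2] = (uint8_t)(len >> 16);
  p[3] = (uint8_t)(len >> 8);
  p[4] = (uint8_t)(len);
}

int main(void) {
  uint8_t hdr[5];
  write_grpc_message_prefix(hdr, 0, 0x01020304u);
  assert(hdr[0] == 0 && hdr[1] == 1 && hdr[2] == 2);
  assert(hdr[3] == 3 && hdr[4] == 4);
  return 0;
}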
diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c
index effc3c4b3b..7793f7c9e4 100644
--- a/src/core/transport/chttp2_transport.c
+++ b/src/core/transport/chttp2_transport.c
@@ -49,6 +49,7 @@
#include "src/core/transport/chttp2/internal.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
+#include "src/core/transport/static_metadata.h"
#include "src/core/transport/transport_impl.h"
#define DEFAULT_WINDOW 65535
@@ -75,14 +76,14 @@ int grpc_flowctl_trace = 0;
#define STREAM_FROM_GLOBAL(sg) \
((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
+#define STREAM_FROM_PARSING(sg) \
+ ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, parsing)))
+
static const grpc_transport_vtable vtable;
static void lock(grpc_chttp2_transport *t);
static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
-static void unlock_check_read_write_state(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-
/* forward declarations of various callbacks that we'll build closures around */
static void writing_action(grpc_exec_ctx *exec_ctx, void *t,
int iomgr_success_ignored);
@@ -103,11 +104,13 @@ static void perform_stream_op_locked(
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op);
/** Cancel a stream: coming from the transport API */
-static void cancel_from_api(grpc_chttp2_transport_global *transport_global,
+static void cancel_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
grpc_status_code status);
-static void close_from_api(grpc_chttp2_transport_global *transport_global,
+static void close_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
grpc_status_code status,
gpr_slice *optional_message);
@@ -128,6 +131,12 @@ static void connectivity_state_set(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_connectivity_state state, const char *reason);
+static void check_read_ops(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global);
+
+static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global);
+
/*
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/
@@ -150,8 +159,6 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser_destroy(&t->parsing.hpack_parser);
grpc_chttp2_goaway_parser_destroy(&t->parsing.goaway_parser);
- GRPC_MDSTR_UNREF(t->parsing.str_grpc_timeout);
-
for (i = 0; i < STREAM_LIST_COUNT; i++) {
GPR_ASSERT(t->lists[i].head == NULL);
GPR_ASSERT(t->lists[i].tail == NULL);
@@ -177,8 +184,6 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
gpr_free(ping);
}
- grpc_mdctx_unref(t->metadata_context);
-
gpr_free(t->peer_string);
gpr_free(t);
}
@@ -213,8 +218,7 @@ static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
- grpc_endpoint *ep, grpc_mdctx *mdctx,
- gpr_uint8 is_client) {
+ grpc_endpoint *ep, gpr_uint8 is_client) {
size_t i;
int j;
@@ -230,20 +234,17 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
/* ref is dropped at transport close() */
gpr_ref_init(&t->shutdown_ep_refs, 1);
gpr_mu_init(&t->mu);
- grpc_mdctx_ref(mdctx);
t->peer_string = grpc_endpoint_get_peer(ep);
- t->metadata_context = mdctx;
t->endpoint_reading = 1;
t->global.next_stream_id = is_client ? 1 : 2;
t->global.is_client = is_client;
- t->global.outgoing_window = DEFAULT_WINDOW;
- t->global.incoming_window = DEFAULT_WINDOW;
+ t->writing.outgoing_window = DEFAULT_WINDOW;
+ t->parsing.incoming_window = DEFAULT_WINDOW;
+ t->global.stream_lookahead = DEFAULT_WINDOW;
t->global.connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
t->global.ping_counter = 1;
t->global.pings.next = t->global.pings.prev = &t->global.pings;
t->parsing.is_client = is_client;
- t->parsing.str_grpc_timeout =
- grpc_mdstr_from_string(t->metadata_context, "grpc-timeout");
t->parsing.deframe_state =
is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->writing.is_client = is_client;
@@ -254,12 +255,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_slice_buffer_init(&t->global.qbuf);
gpr_slice_buffer_init(&t->writing.outbuf);
- grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx);
+ grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor);
grpc_closure_init(&t->writing_action, writing_action, t);
gpr_slice_buffer_init(&t->parsing.qbuf);
grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
- grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context);
+ grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser);
grpc_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing,
&t->writing);
@@ -329,6 +330,43 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->global.next_stream_id =
(gpr_uint32)channel_args->args[i].value.integer;
}
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES)) {
+ if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES);
+ } else if (channel_args->args[i].value.integer <= 5) {
+ gpr_log(GPR_ERROR, "%s: must be at least 5",
+ GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES);
+ } else {
+ t->global.stream_lookahead =
+ (gpr_uint32)channel_args->args[i].value.integer;
+ }
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER)) {
+ if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER);
+ } else if (channel_args->args[i].value.integer < 0) {
+ gpr_log(GPR_DEBUG, "%s: must be non-negative",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER);
+ } else {
+ push_setting(t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
+ (gpr_uint32)channel_args->args[i].value.integer);
+ }
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
+ if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER);
+ } else if (channel_args->args[i].value.integer < 0) {
+ gpr_log(GPR_DEBUG, "%s: must be non-negative",
+ GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER);
+ } else {
+ grpc_chttp2_hpack_compressor_set_max_usable_size(
+ &t->writing.hpack_compressor,
+ (gpr_uint32)channel_args->args[i].value.integer);
+ }
}
}
}
@@ -392,19 +430,46 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
}
}
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
+ const char *reason) {
+ grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount, reason);
+}
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global,
+ const char *reason) {
+ grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount,
+ reason);
+}
+#else
+void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global) {
+ grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount);
+}
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global) {
+ grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount);
+}
+#endif
+
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, const void *server_data,
- grpc_transport_stream_op *initial_op) {
+ grpc_stream *gs, grpc_stream_refcount *refcount,
+ const void *server_data) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
memset(s, 0, sizeof(*s));
- grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.incoming_metadata);
- grpc_chttp2_incoming_metadata_buffer_init(&s->global.incoming_metadata);
- grpc_sopb_init(&s->writing.sopb);
- grpc_sopb_init(&s->global.incoming_sopb);
+ s->refcount = refcount;
+ GRPC_CHTTP2_STREAM_REF(&s->global, "chttp2");
+
+ grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[1]);
+ grpc_chttp2_incoming_metadata_buffer_init(
+ &s->global.received_initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_init(
+ &s->global.received_trailing_metadata);
grpc_chttp2_data_parser_init(&s->parsing.data_parser);
+ gpr_slice_buffer_init(&s->writing.flow_controlled_buffer);
REF_TRANSPORT(t, "stream");
@@ -413,20 +478,17 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
if (server_data) {
GPR_ASSERT(t->parsing_active);
s->global.id = (gpr_uint32)(gpr_uintptr)server_data;
+ s->parsing.id = s->global.id;
s->global.outgoing_window =
t->global.settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- s->global.max_recv_bytes = s->parsing.incoming_window =
- s->global.incoming_window =
- t->global.settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->parsing.incoming_window = s->global.max_recv_bytes =
+ t->global.settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
grpc_chttp2_stream_map_add(&t->parsing_stream_map, s->global.id, s);
s->global.in_stream_map = 1;
}
-
- if (initial_op)
- perform_stream_op_locked(exec_ctx, &t->global, &s->global, initial_op);
unlock(exec_ctx, t);
return 0;
@@ -437,10 +499,13 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
int i;
+ grpc_byte_stream *bs;
+
+ GPR_TIMER_BEGIN("destroy_stream", 0);
gpr_mu_lock(&t->mu);
- GPR_ASSERT(s->global.published_state == GRPC_STREAM_CLOSED ||
+ GPR_ASSERT((s->global.write_closed && s->global.read_closed) ||
s->global.id == 0);
GPR_ASSERT(!s->global.in_stream_map);
if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
@@ -451,8 +516,9 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->global.id) == NULL);
}
- grpc_chttp2_list_remove_incoming_window_updated(&t->global, &s->global);
grpc_chttp2_list_remove_writable_stream(&t->global, &s->global);
+ grpc_chttp2_list_remove_unannounced_incoming_window_available(&t->global,
+ &s->global);
gpr_mu_unlock(&t->mu);
@@ -464,17 +530,29 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
}
- GPR_ASSERT(s->global.outgoing_sopb == NULL);
- GPR_ASSERT(s->global.publish_sopb == NULL);
- grpc_sopb_destroy(&s->writing.sopb);
- grpc_sopb_destroy(&s->global.incoming_sopb);
- grpc_chttp2_data_parser_destroy(&s->parsing.data_parser);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.incoming_metadata);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->global.incoming_metadata);
- grpc_chttp2_incoming_metadata_live_op_buffer_end(
- &s->global.outstanding_metadata);
+ while (
+ (bs = grpc_chttp2_incoming_frame_queue_pop(&s->global.incoming_frames))) {
+ grpc_byte_stream_destroy(bs);
+ }
+
+ GPR_ASSERT(s->global.send_initial_metadata_finished == NULL);
+ GPR_ASSERT(s->global.send_message_finished == NULL);
+ GPR_ASSERT(s->global.send_trailing_metadata_finished == NULL);
+ GPR_ASSERT(s->global.recv_initial_metadata_finished == NULL);
+ GPR_ASSERT(s->global.recv_message_ready == NULL);
+ GPR_ASSERT(s->global.recv_trailing_metadata_finished == NULL);
+ grpc_chttp2_data_parser_destroy(exec_ctx, &s->parsing.data_parser);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[1]);
+ grpc_chttp2_incoming_metadata_buffer_destroy(
+ &s->global.received_initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_destroy(
+ &s->global.received_trailing_metadata);
+ gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer);
UNREF_TRANSPORT(exec_ctx, t, "stream");
+
+ GPR_TIMER_END("destroy_stream", 0);
}
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
@@ -486,12 +564,14 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
}
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ gpr_uint32 id) {
grpc_chttp2_stream *accepting;
grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
GPR_ASSERT(t->accepting_stream == NULL);
t->accepting_stream = &accepting;
- t->channel_callback.accept_stream(t->channel_callback.accept_stream_user_data,
+ t->channel_callback.accept_stream(exec_ctx,
+ t->channel_callback.accept_stream_user_data,
&t->base, (void *)(gpr_uintptr)id);
t->accepting_stream = NULL;
return &accepting->parsing;
@@ -511,14 +591,15 @@ static void lock(grpc_chttp2_transport *t) { gpr_mu_lock(&t->mu); }
static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
GPR_TIMER_BEGIN("unlock", 0);
- unlock_check_read_write_state(exec_ctx, t);
if (!t->writing_active && !t->closed &&
- grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
+ grpc_chttp2_unlocking_check_writes(&t->global, &t->writing,
+ t->parsing_active)) {
t->writing_active = 1;
REF_TRANSPORT(t, "writing");
grpc_exec_ctx_enqueue(exec_ctx, &t->writing_action, 1);
prevent_endpoint_shutdown(t);
}
+ check_read_ops(exec_ctx, &t->global);
gpr_mu_unlock(&t->mu);
GPR_TIMER_END("unlock", 0);
@@ -547,6 +628,7 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing_ptr, int success) {
grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr;
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
+ grpc_chttp2_stream_global *stream_global;
GPR_TIMER_BEGIN("grpc_chttp2_terminate_writing", 0);
@@ -558,9 +640,13 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
drop_connection(exec_ctx, t);
}
- /* cleanup writing related jazz */
grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing);
+ while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global, &stream_global)) {
+ fail_pending_writes(exec_ctx, stream_global);
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes");
+ }
+
/* leave the writing flag up on shutdown to prevent further writes in unlock()
from starting */
t->writing_active = 0;
@@ -598,6 +684,7 @@ void grpc_chttp2_add_incoming_goaway(
static void maybe_start_some_streams(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global) {
grpc_chttp2_stream_global *stream_global;
+ gpr_uint32 stream_incoming_window;
/* start streams where we have free grpc_chttp2_stream ids and free
* concurrency */
while (transport_global->next_stream_id <= MAX_CLIENT_STREAM_ID &&
@@ -607,13 +694,16 @@ static void maybe_start_some_streams(
[GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] &&
grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
&stream_global)) {
+ /* safe since we can't (legally) be parsing this stream yet */
+ grpc_chttp2_stream_parsing *stream_parsing =
+ &STREAM_FROM_GLOBAL(stream_global)->parsing;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
transport_global->is_client ? "CLI" : "SVR", stream_global,
transport_global->next_stream_id));
GPR_ASSERT(stream_global->id == 0);
- stream_global->id = transport_global->next_stream_id;
+ stream_global->id = stream_parsing->id = transport_global->next_stream_id;
transport_global->next_stream_id += 2;
if (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID) {
@@ -625,103 +715,171 @@ static void maybe_start_some_streams(
stream_global->outgoing_window =
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- stream_global->incoming_window =
+ stream_parsing->incoming_window = stream_incoming_window =
transport_global->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
stream_global->max_recv_bytes =
- GPR_MAX(stream_global->incoming_window, stream_global->max_recv_bytes);
+ GPR_MAX(stream_incoming_window, stream_global->max_recv_bytes);
grpc_chttp2_stream_map_add(
&TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map,
stream_global->id, STREAM_FROM_GLOBAL(stream_global));
stream_global->in_stream_map = 1;
transport_global->concurrent_stream_count++;
- grpc_chttp2_list_add_incoming_window_updated(transport_global,
- stream_global);
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
/* cancel out streams that will never be started */
while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
&stream_global)) {
- cancel_from_api(transport_global, stream_global, GRPC_STATUS_UNAVAILABLE);
+ cancel_from_api(exec_ctx, transport_global, stream_global,
+ GRPC_STATUS_UNAVAILABLE);
}
}
+static grpc_closure *add_closure_barrier(grpc_closure *closure) {
+ closure->final_data += 2;
+ return closure;
+}
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_closure **pclosure, int success) {
+ grpc_closure *closure = *pclosure;
+ if (closure == NULL) {
+ return;
+ }
+ closure->final_data -= 2;
+ if (!success) {
+ closure->final_data |= 1;
+ }
+ if (closure->final_data < 2) {
+ grpc_exec_ctx_enqueue(exec_ctx, closure, closure->final_data == 0);
+ }
+ *pclosure = NULL;
+}
+
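add_closure_barrier and grpc_chttp2_complete_closure_step implement a small completion barrier inside the closure's final_data field: the count lives in increments of 2, the low bit accumulates failure, and the closure is enqueued once the count drops below 2, succeeding only if no step failed. The standalone model below reproduces that arithmetic with invented types to make the invariant concrete.

#include <assert.h>

typedef struct {
  int final_data;    /* pending steps * 2, low bit set if any step failed */
  int fired;
  int fired_success;
} barrier_closure;

static void add_barrier(barrier_closure *c) { c->final_data += 2; }

static void complete_step(barrier_closure *c, int success) {
  c->final_data -= 2;
  if (!success) c->final_data |= 1;
  if (c->final_data < 2) {
    c->fired = 1;
    c->fired_success = (c->final_data == 0);
  }
}

int main(void) {
  barrier_closure c = {2, 0, 0}; /* initial count of 2, released when the op
                                    finishes, as perform_stream_op_locked does */
  add_barrier(&c);               /* e.g. send_initial_metadata_finished */
  add_barrier(&c);               /* e.g. send_trailing_metadata_finished */
  complete_step(&c, 1);
  complete_step(&c, 0);          /* one sub-operation fails */
  complete_step(&c, 1);          /* op construction done: drop the initial 2 */
  assert(c.fired && !c.fired_success);
  return 0;
}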
+static int contains_non_ok_status(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_metadata_batch *batch) {
+ grpc_linked_mdelem *l;
+ for (l = batch->list.head; l; l = l->next) {
+ if (l->md->key == GRPC_MDSTR_GRPC_STATUS &&
+ l->md != GRPC_MDELEM_GRPC_STATUS_0) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, int success) {}
+
static void perform_stream_op_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op) {
+ grpc_closure *on_complete;
+
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
+
+ on_complete = op->on_complete;
+ if (on_complete == NULL) {
+ on_complete = grpc_closure_create(do_nothing, NULL);
+ }
+ /* use final_data as a barrier until enqueue time; the initial counter is
+ dropped at the end of this function */
+ on_complete->final_data = 2;
+
if (op->cancel_with_status != GRPC_STATUS_OK) {
- cancel_from_api(transport_global, stream_global, op->cancel_with_status);
+ cancel_from_api(exec_ctx, transport_global, stream_global,
+ op->cancel_with_status);
}
if (op->close_with_status != GRPC_STATUS_OK) {
- close_from_api(transport_global, stream_global, op->close_with_status,
- op->optional_close_message);
- }
-
- if (op->send_ops) {
- GPR_ASSERT(stream_global->outgoing_sopb == NULL);
- stream_global->send_done_closure = op->on_done_send;
- if (!stream_global->cancelled) {
- stream_global->written_anything = 1;
- stream_global->outgoing_sopb = op->send_ops;
- if (op->is_last_send &&
- stream_global->write_state == GRPC_WRITE_STATE_OPEN) {
- stream_global->write_state = GRPC_WRITE_STATE_QUEUED_CLOSE;
- }
- if (stream_global->id == 0) {
- GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_DEBUG,
- "HTTP:%s: New grpc_chttp2_stream %p waiting for concurrency",
- transport_global->is_client ? "CLI" : "SVR", stream_global));
+ close_from_api(exec_ctx, transport_global, stream_global,
+ op->close_with_status, op->optional_close_message);
+ }
+
+ if (op->send_initial_metadata != NULL) {
+ GPR_ASSERT(stream_global->send_initial_metadata_finished == NULL);
+ stream_global->send_initial_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->send_initial_metadata = op->send_initial_metadata;
+ if (contains_non_ok_status(transport_global, op->send_initial_metadata)) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ if (!stream_global->write_closed) {
+ if (transport_global->is_client) {
+ GPR_ASSERT(stream_global->id == 0);
grpc_chttp2_list_add_waiting_for_concurrency(transport_global,
stream_global);
maybe_start_some_streams(exec_ctx, transport_global);
- } else if (stream_global->outgoing_window > 0) {
+ } else {
+ GPR_ASSERT(stream_global->id != 0);
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
} else {
- grpc_sopb_reset(op->send_ops);
- grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 0);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_initial_metadata_finished, 0);
+ }
+ }
+
+ if (op->send_message != NULL) {
+ GPR_ASSERT(stream_global->send_message_finished == NULL);
+ GPR_ASSERT(stream_global->send_message == NULL);
+ stream_global->send_message_finished = add_closure_barrier(on_complete);
+ if (stream_global->write_closed) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_message_finished, 0);
+ } else if (stream_global->id != 0) {
+ stream_global->send_message = op->send_message;
+ grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
}
- if (op->recv_ops) {
- GPR_ASSERT(stream_global->publish_sopb == NULL);
- GPR_ASSERT(stream_global->published_state != GRPC_STREAM_CLOSED);
- stream_global->recv_done_closure = op->on_done_recv;
- stream_global->publish_sopb = op->recv_ops;
- stream_global->publish_sopb->nops = 0;
- stream_global->publish_state = op->recv_state;
- /* clamp max recv bytes */
- op->max_recv_bytes = GPR_MIN(op->max_recv_bytes, GPR_UINT32_MAX);
- if (stream_global->max_recv_bytes < op->max_recv_bytes) {
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "op", transport_global, stream_global, max_recv_bytes,
- op->max_recv_bytes - stream_global->max_recv_bytes);
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
- "op", transport_global, stream_global, unannounced_incoming_window,
- op->max_recv_bytes - stream_global->max_recv_bytes);
- stream_global->unannounced_incoming_window +=
- (gpr_uint32)op->max_recv_bytes - stream_global->max_recv_bytes;
- stream_global->max_recv_bytes = (gpr_uint32)op->max_recv_bytes;
+ if (op->send_trailing_metadata != NULL) {
+ GPR_ASSERT(stream_global->send_trailing_metadata_finished == NULL);
+ stream_global->send_trailing_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->send_trailing_metadata = op->send_trailing_metadata;
+ if (contains_non_ok_status(transport_global, op->send_trailing_metadata)) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
- grpc_chttp2_incoming_metadata_live_op_buffer_end(
- &stream_global->outstanding_metadata);
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
- if (stream_global->id != 0) {
+ if (stream_global->write_closed) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_trailing_metadata_finished,
+ grpc_metadata_batch_is_empty(op->send_trailing_metadata));
+ } else if (stream_global->id != 0) {
+ /* TODO(ctiller): check if there's flow control for any outstanding
+ bytes before going writable */
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
}
- if (op->bind_pollset) {
- add_to_pollset_locked(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
- op->bind_pollset);
+ if (op->recv_initial_metadata != NULL) {
+ GPR_ASSERT(stream_global->recv_initial_metadata_finished == NULL);
+ stream_global->recv_initial_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->recv_initial_metadata = op->recv_initial_metadata;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
- grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
+ if (op->recv_message != NULL) {
+ GPR_ASSERT(stream_global->recv_message_ready == NULL);
+ stream_global->recv_message_ready = op->recv_message_ready;
+ stream_global->recv_message = op->recv_message;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+
+ if (op->recv_trailing_metadata != NULL) {
+ GPR_ASSERT(stream_global->recv_trailing_metadata_finished == NULL);
+ stream_global->recv_trailing_metadata_finished =
+ add_closure_barrier(on_complete);
+ stream_global->recv_trailing_metadata = op->recv_trailing_metadata;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+
+ grpc_chttp2_complete_closure_step(exec_ctx, &on_complete, 1);
+
GPR_TIMER_END("perform_stream_op_locked", 0);
}
@@ -752,6 +910,26 @@ static void send_ping_locked(grpc_chttp2_transport *t, grpc_closure *on_recv) {
gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id));
}
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
+ const gpr_uint8 *opaque_8bytes) {
+ grpc_chttp2_outstanding_ping *ping;
+ grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
+ grpc_chttp2_transport_global *transport_global = &t->global;
+ lock(t);
+ for (ping = transport_global->pings.next; ping != &transport_global->pings;
+ ping = ping->next) {
+ if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
+ grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 1);
+ ping->next->prev = ping->prev;
+ ping->prev->next = ping->next;
+ gpr_free(ping);
+ break;
+ }
+ }
+ unlock(exec_ctx, t);
+}
+
static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_transport_op *op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
@@ -761,7 +939,7 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
- if (op->on_connectivity_state_change) {
+ if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
@@ -811,12 +989,49 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
* INPUT PROCESSING
*/
-static grpc_stream_state compute_state(gpr_uint8 write_closed,
- gpr_uint8 read_closed) {
- if (write_closed && read_closed) return GRPC_STREAM_CLOSED;
- if (write_closed) return GRPC_STREAM_SEND_CLOSED;
- if (read_closed) return GRPC_STREAM_RECV_CLOSED;
- return GRPC_STREAM_OPEN;
+static void check_read_ops(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global) {
+ grpc_chttp2_stream_global *stream_global;
+ grpc_byte_stream *bs;
+ while (
+ grpc_chttp2_list_pop_check_read_ops(transport_global, &stream_global)) {
+ if (stream_global->recv_initial_metadata_finished != NULL &&
+ stream_global->published_initial_metadata) {
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ &stream_global->received_initial_metadata,
+ stream_global->recv_initial_metadata);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->recv_initial_metadata_finished, 1);
+ }
+ if (stream_global->recv_message_ready != NULL) {
+ if (stream_global->incoming_frames.head != NULL) {
+ *stream_global->recv_message = grpc_chttp2_incoming_frame_queue_pop(
+ &stream_global->incoming_frames);
+ GPR_ASSERT(*stream_global->recv_message != NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, 1);
+ stream_global->recv_message_ready = NULL;
+ } else if (stream_global->published_trailing_metadata) {
+ *stream_global->recv_message = NULL;
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_message_ready, 1);
+ stream_global->recv_message_ready = NULL;
+ }
+ }
+ if (stream_global->recv_trailing_metadata_finished != NULL &&
+ stream_global->read_closed && stream_global->write_closed) {
+ while (stream_global->seen_error &&
+ (bs = grpc_chttp2_incoming_frame_queue_pop(
+ &stream_global->incoming_frames)) != NULL) {
+ grpc_byte_stream_destroy(bs);
+ }
+ if (stream_global->incoming_frames.head == NULL) {
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ &stream_global->received_trailing_metadata,
+ stream_global->recv_trailing_metadata);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->recv_trailing_metadata_finished, 1);
+ }
+ }
+ }
}
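check_read_ops replaces the old compute_state/publish machinery: on unlock, each stream on the check-read-ops list has its pending receive operations completed from whatever has already been parsed. For recv_message in particular, the callback fires with the next queued incoming frame if one exists, with NULL once trailing metadata has been published (end of stream), and otherwise stays pending. A standalone sketch of just that decision, with illustrative types, follows.

#include <stddef.h>

typedef struct frame { struct frame *next; } frame;

/* Returns 1 if the pending receive can complete now; *out_message is the
   delivered frame, or NULL to signal end of stream. Purely illustrative. */
static int try_complete_recv(frame **queue_head, int trailers_published,
                             frame **out_message) {
  if (*queue_head != NULL) {
    *out_message = *queue_head;
    *queue_head = (*queue_head)->next;
    return 1;
  }
  if (trailers_published) {
    *out_message = NULL;
    return 1;
  }
  return 0; /* leave recv_message_ready parked on the list */
}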
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -832,7 +1047,7 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
s->global.in_stream_map = 0;
if (t->parsing.incoming_stream == &s->parsing) {
t->parsing.incoming_stream = NULL;
- grpc_chttp2_parsing_become_skip_parser(&t->parsing);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, &t->parsing);
}
if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
close_transport_locked(exec_ctx, t);
@@ -847,109 +1062,10 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
-static void unlock_check_read_write_state(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global;
- grpc_stream_state state;
-
- if (!t->parsing_active) {
- /* if a stream is in the stream map, and gets cancelled, we need to ensure
- we are not parsing before continuing the cancellation to keep things in
- a sane state */
- while (grpc_chttp2_list_pop_closed_waiting_for_parsing(transport_global,
- &stream_global)) {
- GPR_ASSERT(stream_global->in_stream_map);
- GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_OPEN);
- GPR_ASSERT(stream_global->read_closed);
- remove_stream(exec_ctx, t, stream_global->id);
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
- }
- }
-
- if (!t->writing_active) {
- while (grpc_chttp2_list_pop_cancelled_waiting_for_writing(transport_global,
- &stream_global)) {
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
- }
- }
-
- while (grpc_chttp2_list_pop_read_write_state_changed(transport_global,
- &stream_global)) {
- if (stream_global->cancelled) {
- if (t->writing_active &&
- stream_global->write_state != GRPC_WRITE_STATE_SENT_CLOSE) {
- grpc_chttp2_list_add_cancelled_waiting_for_writing(transport_global,
- stream_global);
- } else {
- stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
- if (stream_global->outgoing_sopb != NULL) {
- grpc_sopb_reset(stream_global->outgoing_sopb);
- stream_global->outgoing_sopb = NULL;
- grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 1);
- }
- stream_global->read_closed = 1;
- if (!stream_global->published_cancelled) {
- char buffer[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa(stream_global->cancelled_status, buffer);
- grpc_chttp2_incoming_metadata_buffer_add(
- &stream_global->incoming_metadata,
- grpc_mdelem_from_strings(t->metadata_context, "grpc-status",
- buffer));
- grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
- &stream_global->incoming_metadata, &stream_global->incoming_sopb);
- stream_global->published_cancelled = 1;
- }
- }
- }
- if (stream_global->write_state == GRPC_WRITE_STATE_SENT_CLOSE &&
- stream_global->read_closed && stream_global->in_stream_map) {
- if (t->parsing_active) {
- grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
- stream_global);
- } else {
- remove_stream(exec_ctx, t, stream_global->id);
- }
- }
- if (!stream_global->publish_sopb) {
- continue;
- }
- if (stream_global->writing_now != 0) {
- continue;
- }
- /* FIXME(ctiller): we include in_stream_map in our computation of
- whether the stream is write-closed. This is completely bogus,
- but has the effect of delaying stream-closed until the stream
- is indeed evicted from the stream map, making it safe to delete.
- To fix this will require having an edge after stream-closed
- indicating that the stream is closed AND safe to delete. */
- state = compute_state(
- stream_global->write_state == GRPC_WRITE_STATE_SENT_CLOSE &&
- !stream_global->in_stream_map,
- stream_global->read_closed);
- if (stream_global->incoming_sopb.nops == 0 &&
- state == stream_global->published_state) {
- continue;
- }
- grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
- &stream_global->incoming_metadata, &stream_global->incoming_sopb,
- &stream_global->outstanding_metadata);
- grpc_sopb_swap(stream_global->publish_sopb, &stream_global->incoming_sopb);
- stream_global->published_state = *stream_global->publish_state = state;
- grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_done_closure, 1);
- stream_global->recv_done_closure = NULL;
- stream_global->publish_sopb = NULL;
- stream_global->publish_state = NULL;
- }
-}
-
-static void cancel_from_api(grpc_chttp2_transport_global *transport_global,
+static void cancel_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
grpc_status_code status) {
- stream_global->cancelled = 1;
- stream_global->cancelled_status = status;
if (stream_global->id != 0) {
gpr_slice_buffer_add(
&transport_global->qbuf,
@@ -957,11 +1073,100 @@ static void cancel_from_api(grpc_chttp2_transport_global *transport_global,
stream_global->id,
(gpr_uint32)grpc_chttp2_grpc_status_to_http2_error(status)));
}
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
+ NULL);
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
+ 1);
+}
+
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global,
+ grpc_status_code status, gpr_slice *slice) {
+ if (status != GRPC_STATUS_OK) {
+ stream_global->seen_error = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ /* stream_global->recv_trailing_metadata_finished gives us a
+ last-chance replacement: we've already received trailing metadata,
+ but something more important has become available to signal to the
+ upper layers. Dropping what we've got and publishing this instead is
+ safe because we haven't told anyone about that metadata yet. */
+ if (!stream_global->published_trailing_metadata ||
+ stream_global->recv_trailing_metadata_finished != NULL) {
+ char status_string[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(status, status_string);
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &stream_global->received_trailing_metadata,
+ grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_STATUS, grpc_mdstr_from_string(status_string)));
+ if (slice) {
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &stream_global->received_trailing_metadata,
+ grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_mdstr_from_slice(gpr_slice_ref(*slice))));
+ }
+ stream_global->published_trailing_metadata = 1;
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ }
+ if (slice) {
+ gpr_slice_unref(*slice);
+ }
+}
+
+static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_stream_global *stream_global) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_initial_metadata_finished, 0);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, &stream_global->send_trailing_metadata_finished, 0);
+ grpc_chttp2_complete_closure_step(exec_ctx,
+ &stream_global->send_message_finished, 0);
+}
+
+void grpc_chttp2_mark_stream_closed(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, int close_reads,
+ int close_writes) {
+ if (stream_global->read_closed && stream_global->write_closed) {
+ /* already closed */
+ return;
+ }
+ grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
+ if (close_reads && !stream_global->read_closed) {
+ stream_global->read_closed = 1;
+ stream_global->published_initial_metadata = 1;
+ stream_global->published_trailing_metadata = 1;
+ }
+ if (close_writes && !stream_global->write_closed) {
+ stream_global->write_closed = 1;
+ if (TRANSPORT_FROM_GLOBAL(transport_global)->writing_active) {
+ GRPC_CHTTP2_STREAM_REF(stream_global, "finish_writes");
+ grpc_chttp2_list_add_closed_waiting_for_writing(transport_global,
+ stream_global);
+ } else {
+ fail_pending_writes(exec_ctx, stream_global);
+ }
+ }
+ if (stream_global->read_closed && stream_global->write_closed) {
+ if (stream_global->id != 0 &&
+ TRANSPORT_FROM_GLOBAL(transport_global)->parsing_active) {
+ grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
+ stream_global);
+ } else {
+ if (stream_global->id != 0) {
+ remove_stream(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
+ stream_global->id);
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
+ }
+ }
}
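As a minimal standalone model of the bookkeeping in grpc_chttp2_mark_stream_closed above (the plain flags and printf are simplified stand-ins for the real stream_global fields, stream lists and closures, which are not reproduced here): each half of the stream can be closed independently, closing an already-closed half is a no-op, and final teardown runs exactly once, when both halves are closed.

#include <stdio.h>

typedef struct {
  int read_closed;
  int write_closed;
  int removed;
} model_stream;

static void remove_from_map(model_stream *s) {
  s->removed = 1;
  printf("stream fully closed; removed from stream map\n");
}

static void model_mark_stream_closed(model_stream *s, int close_reads,
                                     int close_writes) {
  if (s->read_closed && s->write_closed) {
    return; /* already fully closed: nothing left to do */
  }
  if (close_reads && !s->read_closed) {
    s->read_closed = 1; /* no further incoming metadata or messages */
  }
  if (close_writes && !s->write_closed) {
    s->write_closed = 1; /* pending writes would be failed at this point */
  }
  if (s->read_closed && s->write_closed) {
    remove_from_map(s); /* reached at most once per stream */
  }
}

int main(void) {
  model_stream s = {0, 0, 0};
  model_mark_stream_closed(&s, 0, 1); /* close the write half only */
  model_mark_stream_closed(&s, 1, 0); /* reads too: teardown fires here */
  model_mark_stream_closed(&s, 1, 1); /* no-op */
  return 0;
}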
-static void close_from_api(grpc_chttp2_transport_global *transport_global,
+static void close_from_api(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
grpc_status_code status,
gpr_slice *optional_message) {
@@ -973,10 +1178,7 @@ static void close_from_api(grpc_chttp2_transport_global *transport_global,
GPR_ASSERT(status >= 0 && (int)status < 100);
- stream_global->cancelled = 1;
- stream_global->cancelled_status = status;
GPR_ASSERT(stream_global->id != 0);
- GPR_ASSERT(!stream_global->written_anything);
/* Hand roll a header block.
This is unnecessarily ugly - at some point we should find a more elegant
@@ -1059,23 +1261,30 @@ static void close_from_api(grpc_chttp2_transport_global *transport_global,
&transport_global->qbuf,
grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR));
- grpc_chttp2_list_add_read_write_state_changed(transport_global,
- stream_global);
+ if (optional_message) {
+ gpr_slice_ref(*optional_message);
+ }
+ grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
+ optional_message);
+ grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
+ 1);
}
static void cancel_stream_cb(grpc_chttp2_transport_global *transport_global,
void *user_data,
grpc_chttp2_stream_global *stream_global) {
- cancel_from_api(transport_global, stream_global, GRPC_STATUS_UNAVAILABLE);
+ cancel_from_api(user_data, transport_global, stream_global,
+ GRPC_STATUS_UNAVAILABLE);
}
-static void end_all_the_calls(grpc_chttp2_transport *t) {
- grpc_chttp2_for_all_streams(&t->global, NULL, cancel_stream_cb);
+static void end_all_the_calls(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_for_all_streams(&t->global, exec_ctx, cancel_stream_cb);
}
static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
close_transport_locked(exec_ctx, t);
- end_all_the_calls(t);
+ end_all_the_calls(exec_ctx, t);
}
/** update window from a settings change */
@@ -1086,12 +1295,11 @@ static void update_global_window(void *args, gpr_uint32 id, void *stream) {
grpc_chttp2_stream_global *stream_global = &s->global;
int was_zero;
int is_zero;
+ gpr_int64 initial_window_update = t->parsing.initial_window_update;
- GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("settings", transport_global, stream_global,
- outgoing_window,
- t->parsing.initial_window_update);
was_zero = stream_global->outgoing_window <= 0;
- stream_global->outgoing_window += t->parsing.initial_window_update;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", transport_global, stream_global,
+ outgoing_window, initial_window_update);
is_zero = stream_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
@@ -1112,6 +1320,9 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
size_t i;
int keep_reading = 0;
grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport_global *transport_global = &t->global;
+ grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
+ grpc_chttp2_stream_global *stream_global;
GPR_TIMER_BEGIN("recv_data", 0);
@@ -1123,32 +1334,47 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
/* merge stream lists */
grpc_chttp2_stream_map_move_into(&t->new_stream_map,
&t->parsing_stream_map);
- grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
+ grpc_chttp2_prepare_to_read(transport_global, transport_parsing);
gpr_mu_unlock(&t->mu);
GPR_TIMER_BEGIN("recv_data.parse", 0);
for (; i < t->read_buffer.count &&
- grpc_chttp2_perform_read(exec_ctx, &t->parsing,
+ grpc_chttp2_perform_read(exec_ctx, transport_parsing,
t->read_buffer.slices[i]);
i++)
;
GPR_TIMER_END("recv_data.parse", 0);
gpr_mu_lock(&t->mu);
+ /* copy parsing qbuf to global qbuf */
+ gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf);
if (i != t->read_buffer.count) {
+ unlock(exec_ctx, t);
+ lock(t);
drop_connection(exec_ctx, t);
}
/* merge stream lists */
grpc_chttp2_stream_map_move_into(&t->new_stream_map,
&t->parsing_stream_map);
- t->global.concurrent_stream_count =
+ transport_global->concurrent_stream_count =
(gpr_uint32)grpc_chttp2_stream_map_size(&t->parsing_stream_map);
- if (t->parsing.initial_window_update != 0) {
+ if (transport_parsing->initial_window_update != 0) {
grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
update_global_window, t);
- t->parsing.initial_window_update = 0;
+ transport_parsing->initial_window_update = 0;
}
/* handle higher level things */
- grpc_chttp2_publish_reads(exec_ctx, &t->global, &t->parsing);
+ grpc_chttp2_publish_reads(exec_ctx, transport_global, transport_parsing);
t->parsing_active = 0;
+ /* if a stream is in the stream map, and gets cancelled, we need to ensure
+ * we are not parsing before continuing the cancellation to keep things in
+ * a sane state */
+ while (grpc_chttp2_list_pop_closed_waiting_for_parsing(transport_global,
+ &stream_global)) {
+ GPR_ASSERT(stream_global->in_stream_map);
+ GPR_ASSERT(stream_global->write_closed);
+ GPR_ASSERT(stream_global->read_closed);
+ remove_stream(exec_ctx, t, stream_global->id);
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
+ }
}
if (!success || i != t->read_buffer.count || t->closed) {
drop_connection(exec_ctx, t);
@@ -1206,35 +1432,234 @@ static void add_to_pollset_set_locked(grpc_exec_ctx *exec_ctx,
}
}
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset *pollset) {
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+ lock(t);
+ add_to_pollset_locked(exec_ctx, t, pollset);
+ unlock(exec_ctx, t);
+}
+
/*
- * TRACING
+ * BYTE STREAM
*/
-void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
- const char *context, const char *var,
- int is_client, gpr_uint32 stream_id,
- gpr_int64 current_value, gpr_int64 delta) {
- char *identifier;
- char *context_scope;
- char *context_thread;
- char *underscore_pos = strchr(context, '_');
- GPR_ASSERT(underscore_pos);
- context_thread = gpr_strdup(underscore_pos + 1);
- context_scope = gpr_strdup(context);
- context_scope[underscore_pos - context] = 0;
- if (stream_id) {
- gpr_asprintf(&identifier, "%s[%d]", context_scope, stream_id);
+static void incoming_byte_stream_update_flow_control(
+ grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
+ size_t have_already) {
+ gpr_uint32 max_recv_bytes;
+
+ /* clamp max recv hint to an allowable size */
+ if (max_size_hint >= GPR_UINT32_MAX - transport_global->stream_lookahead) {
+ max_recv_bytes = GPR_UINT32_MAX - transport_global->stream_lookahead;
+ } else {
+ max_recv_bytes = (gpr_uint32)max_size_hint;
+ }
+
+ /* account for bytes already received but unknown to higher layers */
+ if (max_recv_bytes >= have_already) {
+ max_recv_bytes -= (gpr_uint32)have_already;
+ } else {
+ max_recv_bytes = 0;
+ }
+
+ /* add some small lookahead to keep pipelines flowing */
+ GPR_ASSERT(max_recv_bytes <=
+ GPR_UINT32_MAX - transport_global->stream_lookahead);
+ max_recv_bytes += transport_global->stream_lookahead;
+ if (stream_global->max_recv_bytes < max_recv_bytes) {
+ gpr_uint32 add_max_recv_bytes =
+ max_recv_bytes - stream_global->max_recv_bytes;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
+ max_recv_bytes, add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
+ unannounced_incoming_window_for_parse,
+ add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
+ unannounced_incoming_window_for_writing,
+ add_max_recv_bytes);
+ grpc_chttp2_list_add_unannounced_incoming_window_available(transport_global,
+ stream_global);
+ grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
+ }
+}
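A standalone sketch of the credit arithmetic performed by incoming_byte_stream_update_flow_control above. The function and parameter names, and the 65535-byte lookahead used in the example, are illustrative assumptions; only the clamp / discount / lookahead / delta steps mirror the code.

#include <stdint.h>
#include <stdio.h>

static uint32_t compute_added_credit(size_t max_size_hint, size_t have_already,
                                     uint32_t lookahead,
                                     uint32_t current_max_recv_bytes) {
  uint32_t max_recv_bytes;

  /* clamp the hint so adding the lookahead below cannot overflow 32 bits */
  if (max_size_hint >= UINT32_MAX - lookahead) {
    max_recv_bytes = UINT32_MAX - lookahead;
  } else {
    max_recv_bytes = (uint32_t)max_size_hint;
  }

  /* bytes already sitting in the byte stream need no further credit */
  if (max_recv_bytes >= have_already) {
    max_recv_bytes -= (uint32_t)have_already;
  } else {
    max_recv_bytes = 0;
  }

  /* keep some extra window open so the sender can pipeline frames */
  max_recv_bytes += lookahead;

  /* only the portion not already granted becomes new credit */
  return current_max_recv_bytes < max_recv_bytes
             ? max_recv_bytes - current_max_recv_bytes
             : 0;
}

int main(void) {
  /* e.g. the caller asked for up to 16 KiB, 1 KiB is already buffered,
     and 4 KiB of credit is already outstanding */
  printf("added credit: %u\n",
         compute_added_credit(16384, 1024, 65535, 4096));
  return 0;
}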
+
+static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
+ grpc_byte_stream *byte_stream,
+ gpr_slice *slice, size_t max_size_hint,
+ grpc_closure *on_complete) {
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)byte_stream;
+ grpc_chttp2_transport_global *transport_global = &bs->transport->global;
+ grpc_chttp2_stream_global *stream_global = &bs->stream->global;
+
+ lock(bs->transport);
+ if (bs->is_tail) {
+ incoming_byte_stream_update_flow_control(transport_global, stream_global,
+ max_size_hint, bs->slices.length);
+ }
+ if (bs->slices.count > 0) {
+ *slice = gpr_slice_buffer_take_first(&bs->slices);
+ unlock(exec_ctx, bs->transport);
+ return 1;
} else {
- identifier = gpr_strdup(context_scope);
- }
- gpr_log(GPR_INFO,
- "FLOWCTL: %s %-10s %8s %-27s %8lld %c %8lld = %8lld %-10s [%s:%d]",
- is_client ? "client" : "server", identifier, context_thread, var,
- current_value, delta < 0 ? '-' : '+', delta < 0 ? -delta : delta,
- current_value + delta, reason, file, line);
- gpr_free(identifier);
- gpr_free(context_thread);
- gpr_free(context_scope);
+ bs->on_next = on_complete;
+ bs->next = slice;
+ unlock(exec_ctx, bs->transport);
+ return 0;
+ }
+}
+
+static void incoming_byte_stream_unref(grpc_chttp2_incoming_byte_stream *bs) {
+ if (gpr_unref(&bs->refs)) {
+ gpr_slice_buffer_destroy(&bs->slices);
+ gpr_free(bs);
+ }
+}
+
+static void incoming_byte_stream_destroy(grpc_byte_stream *byte_stream) {
+ incoming_byte_stream_unref((grpc_chttp2_incoming_byte_stream *)byte_stream);
+}
+
+void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_incoming_byte_stream *bs,
+ gpr_slice slice) {
+ gpr_mu_lock(&bs->transport->mu);
+ if (bs->on_next != NULL) {
+ *bs->next = slice;
+ grpc_exec_ctx_enqueue(exec_ctx, bs->on_next, 1);
+ bs->on_next = NULL;
+ } else {
+ gpr_slice_buffer_add(&bs->slices, slice);
+ }
+ gpr_mu_unlock(&bs->transport->mu);
+}
+
+void grpc_chttp2_incoming_byte_stream_finished(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs) {
+ incoming_byte_stream_unref(bs);
+}
+
+grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_chttp2_stream_parsing *stream_parsing, gpr_uint32 frame_size,
+ gpr_uint32 flags, grpc_chttp2_incoming_frame_queue *add_to_queue) {
+ grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
+ gpr_malloc(sizeof(*incoming_byte_stream));
+ incoming_byte_stream->base.length = frame_size;
+ incoming_byte_stream->base.flags = flags;
+ incoming_byte_stream->base.next = incoming_byte_stream_next;
+ incoming_byte_stream->base.destroy = incoming_byte_stream_destroy;
+ gpr_ref_init(&incoming_byte_stream->refs, 2);
+ incoming_byte_stream->next_message = NULL;
+ incoming_byte_stream->transport = TRANSPORT_FROM_PARSING(transport_parsing);
+ incoming_byte_stream->stream = STREAM_FROM_PARSING(stream_parsing);
+ gpr_slice_buffer_init(&incoming_byte_stream->slices);
+ incoming_byte_stream->on_next = NULL;
+ incoming_byte_stream->is_tail = 1;
+ if (add_to_queue->head == NULL) {
+ add_to_queue->head = incoming_byte_stream;
+ } else {
+ add_to_queue->tail->is_tail = 0;
+ add_to_queue->tail->next_message = incoming_byte_stream;
+ }
+ add_to_queue->tail = incoming_byte_stream;
+ if (frame_size == 0) {
+ lock(TRANSPORT_FROM_PARSING(transport_parsing));
+ incoming_byte_stream_update_flow_control(
+ &TRANSPORT_FROM_PARSING(transport_parsing)->global,
+ &STREAM_FROM_PARSING(stream_parsing)->global, 0, 0);
+ unlock(exec_ctx, TRANSPORT_FROM_PARSING(transport_parsing));
+ }
+ return incoming_byte_stream;
+}
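A standalone sketch of the head/tail queue discipline that grpc_chttp2_incoming_byte_stream_create above maintains (the frame struct here is a simplified stand-in for grpc_chttp2_incoming_byte_stream): new frames are appended at the tail, and only the newest frame keeps is_tail set, so only the tail element drives further flow-control updates.

#include <stdio.h>
#include <stdlib.h>

typedef struct frame {
  size_t length;
  int is_tail; /* only the newest frame keeps this set */
  struct frame *next_message;
} frame;

typedef struct {
  frame *head;
  frame *tail;
} frame_queue;

static frame *frame_queue_push(frame_queue *q, size_t length) {
  frame *f = malloc(sizeof(*f));
  f->length = length;
  f->next_message = NULL;
  f->is_tail = 1;
  if (q->head == NULL) {
    q->head = f;
  } else {
    q->tail->is_tail = 0; /* the previous tail loses its tail status */
    q->tail->next_message = f;
  }
  q->tail = f;
  return f;
}

static frame *frame_queue_pop(frame_queue *q) {
  frame *f = q->head;
  if (f == NULL) return NULL;
  q->head = f->next_message;
  if (q->head == NULL) q->tail = NULL;
  return f;
}

int main(void) {
  frame_queue q = {NULL, NULL};
  frame *f;
  frame_queue_push(&q, 10);
  frame_queue_push(&q, 20); /* the first frame's is_tail drops to 0 */
  while ((f = frame_queue_pop(&q)) != NULL) {
    printf("len=%zu is_tail=%d\n", f->length, f->is_tail);
    free(f);
  }
  return 0;
}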
+
+/*
+ * TRACING
+ */
+
+static char *format_flowctl_context_var(const char *context, const char *var,
+ gpr_int64 val, gpr_uint32 id,
+ char **scope) {
+ char *underscore_pos;
+ char *result;
+ if (context == NULL) {
+ *scope = NULL;
+ gpr_asprintf(&result, "%s(%lld)", var, val);
+ return result;
+ }
+ underscore_pos = strchr(context, '_');
+ *scope = gpr_strdup(context);
+ (*scope)[underscore_pos - context] = 0;
+ if (id != 0) {
+ char *tmp = *scope;
+ gpr_asprintf(scope, "%s[%d]", tmp, id);
+ gpr_free(tmp);
+ }
+ gpr_asprintf(&result, "%s.%s(%lld)", underscore_pos + 1, var, val);
+ return result;
+}
+
+static int samestr(char *a, char *b) {
+ if (a == NULL) {
+ return b == NULL;
+ }
+ if (b == NULL) {
+ return 0;
+ }
+ return 0 == strcmp(a, b);
+}
+
+void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
+ grpc_chttp2_flowctl_op op, const char *context1,
+ const char *var1, const char *context2,
+ const char *var2, int is_client,
+ gpr_uint32 stream_id, gpr_int64 val1,
+ gpr_int64 val2) {
+ char *scope1;
+ char *scope2;
+ char *label1 =
+ format_flowctl_context_var(context1, var1, val1, stream_id, &scope1);
+ char *label2 =
+ format_flowctl_context_var(context2, var2, val2, stream_id, &scope2);
+ char *clisvr = is_client ? "client" : "server";
+ char *prefix;
+
+ gpr_asprintf(&prefix, "FLOW % 8s: %s % 11s ", phase, clisvr, scope1);
+
+ switch (op) {
+ case GRPC_CHTTP2_FLOWCTL_MOVE:
+ GPR_ASSERT(samestr(scope1, scope2));
+ if (val2 != 0) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "%sMOVE % 40s <- % 40s giving %d", prefix, label1, label2,
+ val1 + val2);
+ }
+ break;
+ case GRPC_CHTTP2_FLOWCTL_CREDIT:
+ GPR_ASSERT(val2 >= 0);
+ if (val2 != 0) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "%sCREDIT % 40s by % 40s giving %d", prefix, label1, label2,
+ val1 + val2);
+ }
+ break;
+ case GRPC_CHTTP2_FLOWCTL_DEBIT:
+ GPR_ASSERT(val2 >= 0);
+ if (val2 != 0) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "%sDEBIT % 40s by % 40s giving %d", prefix, label1, label2,
+ val1 - val2);
+ }
+ break;
+ }
+
+ gpr_free(scope1);
+ gpr_free(scope2);
+ gpr_free(label1);
+ gpr_free(label2);
+ gpr_free(prefix);
}
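A standalone approximation of what format_flowctl_context_var above produces, using only the C standard library. The sample context "stream_global", the variable name and the buffer sizes are assumptions; as in the code above, the context string is assumed to contain an underscore separating the scope (tagged with the stream id) from the part that prefixes the variable name.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Splits a context such as "stream_global" at the underscore: the leading
   part becomes the scope (optionally tagged with the stream id), the rest
   prefixes the variable name.  Mirrors format_flowctl_context_var, but with
   fixed buffers instead of gpr_asprintf. */
static void format_label(const char *context, const char *var, long long val,
                         uint32_t id, char *scope, size_t scope_len,
                         char *label, size_t label_len) {
  const char *underscore = strchr(context, '_'); /* assumed present */
  int prefix_len = (int)(underscore - context);
  if (id != 0) {
    snprintf(scope, scope_len, "%.*s[%u]", prefix_len, context, id);
  } else {
    snprintf(scope, scope_len, "%.*s", prefix_len, context);
  }
  snprintf(label, label_len, "%s.%s(%lld)", underscore + 1, var, val);
}

int main(void) {
  char scope[64], label[128];
  format_label("stream_global", "outgoing_window", 65535, 3, scope,
               sizeof(scope), label, sizeof(label));
  /* prints: stream[3] global.outgoing_window(65535) */
  printf("%s %s\n", scope, label);
  return 0;
}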
/*
@@ -1246,14 +1671,14 @@ static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
}
static const grpc_transport_vtable vtable = {
- sizeof(grpc_chttp2_stream), init_stream, perform_stream_op,
+ sizeof(grpc_chttp2_stream), init_stream, set_pollset, perform_stream_op,
perform_transport_op, destroy_stream, destroy_transport, chttp2_get_peer};
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
- grpc_endpoint *ep, grpc_mdctx *mdctx, int is_client) {
+ grpc_endpoint *ep, int is_client) {
grpc_chttp2_transport *t = gpr_malloc(sizeof(grpc_chttp2_transport));
- init_transport(exec_ctx, t, channel_args, ep, mdctx, is_client != 0);
+ init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}
diff --git a/src/core/transport/chttp2_transport.h b/src/core/transport/chttp2_transport.h
index fce2b680fd..95520501ed 100644
--- a/src/core/transport/chttp2_transport.h
+++ b/src/core/transport/chttp2_transport.h
@@ -42,7 +42,7 @@ extern int grpc_flowctl_trace;
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
- grpc_endpoint *ep, grpc_mdctx *metadata_context, int is_client);
+ grpc_endpoint *ep, int is_client);
void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
diff --git a/src/core/transport/connectivity_state.c b/src/core/transport/connectivity_state.c
index 09b298c131..3c3fd4671d 100644
--- a/src/core/transport/connectivity_state.c
+++ b/src/core/transport/connectivity_state.c
@@ -54,8 +54,7 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
case GRPC_CHANNEL_FATAL_FAILURE:
return "FATAL_FAILURE";
}
- abort();
- return "UNKNOWN";
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
@@ -88,7 +87,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state_tracker *tracker) {
if (grpc_connectivity_state_trace) {
- gpr_log(GPR_DEBUG, "CONWATCH: %s: get %s", tracker->name,
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(tracker->current_state));
}
return tracker->current_state;
@@ -98,42 +97,47 @@ int grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify) {
if (grpc_connectivity_state_trace) {
- gpr_log(GPR_DEBUG, "CONWATCH: %s: from %s [cur=%s] notify=%p",
- tracker->name, grpc_connectivity_state_name(*current),
- grpc_connectivity_state_name(tracker->current_state), notify);
+ if (current == NULL) {
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
+ tracker->name, notify);
+ } else {
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
+ tracker->name, grpc_connectivity_state_name(*current),
+ grpc_connectivity_state_name(tracker->current_state), notify);
+ }
}
- if (tracker->current_state != *current) {
- *current = tracker->current_state;
- grpc_exec_ctx_enqueue(exec_ctx, notify, 1);
+ if (current == NULL) {
+ grpc_connectivity_state_watcher *w = tracker->watchers;
+ if (w != NULL && w->notify == notify) {
+ grpc_exec_ctx_enqueue(exec_ctx, notify, 0);
+ tracker->watchers = w->next;
+ gpr_free(w);
+ return 0;
+ }
+ while (w != NULL) {
+ grpc_connectivity_state_watcher *rm_candidate = w->next;
+ if (rm_candidate != NULL && rm_candidate->notify == notify) {
+ grpc_exec_ctx_enqueue(exec_ctx, notify, 0);
+ w->next = w->next->next;
+ gpr_free(rm_candidate);
+ return 0;
+ }
+ w = w->next;
+ }
+ return 0;
} else {
- grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
- w->current = current;
- w->notify = notify;
- w->next = tracker->watchers;
- tracker->watchers = w;
- }
- return tracker->current_state == GRPC_CHANNEL_IDLE;
-}
-
-int grpc_connectivity_state_change_unsubscribe(
- grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
- grpc_closure *subscribed_notify) {
- grpc_connectivity_state_watcher *w = tracker->watchers;
- if (w != NULL && w->notify == subscribed_notify) {
- tracker->watchers = w->next;
- gpr_free(w);
- return 1;
- }
- while (w != NULL) {
- grpc_connectivity_state_watcher *rm_candidate = w->next;
- if (rm_candidate != NULL && rm_candidate->notify == subscribed_notify) {
- w->next = w->next->next;
- gpr_free(rm_candidate);
- return 1;
+ if (tracker->current_state != *current) {
+ *current = tracker->current_state;
+ grpc_exec_ctx_enqueue(exec_ctx, notify, 1);
+ } else {
+ grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
+ w->current = current;
+ w->notify = notify;
+ w->next = tracker->watchers;
+ tracker->watchers = w;
}
- w = w->next;
+ return tracker->current_state == GRPC_CHANNEL_IDLE;
}
- return 0;
}
void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
@@ -142,7 +146,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
const char *reason) {
grpc_connectivity_state_watcher *w;
if (grpc_connectivity_state_trace) {
- gpr_log(GPR_DEBUG, "SET: %s: %s --> %s [%s]", tracker->name,
+ gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s]", tracker, tracker->name,
grpc_connectivity_state_name(tracker->current_state),
grpc_connectivity_state_name(state), reason);
}
diff --git a/src/core/transport/connectivity_state.h b/src/core/transport/connectivity_state.h
index 119b1c1554..a4eb6652e5 100644
--- a/src/core/transport/connectivity_state.h
+++ b/src/core/transport/connectivity_state.h
@@ -57,6 +57,8 @@ typedef struct {
extern int grpc_connectivity_state_trace;
+const char *grpc_connectivity_state_name(grpc_connectivity_state state);
+
void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state init_state,
const char *name);
@@ -73,16 +75,11 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state_tracker *tracker);
-/** Return 1 if the channel should start connecting, 0 otherwise */
+/** Return 1 if the channel should start connecting, 0 otherwise.
+ If current==NULL, cancel \a notify if it is already queued (it will be
+ invoked with success==0 in that case) */
int grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify);
-/** Remove \a subscribed_notify from the list of closures to be called on a
- * state change if present, returning 1. Otherwise, nothing is done and return
- * 0. */
-int grpc_connectivity_state_change_unsubscribe(
- grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
- grpc_closure *subscribed_notify);
-
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CONNECTIVITY_STATE_H */
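A standalone sketch of the merged subscribe/unsubscribe behaviour documented above (plain function pointers and a synchronous callback stand in for grpc_closure and the exec_ctx; the return value indicating whether to start connecting is omitted): a non-NULL current either fires immediately, when the tracked state already differs, or enqueues a watcher; current==NULL removes a queued watcher and fires it with success==0.

#include <stdio.h>
#include <stdlib.h>

typedef void (*notify_fn)(void *arg, int success);

typedef struct watcher {
  int *current;
  notify_fn notify;
  void *arg;
  struct watcher *next;
} watcher;

typedef struct {
  int state; /* stands in for grpc_connectivity_state */
  watcher *watchers;
} state_tracker;

static void notify_on_state_change(state_tracker *t, int *current,
                                   notify_fn notify, void *arg) {
  if (current == NULL) {
    /* unsubscribe: drop the matching watcher and fire it with success==0 */
    for (watcher **pw = &t->watchers; *pw != NULL; pw = &(*pw)->next) {
      if ((*pw)->notify == notify) {
        watcher *w = *pw;
        *pw = w->next;
        w->notify(w->arg, 0);
        free(w);
        return;
      }
    }
    return;
  }
  if (t->state != *current) {
    /* state already differs: report it right away */
    *current = t->state;
    notify(arg, 1);
  } else {
    /* otherwise remember the watcher for a later state change */
    watcher *w = malloc(sizeof(*w));
    w->current = current;
    w->notify = notify;
    w->arg = arg;
    w->next = t->watchers;
    t->watchers = w;
  }
}

static void on_change(void *arg, int success) {
  printf("%s: success=%d\n", (const char *)arg, success);
}

int main(void) {
  state_tracker t = {0, NULL};
  int seen = 0; /* matches the tracked state, so the watcher is queued */
  notify_on_state_change(&t, &seen, on_change, (void *)"watch");
  notify_on_state_change(&t, NULL, on_change, NULL); /* watch: success=0 */
  return 0;
}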
diff --git a/src/core/transport/metadata.c b/src/core/transport/metadata.c
index 68f23177eb..df05d1a302 100644
--- a/src/core/transport/metadata.c
+++ b/src/core/transport/metadata.c
@@ -31,19 +31,32 @@
*
*/
-#include "src/core/iomgr/sockaddr.h"
#include "src/core/transport/metadata.h"
#include <assert.h>
#include <stddef.h>
#include <string.h>
+#include <grpc/compression.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+#include "src/core/profiling/timers.h"
#include "src/core/support/murmur_hash.h"
+#include "src/core/support/string.h"
#include "src/core/transport/chttp2/bin_encoder.h"
-#include <grpc/support/time.h>
+#include "src/core/transport/static_metadata.h"
+
+/* There are two kinds of mdelem and mdstr instances.
+ * Static instances are declared in static_metadata.{h,c} and
+ * are initialized by grpc_mdctx_global_init().
+ * Dynamic instances are stored in sharded global hash tables, and are backed
+ * by internal_string and internal_metadata structures.
+ * Internal helper functions herein (is_mdstr_static, is_mdelem_static) are
+ * used to determine which kind of element a pointer refers to.
+ */
#define INITIAL_STRTAB_CAPACITY 4
#define INITIAL_MDTAB_CAPACITY 4
@@ -51,107 +64,196 @@
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
-#define INTERNAL_STRING_REF(s) internal_string_ref((s), __FILE__, __LINE__)
-#define INTERNAL_STRING_UNREF(s) internal_string_unref((s), __FILE__, __LINE__)
-#define REF_MD_LOCKED(s) ref_md_locked((s), __FILE__, __LINE__)
+#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__)
#else
#define DEBUG_ARGS
#define FWD_DEBUG_ARGS
-#define INTERNAL_STRING_REF(s) internal_string_ref((s))
-#define INTERNAL_STRING_UNREF(s) internal_string_unref((s))
-#define REF_MD_LOCKED(s) ref_md_locked((s))
+#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s))
#endif
+#define TABLE_IDX(hash, log2_shards, capacity) \
+ (((hash) >> (log2_shards)) % (capacity))
+#define SHARD_IDX(hash, log2_shards) ((hash) & ((1 << (log2_shards)) - 1))
+
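A small standalone demonstration of how the two macros above split a single hash (the hash value, shard count and capacities below are arbitrary examples): the low log2_shards bits pick the shard, the remaining bits index into that shard's table, so growing a table never changes which shard an entry belongs to.

#include <stdint.h>
#include <stdio.h>

#define TABLE_IDX(hash, log2_shards, capacity) \
  (((hash) >> (log2_shards)) % (capacity))
#define SHARD_IDX(hash, log2_shards) ((hash) & ((1 << (log2_shards)) - 1))

int main(void) {
  uint32_t hash = 0x9e3779b9u; /* arbitrary example hash */
  /* 32 string shards (log2 = 5), each starting with a small table */
  printf("shard %u, slot %u of 4\n", (unsigned)SHARD_IDX(hash, 5),
         (unsigned)TABLE_IDX(hash, 5, 4));
  printf("shard %u, slot %u of 8 after one growth\n",
         (unsigned)SHARD_IDX(hash, 5), (unsigned)TABLE_IDX(hash, 5, 8));
  return 0;
}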
typedef void (*destroy_user_data_func)(void *user_data);
+/* Shadow structure for grpc_mdstr for non-static values */
typedef struct internal_string {
/* must be byte compatible with grpc_mdstr */
gpr_slice slice;
gpr_uint32 hash;
/* private only data */
- gpr_uint32 refs;
+ gpr_atm refcnt;
+
gpr_uint8 has_base64_and_huffman_encoded;
gpr_slice_refcount refcount;
gpr_slice base64_and_huffman;
- grpc_mdctx *context;
-
struct internal_string *bucket_next;
} internal_string;
+/* Shadow structure for grpc_mdelem for non-static elements */
typedef struct internal_metadata {
/* must be byte compatible with grpc_mdelem */
internal_string *key;
internal_string *value;
+ /* private only data */
gpr_atm refcnt;
- /* private only data */
gpr_mu mu_user_data;
gpr_atm destroy_user_data;
gpr_atm user_data;
- grpc_mdctx *context;
struct internal_metadata *bucket_next;
} internal_metadata;
-struct grpc_mdctx {
- gpr_uint32 hash_seed;
- int refs;
+typedef struct strtab_shard {
+ gpr_mu mu;
+ internal_string **strs;
+ size_t count;
+ size_t capacity;
+} strtab_shard;
+typedef struct mdtab_shard {
gpr_mu mu;
+ internal_metadata **elems;
+ size_t count;
+ size_t capacity;
+ size_t free;
+} mdtab_shard;
- internal_string **strtab;
- size_t strtab_count;
- size_t strtab_capacity;
+#define LOG2_STRTAB_SHARD_COUNT 5
+#define LOG2_MDTAB_SHARD_COUNT 4
+#define STRTAB_SHARD_COUNT ((size_t)(1 << LOG2_STRTAB_SHARD_COUNT))
+#define MDTAB_SHARD_COUNT ((size_t)(1 << LOG2_MDTAB_SHARD_COUNT))
- internal_metadata **mdtab;
- size_t mdtab_count;
- size_t mdtab_free;
- size_t mdtab_capacity;
-};
-
-static void internal_string_ref(internal_string *s DEBUG_ARGS);
-static void internal_string_unref(internal_string *s DEBUG_ARGS);
-static void discard_metadata(grpc_mdctx *ctx);
-static void gc_mdtab(grpc_mdctx *ctx);
-static void metadata_context_destroy_locked(grpc_mdctx *ctx);
-
-static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); }
-
-static void unlock(grpc_mdctx *ctx) {
- /* If the context has been orphaned we'd like to delete it soon. We check
- conditions in unlock as it signals the end of mutations on a context.
-
- We need to ensure all grpc_mdelem and grpc_mdstr elements have been deleted
- first. This is equivalent to saying that both tables have zero counts,
- which is equivalent to saying that strtab_count is zero (as mdelem's MUST
- reference an mdstr for their key and value slots).
-
- To encourage that to happen, we start discarding zero reference count
- mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
- case), since otherwise we can be stuck waiting for a garbage collection
- that will never happen. */
- if (ctx->refs == 0) {
-/* uncomment if you're having trouble diagnosing an mdelem leak to make
- things clearer (slows down destruction a lot, however) */
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gc_mdtab(ctx);
-#endif
- if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
- discard_metadata(ctx);
+/* hash seed: decided at initialization time */
+static gpr_uint32 g_hash_seed;
+static int g_forced_hash_seed = 0;
+
+/* linearly probed hash tables for static element lookup */
+static grpc_mdstr *g_static_strtab[GRPC_STATIC_MDSTR_COUNT * 2];
+static grpc_mdelem *g_static_mdtab[GRPC_STATIC_MDELEM_COUNT * 2];
+static size_t g_static_strtab_maxprobe;
+static size_t g_static_mdtab_maxprobe;
+
+static strtab_shard g_strtab_shard[STRTAB_SHARD_COUNT];
+static mdtab_shard g_mdtab_shard[MDTAB_SHARD_COUNT];
+
+static void gc_mdtab(mdtab_shard *shard);
+
+void grpc_test_only_set_metadata_hash_seed(gpr_uint32 seed) {
+ g_hash_seed = seed;
+ g_forced_hash_seed = 1;
+}
+
+void grpc_mdctx_global_init(void) {
+ size_t i, j;
+ if (!g_forced_hash_seed) {
+ g_hash_seed = (gpr_uint32)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
+ }
+ g_static_strtab_maxprobe = 0;
+ g_static_mdtab_maxprobe = 0;
+ /* build static tables */
+ memset(g_static_mdtab, 0, sizeof(g_static_mdtab));
+ memset(g_static_strtab, 0, sizeof(g_static_strtab));
+ for (i = 0; i < GRPC_STATIC_MDSTR_COUNT; i++) {
+ grpc_mdstr *elem = &grpc_static_mdstr_table[i];
+ const char *str = grpc_static_metadata_strings[i];
+ gpr_uint32 hash = gpr_murmur_hash3(str, strlen(str), g_hash_seed);
+ *(gpr_slice *)&elem->slice = gpr_slice_from_static_string(str);
+ *(gpr_uint32 *)&elem->hash = hash;
+ for (j = 0;; j++) {
+ size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_strtab);
+ if (g_static_strtab[idx] == NULL) {
+ g_static_strtab[idx] = &grpc_static_mdstr_table[i];
+ break;
+ }
+ }
+ if (j > g_static_strtab_maxprobe) {
+ g_static_strtab_maxprobe = j;
}
- if (ctx->strtab_count == 0) {
- metadata_context_destroy_locked(ctx);
- return;
+ }
+ for (i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
+ grpc_mdelem *elem = &grpc_static_mdelem_table[i];
+ grpc_mdstr *key =
+ &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 0]];
+ grpc_mdstr *value =
+ &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 1]];
+ gpr_uint32 hash = GRPC_MDSTR_KV_HASH(key->hash, value->hash);
+ *(grpc_mdstr **)&elem->key = key;
+ *(grpc_mdstr **)&elem->value = value;
+ for (j = 0;; j++) {
+ size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_mdtab);
+ if (g_static_mdtab[idx] == NULL) {
+ g_static_mdtab[idx] = elem;
+ break;
+ }
+ }
+ if (j > g_static_mdtab_maxprobe) {
+ g_static_mdtab_maxprobe = j;
+ }
+ }
+ /* initialize shards */
+ for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
+ strtab_shard *shard = &g_strtab_shard[i];
+ gpr_mu_init(&shard->mu);
+ shard->count = 0;
+ shard->capacity = INITIAL_STRTAB_CAPACITY;
+ shard->strs = gpr_malloc(sizeof(*shard->strs) * shard->capacity);
+ memset(shard->strs, 0, sizeof(*shard->strs) * shard->capacity);
+ }
+ for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
+ mdtab_shard *shard = &g_mdtab_shard[i];
+ gpr_mu_init(&shard->mu);
+ shard->count = 0;
+ shard->free = 0;
+ shard->capacity = INITIAL_MDTAB_CAPACITY;
+ shard->elems = gpr_malloc(sizeof(*shard->elems) * shard->capacity);
+ memset(shard->elems, 0, sizeof(*shard->elems) * shard->capacity);
+ }
+}
+
+void grpc_mdctx_global_shutdown(void) {
+ size_t i;
+ for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
+ mdtab_shard *shard = &g_mdtab_shard[i];
+ gpr_mu_destroy(&shard->mu);
+ gc_mdtab(shard);
+ /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
+ if (shard->count != 0) {
+ gpr_log(GPR_DEBUG, "WARNING: %d metadata elements were leaked",
+ (int)shard->count);
+ }
+ gpr_free(shard->elems);
+ }
+ for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
+ strtab_shard *shard = &g_strtab_shard[i];
+ gpr_mu_destroy(&shard->mu);
+ /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
+ if (shard->count != 0) {
+ gpr_log(GPR_DEBUG, "WARNING: %d metadata strings were leaked",
+ (int)shard->count);
}
+ gpr_free(shard->strs);
}
- gpr_mu_unlock(&ctx->mu);
}
-static void ref_md_locked(internal_metadata *md DEBUG_ARGS) {
+static int is_mdstr_static(grpc_mdstr *s) {
+ return s >= &grpc_static_mdstr_table[0] &&
+ s < &grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+}
+
+static int is_mdelem_static(grpc_mdelem *e) {
+ return e >= &grpc_static_mdelem_table[0] &&
+ e < &grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+}
+
+static void ref_md_locked(mdtab_shard *shard,
+ internal_metadata *md DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%d->%d: '%s' = '%s'", md,
@@ -161,187 +263,113 @@ static void ref_md_locked(internal_metadata *md DEBUG_ARGS) {
grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 2)) {
- md->context->mdtab_free--;
+ shard->free--;
} else {
GPR_ASSERT(1 != gpr_atm_no_barrier_fetch_add(&md->refcnt, -1));
}
}
-grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) {
- grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx));
-
- ctx->refs = 1;
- ctx->hash_seed = seed;
- gpr_mu_init(&ctx->mu);
- ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY);
- memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY);
- ctx->strtab_count = 0;
- ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY;
- ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY);
- memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY);
- ctx->mdtab_count = 0;
- ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY;
- ctx->mdtab_free = 0;
-
- return ctx;
-}
-
-grpc_mdctx *grpc_mdctx_create(void) {
- /* This seed is used to prevent remote connections from controlling hash table
- * collisions. It needs to be somewhat unpredictable to a remote connection.
- */
- return grpc_mdctx_create_with_seed(
- (gpr_uint32)gpr_now(GPR_CLOCK_REALTIME).tv_nsec);
-}
-
-static void discard_metadata(grpc_mdctx *ctx) {
+static void grow_strtab(strtab_shard *shard) {
+ size_t capacity = shard->capacity * 2;
size_t i;
- internal_metadata *next, *cur;
-
- for (i = 0; i < ctx->mdtab_capacity; i++) {
- cur = ctx->mdtab[i];
- while (cur) {
- void *user_data = (void *)gpr_atm_no_barrier_load(&cur->user_data);
- GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0);
- next = cur->bucket_next;
- INTERNAL_STRING_UNREF(cur->key);
- INTERNAL_STRING_UNREF(cur->value);
- if (user_data != NULL) {
- ((destroy_user_data_func)gpr_atm_no_barrier_load(
- &cur->destroy_user_data))(user_data);
- }
- gpr_mu_destroy(&cur->mu_user_data);
- gpr_free(cur);
- cur = next;
- ctx->mdtab_free--;
- ctx->mdtab_count--;
- }
- ctx->mdtab[i] = NULL;
- }
-}
-
-static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
- GPR_ASSERT(ctx->strtab_count == 0);
- GPR_ASSERT(ctx->mdtab_count == 0);
- GPR_ASSERT(ctx->mdtab_free == 0);
- gpr_free(ctx->strtab);
- gpr_free(ctx->mdtab);
- gpr_mu_unlock(&ctx->mu);
- gpr_mu_destroy(&ctx->mu);
- gpr_free(ctx);
-}
-
-void grpc_mdctx_ref(grpc_mdctx *ctx) {
- lock(ctx);
- GPR_ASSERT(ctx->refs > 0);
- ctx->refs++;
- unlock(ctx);
-}
+ internal_string **strtab;
+ internal_string *s, *next;
-void grpc_mdctx_unref(grpc_mdctx *ctx) {
- lock(ctx);
- GPR_ASSERT(ctx->refs > 0);
- ctx->refs--;
- unlock(ctx);
-}
+ GPR_TIMER_BEGIN("grow_strtab", 0);
-static void grow_strtab(grpc_mdctx *ctx) {
- size_t capacity = ctx->strtab_capacity * 2;
- size_t i;
- internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
- internal_string *s, *next;
+ strtab = gpr_malloc(sizeof(internal_string *) * capacity);
memset(strtab, 0, sizeof(internal_string *) * capacity);
- for (i = 0; i < ctx->strtab_capacity; i++) {
- for (s = ctx->strtab[i]; s; s = next) {
+ for (i = 0; i < shard->capacity; i++) {
+ for (s = shard->strs[i]; s; s = next) {
+ size_t idx = TABLE_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT, capacity);
next = s->bucket_next;
- s->bucket_next = strtab[s->hash % capacity];
- strtab[s->hash % capacity] = s;
+ s->bucket_next = strtab[idx];
+ strtab[idx] = s;
}
}
- gpr_free(ctx->strtab);
- ctx->strtab = strtab;
- ctx->strtab_capacity = capacity;
+ gpr_free(shard->strs);
+ shard->strs = strtab;
+ shard->capacity = capacity;
+
+ GPR_TIMER_END("grow_strtab", 0);
}
-static void internal_destroy_string(internal_string *is) {
+static void internal_destroy_string(strtab_shard *shard, internal_string *is) {
internal_string **prev_next;
internal_string *cur;
- grpc_mdctx *ctx = is->context;
+ GPR_TIMER_BEGIN("internal_destroy_string", 0);
if (is->has_base64_and_huffman_encoded) {
gpr_slice_unref(is->base64_and_huffman);
}
- for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity],
+ for (prev_next = &shard->strs[TABLE_IDX(is->hash, LOG2_STRTAB_SHARD_COUNT,
+ shard->capacity)],
cur = *prev_next;
cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
;
*prev_next = cur->bucket_next;
- ctx->strtab_count--;
+ shard->count--;
gpr_free(is);
-}
-
-static void internal_string_ref(internal_string *s DEBUG_ARGS) {
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR REF:%p:%d->%d: '%s'", s,
- s->refs, s->refs + 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
-#endif
- ++s->refs;
-}
-
-static void internal_string_unref(internal_string *s DEBUG_ARGS) {
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR UNREF:%p:%d->%d: '%s'", s,
- s->refs, s->refs - 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
-#endif
- GPR_ASSERT(s->refs > 0);
- if (0 == --s->refs) {
- internal_destroy_string(s);
- }
+ GPR_TIMER_END("internal_destroy_string", 0);
}
static void slice_ref(void *p) {
internal_string *is =
(internal_string *)((char *)p - offsetof(internal_string, refcount));
- grpc_mdctx *ctx = is->context;
- lock(ctx);
- INTERNAL_STRING_REF(is);
- unlock(ctx);
+ GRPC_MDSTR_REF((grpc_mdstr *)(is));
}
static void slice_unref(void *p) {
internal_string *is =
(internal_string *)((char *)p - offsetof(internal_string, refcount));
- grpc_mdctx *ctx = is->context;
- lock(ctx);
- INTERNAL_STRING_UNREF(is);
- unlock(ctx);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)(is));
}
-grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
- return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
+grpc_mdstr *grpc_mdstr_from_string(const char *str) {
+ return grpc_mdstr_from_buffer((const gpr_uint8 *)str, strlen(str));
}
-grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) {
- grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice),
+grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice) {
+ grpc_mdstr *result = grpc_mdstr_from_buffer(GPR_SLICE_START_PTR(slice),
GPR_SLICE_LENGTH(slice));
gpr_slice_unref(slice);
return result;
}
-grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
- size_t length) {
- gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
+grpc_mdstr *grpc_mdstr_from_buffer(const gpr_uint8 *buf, size_t length) {
+ gpr_uint32 hash = gpr_murmur_hash3(buf, length, g_hash_seed);
internal_string *s;
+ strtab_shard *shard =
+ &g_strtab_shard[SHARD_IDX(hash, LOG2_STRTAB_SHARD_COUNT)];
+ size_t i;
+ size_t idx;
+
+ GPR_TIMER_BEGIN("grpc_mdstr_from_buffer", 0);
+
+ /* search for a static string */
+ for (i = 0; i <= g_static_strtab_maxprobe; i++) {
+ grpc_mdstr *ss;
+ idx = (hash + i) % GPR_ARRAY_SIZE(g_static_strtab);
+ ss = g_static_strtab[idx];
+ if (ss == NULL) break;
+ if (ss->hash == hash && GPR_SLICE_LENGTH(ss->slice) == length &&
+ 0 == memcmp(buf, GPR_SLICE_START_PTR(ss->slice), length)) {
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
+ return ss;
+ }
+ }
- lock(ctx);
+ gpr_mu_lock(&shard->mu);
/* search for an existing string */
- for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
+ idx = TABLE_IDX(hash, LOG2_STRTAB_SHARD_COUNT, shard->capacity);
+ for (s = shard->strs[idx]; s; s = s->bucket_next) {
if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
- INTERNAL_STRING_REF(s);
- unlock(ctx);
+ GRPC_MDSTR_REF((grpc_mdstr *)s);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return (grpc_mdstr *)s;
}
}
@@ -350,7 +378,7 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
if (length + 1 < GPR_SLICE_INLINED_SIZE) {
/* string data goes directly into the slice */
s = gpr_malloc(sizeof(internal_string));
- s->refs = 1;
+ gpr_atm_rel_store(&s->refcnt, 2);
s->slice.refcount = NULL;
memcpy(s->slice.data.inlined.bytes, buf, length);
s->slice.data.inlined.bytes[length] = 0;
@@ -359,7 +387,7 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
/* string data goes after the internal_string header, and we +1 for null
terminator */
s = gpr_malloc(sizeof(internal_string) + length + 1);
- s->refs = 1;
+ gpr_atm_rel_store(&s->refcnt, 2);
s->refcount.ref = slice_ref;
s->refcount.unref = slice_unref;
s->slice.refcount = &s->refcount;
@@ -371,102 +399,125 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
}
s->has_base64_and_huffman_encoded = 0;
s->hash = hash;
- s->context = ctx;
- s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
- ctx->strtab[hash % ctx->strtab_capacity] = s;
+ s->bucket_next = shard->strs[idx];
+ shard->strs[idx] = s;
- ctx->strtab_count++;
+ shard->count++;
- if (ctx->strtab_count > ctx->strtab_capacity * 2) {
- grow_strtab(ctx);
+ if (shard->count > shard->capacity * 2) {
+ grow_strtab(shard);
}
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return (grpc_mdstr *)s;
}
-static void gc_mdtab(grpc_mdctx *ctx) {
+static void gc_mdtab(mdtab_shard *shard) {
size_t i;
internal_metadata **prev_next;
internal_metadata *md, *next;
- for (i = 0; i < ctx->mdtab_capacity; i++) {
- prev_next = &ctx->mdtab[i];
- for (md = ctx->mdtab[i]; md; md = next) {
+ GPR_TIMER_BEGIN("gc_mdtab", 0);
+ for (i = 0; i < shard->capacity; i++) {
+ prev_next = &shard->elems[i];
+ for (md = shard->elems[i]; md; md = next) {
void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
- INTERNAL_STRING_UNREF(md->key);
- INTERNAL_STRING_UNREF(md->value);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)md->key);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)md->value);
if (md->user_data) {
((destroy_user_data_func)gpr_atm_no_barrier_load(
&md->destroy_user_data))(user_data);
}
gpr_free(md);
*prev_next = next;
- ctx->mdtab_free--;
- ctx->mdtab_count--;
+ shard->free--;
+ shard->count--;
} else {
prev_next = &md->bucket_next;
}
}
}
-
- GPR_ASSERT(ctx->mdtab_free == 0);
+ GPR_TIMER_END("gc_mdtab", 0);
}
-static void grow_mdtab(grpc_mdctx *ctx) {
- size_t capacity = ctx->mdtab_capacity * 2;
+static void grow_mdtab(mdtab_shard *shard) {
+ size_t capacity = shard->capacity * 2;
size_t i;
- internal_metadata **mdtab =
- gpr_malloc(sizeof(internal_metadata *) * capacity);
+ internal_metadata **mdtab;
internal_metadata *md, *next;
gpr_uint32 hash;
+
+ GPR_TIMER_BEGIN("grow_mdtab", 0);
+
+ mdtab = gpr_malloc(sizeof(internal_metadata *) * capacity);
memset(mdtab, 0, sizeof(internal_metadata *) * capacity);
- for (i = 0; i < ctx->mdtab_capacity; i++) {
- for (md = ctx->mdtab[i]; md; md = next) {
+ for (i = 0; i < shard->capacity; i++) {
+ for (md = shard->elems[i]; md; md = next) {
+ size_t idx;
hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
next = md->bucket_next;
- md->bucket_next = mdtab[hash % capacity];
- mdtab[hash % capacity] = md;
+ idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, capacity);
+ md->bucket_next = mdtab[idx];
+ mdtab[idx] = md;
}
}
- gpr_free(ctx->mdtab);
- ctx->mdtab = mdtab;
- ctx->mdtab_capacity = capacity;
+ gpr_free(shard->elems);
+ shard->elems = mdtab;
+ shard->capacity = capacity;
+
+ GPR_TIMER_END("grow_mdtab", 0);
}
-static void rehash_mdtab(grpc_mdctx *ctx) {
- if (ctx->mdtab_free > ctx->mdtab_capacity / 4) {
- gc_mdtab(ctx);
+static void rehash_mdtab(mdtab_shard *shard) {
+ if (shard->free > shard->capacity / 4) {
+ gc_mdtab(shard);
} else {
- grow_mdtab(ctx);
+ grow_mdtab(shard);
}
}
-grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
- grpc_mdstr *mkey,
+grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *mkey,
grpc_mdstr *mvalue) {
internal_string *key = (internal_string *)mkey;
internal_string *value = (internal_string *)mvalue;
gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
internal_metadata *md;
+ mdtab_shard *shard = &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
+ size_t i;
+ size_t idx;
+
+ GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
+
+ if (is_mdstr_static(mkey) && is_mdstr_static(mvalue)) {
+ for (i = 0; i <= g_static_mdtab_maxprobe; i++) {
+ grpc_mdelem *smd;
+ idx = (hash + i) % GPR_ARRAY_SIZE(g_static_mdtab);
+ smd = g_static_mdtab[idx];
+ if (smd == NULL) break;
+ if (smd->key == mkey && smd->value == mvalue) {
+ GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
+ return smd;
+ }
+ }
+ }
- GPR_ASSERT(key->context == ctx);
- GPR_ASSERT(value->context == ctx);
-
- lock(ctx);
+ gpr_mu_lock(&shard->mu);
+ idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, shard->capacity);
/* search for an existing pair */
- for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
+ for (md = shard->elems[idx]; md; md = md->bucket_next) {
if (md->key == key && md->value == value) {
- REF_MD_LOCKED(md);
- INTERNAL_STRING_UNREF(key);
- INTERNAL_STRING_UNREF(value);
- unlock(ctx);
+ REF_MD_LOCKED(shard, md);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)key);
+ GRPC_MDSTR_UNREF((grpc_mdstr *)value);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
return (grpc_mdelem *)md;
}
}
@@ -474,12 +525,12 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
/* not found: create a new pair */
md = gpr_malloc(sizeof(internal_metadata));
gpr_atm_rel_store(&md->refcnt, 2);
- md->context = ctx;
md->key = key;
md->value = value;
md->user_data = 0;
md->destroy_user_data = 0;
- md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
+ md->bucket_next = shard->elems[idx];
+ shard->elems[idx] = md;
gpr_mu_init(&md->mu_user_data);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
gpr_log(GPR_DEBUG, "ELM NEW:%p:%d: '%s' = '%s'", md,
@@ -487,42 +538,39 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
- ctx->mdtab[hash % ctx->mdtab_capacity] = md;
- ctx->mdtab_count++;
+ shard->count++;
- if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
- rehash_mdtab(ctx);
+ if (shard->count > shard->capacity * 2) {
+ rehash_mdtab(shard);
}
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
+
+ GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
return (grpc_mdelem *)md;
}
-grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
- const char *value) {
- return grpc_mdelem_from_metadata_strings(ctx,
- grpc_mdstr_from_string(ctx, key),
- grpc_mdstr_from_string(ctx, value));
+grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value) {
+ return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_string(key),
+ grpc_mdstr_from_string(value));
}
-grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
- gpr_slice value) {
- return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key),
- grpc_mdstr_from_slice(ctx, value));
+grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value) {
+ return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_slice(key),
+ grpc_mdstr_from_slice(value));
}
-grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
- const char *key,
+grpc_mdelem *grpc_mdelem_from_string_and_buffer(const char *key,
const gpr_uint8 *value,
size_t value_length) {
return grpc_mdelem_from_metadata_strings(
- ctx, grpc_mdstr_from_string(ctx, key),
- grpc_mdstr_from_buffer(ctx, value, value_length));
+ grpc_mdstr_from_string(key), grpc_mdstr_from_buffer(value, value_length));
}
grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
internal_metadata *md = (internal_metadata *)gmd;
+ if (is_mdelem_static(gmd)) return gmd;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%d->%d: '%s' = '%s'", md,
@@ -542,6 +590,8 @@ grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
internal_metadata *md = (internal_metadata *)gmd;
+ if (!md) return;
+ if (is_mdelem_static(gmd)) return;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
@@ -551,13 +601,17 @@ void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
if (2 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
- grpc_mdctx *ctx = md->context;
- lock(ctx);
+ gpr_uint32 hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
+ mdtab_shard *shard =
+ &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
+ GPR_TIMER_BEGIN("grpc_mdelem_unref.to_zero", 0);
+ gpr_mu_lock(&shard->mu);
if (1 == gpr_atm_no_barrier_load(&md->refcnt)) {
- ctx->mdtab_free++;
+ shard->free++;
gpr_atm_no_barrier_store(&md->refcnt, 0);
}
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_mdelem_unref.to_zero", 0);
}
}
@@ -567,36 +621,31 @@ const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
internal_string *s = (internal_string *)gs;
- grpc_mdctx *ctx = s->context;
- lock(ctx);
- internal_string_ref(s FWD_DEBUG_ARGS);
- unlock(ctx);
+ if (is_mdstr_static(gs)) return gs;
+ GPR_ASSERT(gpr_atm_full_fetch_add(&s->refcnt, 1) != 0);
return gs;
}
void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
internal_string *s = (internal_string *)gs;
- grpc_mdctx *ctx = s->context;
- lock(ctx);
- internal_string_unref(s FWD_DEBUG_ARGS);
- unlock(ctx);
-}
-
-size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) {
- return ctx->mdtab_capacity;
-}
-
-size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) {
- return ctx->mdtab_count;
-}
-
-size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) {
- return ctx->mdtab_free;
+ if (is_mdstr_static(gs)) return;
+ if (2 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
+ strtab_shard *shard =
+ &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
+ gpr_mu_lock(&shard->mu);
+ if (1 == gpr_atm_no_barrier_load(&s->refcnt)) {
+ internal_destroy_string(shard, s);
+ }
+ gpr_mu_unlock(&shard->mu);
+ }
}
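A simplified standalone model of the biased reference count that grpc_mdstr_ref/grpc_mdstr_unref above rely on, using C11 atomics and a pthread mutex in place of gpr_atm and the shard lock (single table, no unlinking): the stored count is the number of external references plus one, a freshly interned string therefore starts at 2, and only the 2-to-1 transition takes the lock and re-checks the count, so a concurrent lookup that revived the string under that same lock wins.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  atomic_long refcnt;        /* stored value = external refs + 1 */
  pthread_mutex_t *table_mu; /* protects the (not modeled) intern table */
  const char *text;
} interned_str;

static interned_str *str_create(pthread_mutex_t *mu, const char *text) {
  interned_str *s = malloc(sizeof(*s));
  atomic_init(&s->refcnt, 2); /* one external reference plus the +1 bias */
  s->table_mu = mu;
  s->text = text;
  return s;
}

static interned_str *str_ref(interned_str *s) {
  /* reviving from the biased value 1 (zero external refs, not yet freed)
     is allowed; the real code does this during table lookups, which run
     under the shard lock */
  atomic_fetch_add(&s->refcnt, 1);
  return s;
}

static void str_unref(interned_str *s) {
  pthread_mutex_t *mu = s->table_mu;
  if (atomic_fetch_add(&s->refcnt, -1) == 2) {
    /* the last external reference just went away: take the lock and
       re-check, since a lookup may have re-referenced the string */
    pthread_mutex_lock(mu);
    if (atomic_load(&s->refcnt) == 1) {
      /* the real code also unlinks the string from its shard here */
      free(s);
    }
    pthread_mutex_unlock(mu);
  }
}

int main(void) {
  pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  interned_str *s = str_create(&mu, "example");
  str_ref(s);   /* 2 -> 3 */
  str_unref(s); /* 3 -> 2 */
  str_unref(s); /* 2 -> 1: final drop, freed under the lock */
  printf("done\n");
  return 0;
}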
void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
internal_metadata *im = (internal_metadata *)md;
void *result;
+ if (is_mdelem_static(md)) {
+ return (void *)grpc_static_mdelem_user_data[md - grpc_static_mdelem_table];
+ }
if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
return (void *)gpr_atm_no_barrier_load(&im->user_data);
} else {
@@ -608,6 +657,7 @@ void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data) {
internal_metadata *im = (internal_metadata *)md;
+ GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
gpr_mu_lock(&im->mu_user_data);
if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
@@ -626,15 +676,16 @@ void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
internal_string *s = (internal_string *)gs;
gpr_slice slice;
- grpc_mdctx *ctx = s->context;
- lock(ctx);
+ strtab_shard *shard =
+ &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
+ gpr_mu_lock(&shard->mu);
if (!s->has_base64_and_huffman_encoded) {
s->base64_and_huffman =
grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
s->has_base64_and_huffman_encoded = 1;
}
slice = s->base64_and_huffman;
- unlock(ctx);
+ gpr_mu_unlock(&shard->mu);
return slice;
}
@@ -660,7 +711,7 @@ int grpc_mdstr_is_legal_header(grpc_mdstr *s) {
int grpc_mdstr_is_legal_nonbin_header(grpc_mdstr *s) {
static const gpr_uint8 legal_header_bits[256 / 8] = {
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
return conforms_to(s, legal_header_bits);
diff --git a/src/core/transport/metadata.h b/src/core/transport/metadata.h
index 9a8164037c..3d3efc682d 100644
--- a/src/core/transport/metadata.h
+++ b/src/core/transport/metadata.h
@@ -59,10 +59,15 @@
grpc_mdelem instances MAY live longer than their refcount implies, and are
garbage collected periodically, meaning cached data can easily outlive a
- single request. */
+ single request.
+
+ STATIC METADATA: in static_metadata.h we declare a set of static metadata.
+ These mdelems and mdstrs are exposed via pre-declared, code-generated macros
+ and may be used anywhere between grpc_init() and grpc_shutdown().
+ They are not refcounted, but can still be passed to the _ref and _unref
+ functions declared here, in which case those functions are no-ops. */
/* Forward declarations */
-typedef struct grpc_mdctx grpc_mdctx;
typedef struct grpc_mdstr grpc_mdstr;
typedef struct grpc_mdelem grpc_mdelem;
@@ -81,25 +86,14 @@ struct grpc_mdelem {
/* there is a private part to this in metadata.c */
};
-/* Create/orphan a metadata context */
-grpc_mdctx *grpc_mdctx_create(void);
-grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed);
-void grpc_mdctx_ref(grpc_mdctx *mdctx);
-void grpc_mdctx_unref(grpc_mdctx *mdctx);
-
-/* Test only accessors to internal state - only for testing this code - do not
- rely on it outside of metadata_test.c */
-size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *mdctx);
-size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *mdctx);
-size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *mdctx);
+void grpc_test_only_set_metadata_hash_seed(gpr_uint32 seed);
/* Constructors for grpc_mdstr instances; take a variety of data types that
clients may have handy */
-grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str);
+grpc_mdstr *grpc_mdstr_from_string(const char *str);
/* Unrefs the slice. */
-grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice);
-grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *str,
- size_t length);
+grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice);
+grpc_mdstr *grpc_mdstr_from_buffer(const gpr_uint8 *str, size_t length);
/* Returns a borrowed slice from the mdstr with its contents base64 encoded
and huffman compressed */
@@ -107,15 +101,12 @@ gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *str);
/* Constructors for grpc_mdelem instances; take a variety of data types that
clients may have handy */
-grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx, grpc_mdstr *key,
+grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *key,
grpc_mdstr *value);
-grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
- const char *value);
+grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value);
/* Unrefs the slices. */
-grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
- gpr_slice value);
-grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
- const char *key,
+grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value);
+grpc_mdelem *grpc_mdelem_from_string_and_buffer(const char *key,
const gpr_uint8 *value,
size_t value_length);
@@ -157,4 +148,7 @@ int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s);
#define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))
+void grpc_mdctx_global_init(void);
+void grpc_mdctx_global_shutdown(void);
+
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_METADATA_H */
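
With the metadata context gone, interned strings and elements come from the global functions declared above. A rough usage sketch (the element contents are illustrative; GRPC_MDELEM_UNREF and GRPC_MDELEM_TE_TRAILERS are the macros used elsewhere in this change):

#include <grpc/grpc.h>
#include "src/core/transport/metadata.h"
#include "src/core/transport/static_metadata.h"

static void metadata_sketch(void) {
  grpc_init(); /* brings up the global mdstr/mdelem tables */

  /* Dynamically interned element: must be unreffed when done with it. */
  grpc_mdelem *md = grpc_mdelem_from_strings("user-agent", "sketch/0.1");
  GRPC_MDELEM_UNREF(md);

  /* Static element: usable between init and shutdown; unref is a no-op. */
  GRPC_MDELEM_UNREF(GRPC_MDELEM_TE_TRAILERS);

  grpc_shutdown();
}
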
diff --git a/src/core/transport/stream_op.c b/src/core/transport/metadata_batch.c
index 6493e77bc5..1266862f82 100644
--- a/src/core/transport/stream_op.c
+++ b/src/core/transport/metadata_batch.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
#include <string.h>
@@ -40,143 +40,6 @@
#include "src/core/profiling/timers.h"
-/* Exponential growth function: Given x, return a larger x.
- Currently we grow by 1.5 times upon reallocation. */
-#define GROW(x) (3 * (x) / 2)
-
-void grpc_sopb_init(grpc_stream_op_buffer *sopb) {
- sopb->ops = sopb->inlined_ops;
- sopb->nops = 0;
- sopb->capacity = GRPC_SOPB_INLINE_ELEMENTS;
-}
-
-void grpc_sopb_destroy(grpc_stream_op_buffer *sopb) {
- grpc_stream_ops_unref_owned_objects(sopb->ops, sopb->nops);
- if (sopb->ops != sopb->inlined_ops) gpr_free(sopb->ops);
-}
-
-void grpc_sopb_reset(grpc_stream_op_buffer *sopb) {
- grpc_stream_ops_unref_owned_objects(sopb->ops, sopb->nops);
- sopb->nops = 0;
-}
-
-void grpc_sopb_swap(grpc_stream_op_buffer *a, grpc_stream_op_buffer *b) {
- GPR_SWAP(size_t, a->nops, b->nops);
- GPR_SWAP(size_t, a->capacity, b->capacity);
-
- if (a->ops == a->inlined_ops) {
- if (b->ops == b->inlined_ops) {
- /* swap contents of inlined buffer */
- grpc_stream_op temp[GRPC_SOPB_INLINE_ELEMENTS];
- memcpy(temp, a->ops, b->nops * sizeof(grpc_stream_op));
- memcpy(a->ops, b->ops, a->nops * sizeof(grpc_stream_op));
- memcpy(b->ops, temp, b->nops * sizeof(grpc_stream_op));
- } else {
- /* a is inlined, b is not - copy a inlined into b, fix pointers */
- a->ops = b->ops;
- b->ops = b->inlined_ops;
- memcpy(b->ops, a->inlined_ops, b->nops * sizeof(grpc_stream_op));
- }
- } else if (b->ops == b->inlined_ops) {
- /* b is inlined, a is not - copy b inlined int a, fix pointers */
- b->ops = a->ops;
- a->ops = a->inlined_ops;
- memcpy(a->ops, b->inlined_ops, a->nops * sizeof(grpc_stream_op));
- } else {
- /* no inlining: easy swap */
- GPR_SWAP(grpc_stream_op *, a->ops, b->ops);
- }
-}
-
-void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
- size_t i;
- for (i = 0; i < nops; i++) {
- switch (ops[i].type) {
- case GRPC_OP_SLICE:
- gpr_slice_unref(ops[i].data.slice);
- break;
- case GRPC_OP_METADATA:
- grpc_metadata_batch_destroy(&ops[i].data.metadata);
- break;
- case GRPC_NO_OP:
- case GRPC_OP_BEGIN_MESSAGE:
- break;
- }
- }
-}
-
-static void expandto(grpc_stream_op_buffer *sopb, size_t new_capacity) {
- sopb->capacity = new_capacity;
- if (sopb->ops == sopb->inlined_ops) {
- sopb->ops = gpr_malloc(sizeof(grpc_stream_op) * new_capacity);
- memcpy(sopb->ops, sopb->inlined_ops, sopb->nops * sizeof(grpc_stream_op));
- } else {
- sopb->ops = gpr_realloc(sopb->ops, sizeof(grpc_stream_op) * new_capacity);
- }
-}
-
-static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
- grpc_stream_op *out;
-
- GPR_ASSERT(sopb->nops <= sopb->capacity);
- if (sopb->nops == sopb->capacity) {
- expandto(sopb, GROW(sopb->capacity));
- }
- out = sopb->ops + sopb->nops;
- sopb->nops++;
- return out;
-}
-
-void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb) {
- add(sopb)->type = GRPC_NO_OP;
-}
-
-void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
- gpr_uint32 flags) {
- grpc_stream_op *op = add(sopb);
- op->type = GRPC_OP_BEGIN_MESSAGE;
- op->data.begin_message.length = length;
- op->data.begin_message.flags = flags;
-}
-
-void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
- grpc_metadata_batch b) {
- grpc_stream_op *op = add(sopb);
- op->type = GRPC_OP_METADATA;
- op->data.metadata = b;
-}
-
-void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice) {
- grpc_stream_op *op = add(sopb);
- op->type = GRPC_OP_SLICE;
- op->data.slice = slice;
-}
-
-void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
- size_t nops) {
- size_t orig_nops = sopb->nops;
- size_t new_nops = orig_nops + nops;
-
- if (new_nops > sopb->capacity) {
- expandto(sopb, GPR_MAX(GROW(sopb->capacity), new_nops));
- }
-
- memcpy(sopb->ops + orig_nops, ops, sizeof(grpc_stream_op) * nops);
- sopb->nops = new_nops;
-}
-
-void grpc_sopb_move_to(grpc_stream_op_buffer *src, grpc_stream_op_buffer *dst) {
- if (src->nops == 0) {
- return;
- }
- if (dst->nops == 0) {
- grpc_sopb_swap(src, dst);
- return;
- }
- grpc_sopb_append(dst, src->ops, src->nops);
- src->nops = 0;
-}
-
static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
grpc_linked_mdelem *l;
@@ -200,13 +63,11 @@ static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
assert_valid_list(&batch->list);
- assert_valid_list(&batch->garbage);
}
#endif /* NDEBUG */
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
- batch->list.head = batch->list.tail = batch->garbage.head =
- batch->garbage.tail = NULL;
+ batch->list.head = batch->list.tail = NULL;
batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
@@ -215,9 +76,6 @@ void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) {
for (l = batch->list.head; l; l = l->next) {
GRPC_MDELEM_UNREF(l->md);
}
- for (l = batch->garbage.head; l; l = l->next) {
- GRPC_MDELEM_UNREF(l->md);
- }
}
void grpc_metadata_batch_add_head(grpc_metadata_batch *batch,
@@ -275,20 +133,6 @@ void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
link_tail(&batch->list, storage);
}
-void grpc_metadata_batch_merge(grpc_metadata_batch *target,
- grpc_metadata_batch *to_add) {
- grpc_linked_mdelem *l;
- grpc_linked_mdelem *next;
- for (l = to_add->list.head; l; l = next) {
- next = l->next;
- link_tail(&target->list, l);
- }
- for (l = to_add->garbage.head; l; l = next) {
- next = l->next;
- link_tail(&target->garbage, l);
- }
-}
-
void grpc_metadata_batch_move(grpc_metadata_batch *dst,
grpc_metadata_batch *src) {
*dst = *src;
@@ -305,7 +149,6 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
GPR_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
assert_valid_list(&batch->list);
- assert_valid_list(&batch->garbage);
for (l = batch->list.head; l; l = next) {
grpc_mdelem *orig = l->md;
grpc_mdelem *filt = filter(user_data, orig);
@@ -324,14 +167,28 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
batch->list.tail = l->prev;
}
assert_valid_list(&batch->list);
- link_head(&batch->garbage, l);
+ GRPC_MDELEM_UNREF(l->md);
} else if (filt != orig) {
GRPC_MDELEM_UNREF(orig);
l->md = filt;
}
}
assert_valid_list(&batch->list);
- assert_valid_list(&batch->garbage);
GPR_TIMER_END("grpc_metadata_batch_filter", 0);
}
+
+static grpc_mdelem *no_metadata_for_you(void *user_data, grpc_mdelem *elem) {
+ return NULL;
+}
+
+void grpc_metadata_batch_clear(grpc_metadata_batch *batch) {
+ batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ grpc_metadata_batch_filter(batch, no_metadata_for_you, NULL);
+}
+
+int grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
+ return batch->list.head == NULL &&
+ gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type),
+ batch->deadline) == 0;
+}
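
Since the garbage list is gone, a batch simply owns one ref per linked element while the caller owns each grpc_linked_mdelem node. A hedged sketch of the resulting usage (element contents are illustrative; link_tail takes caller-provided storage with md already set, as in the code above):

#include <grpc/support/log.h>
#include "src/core/transport/metadata_batch.h"

static void batch_sketch(void) {
  grpc_metadata_batch batch;
  grpc_linked_mdelem storage; /* caller-owned link node */

  grpc_metadata_batch_init(&batch);

  /* Attach one element; the batch takes over the element's reference. */
  storage.md = grpc_mdelem_from_strings("grpc-message", "ok");
  grpc_metadata_batch_link_tail(&batch, &storage);
  GPR_ASSERT(!grpc_metadata_batch_is_empty(&batch));

  /* Unrefs every element and resets the deadline, per the code above. */
  grpc_metadata_batch_clear(&batch);
  GPR_ASSERT(grpc_metadata_batch_is_empty(&batch));

  grpc_metadata_batch_destroy(&batch);
}
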
diff --git a/src/core/transport/stream_op.h b/src/core/transport/metadata_batch.h
index 37f18b02d9..1b0d1fda3e 100644
--- a/src/core/transport/stream_op.h
+++ b/src/core/transport/metadata_batch.h
@@ -40,39 +40,6 @@
#include <grpc/support/time.h>
#include "src/core/transport/metadata.h"
-/* this many stream ops are inlined into a sopb before allocating */
-#define GRPC_SOPB_INLINE_ELEMENTS 4
-
-/* Operations that can be performed on a stream.
- Used by grpc_stream_op. */
-typedef enum grpc_stream_op_code {
- /* Do nothing code. Useful if rewriting a batch to exclude some operations.
- Must be ignored by receivers */
- GRPC_NO_OP,
- GRPC_OP_METADATA,
- /* Begin a message/metadata element/status - as defined by
- grpc_message_type. */
- GRPC_OP_BEGIN_MESSAGE,
- /* Add a slice of data to the current message/metadata element/status.
- Must not overflow the forward declared length. */
- GRPC_OP_SLICE
-} grpc_stream_op_code;
-
-/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
- * compression for the message */
-#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
-/** Mask of all valid internal flags. */
-#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
-
-/* Arguments for GRPC_OP_BEGIN_MESSAGE */
-typedef struct grpc_begin_message {
- /* How many bytes of data will this message contain */
- gpr_uint32 length;
- /* Write flags for the message: see grpc.h GRPC_WRITE_* for the public bits,
- * GRPC_WRITE_INTERNAL_* for the internal ones. */
- gpr_uint32 flags;
-} grpc_begin_message;
-
typedef struct grpc_linked_mdelem {
grpc_mdelem *md;
struct grpc_linked_mdelem *next;
@@ -88,10 +55,6 @@ typedef struct grpc_mdelem_list {
typedef struct grpc_metadata_batch {
/** Metadata elements in this batch */
grpc_mdelem_list list;
- /** Elements that have been removed from the batch, but have
- not yet been unreffed - used to allow collecting garbage
- under a single metadata context lock */
- grpc_mdelem_list garbage;
/** Used to calculate grpc-timeout at the point of sending,
or gpr_inf_future if this batch does not need to send a
grpc-timeout */
@@ -100,8 +63,8 @@ typedef struct grpc_metadata_batch {
void grpc_metadata_batch_init(grpc_metadata_batch *batch);
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch);
-void grpc_metadata_batch_merge(grpc_metadata_batch *target,
- grpc_metadata_batch *add);
+void grpc_metadata_batch_clear(grpc_metadata_batch *batch);
+int grpc_metadata_batch_is_empty(grpc_metadata_batch *batch);
/** Moves the metadata information from \a src to \a dst. Upon return, \a src is
* zeroed. */
@@ -159,54 +122,4 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
} while (0)
#endif
-/* Represents a single operation performed on a stream/transport */
-typedef struct grpc_stream_op {
- /* the operation to be applied */
- enum grpc_stream_op_code type;
- /* the arguments to this operation. union fields are named according to the
- associated op-code */
- union {
- grpc_begin_message begin_message;
- grpc_metadata_batch metadata;
- gpr_slice slice;
- } data;
-} grpc_stream_op;
-
-/** A stream op buffer is a wrapper around stream operations that is
- * dynamically extendable. */
-typedef struct grpc_stream_op_buffer {
- grpc_stream_op *ops;
- size_t nops;
- size_t capacity;
- grpc_stream_op inlined_ops[GRPC_SOPB_INLINE_ELEMENTS];
-} grpc_stream_op_buffer;
-
-/* Initialize a stream op buffer */
-void grpc_sopb_init(grpc_stream_op_buffer *sopb);
-/* Destroy a stream op buffer */
-void grpc_sopb_destroy(grpc_stream_op_buffer *sopb);
-/* Reset a sopb to no elements */
-void grpc_sopb_reset(grpc_stream_op_buffer *sopb);
-/* Swap two sopbs */
-void grpc_sopb_swap(grpc_stream_op_buffer *a, grpc_stream_op_buffer *b);
-
-void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops);
-
-/* Append a GRPC_NO_OP to a buffer */
-void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb);
-/* Append a GRPC_OP_BEGIN to a buffer */
-void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
- gpr_uint32 flags);
-void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
- grpc_metadata_batch metadata);
-/* Append a GRPC_SLICE to a buffer - does not ref/unref the slice */
-void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice);
-/* Append a buffer to a buffer - does not ref/unref any internal objects */
-void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
- size_t nops);
-
-void grpc_sopb_move_to(grpc_stream_op_buffer *src, grpc_stream_op_buffer *dst);
-
-char *grpc_sopb_string(grpc_stream_op_buffer *sopb);
-
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_STREAM_OP_H */
diff --git a/src/core/transport/static_metadata.c b/src/core/transport/static_metadata.c
new file mode 100644
index 0000000000..6e42379eee
--- /dev/null
+++ b/src/core/transport/static_metadata.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * WARNING: Auto-generated code.
+ *
+ * To make changes to this file, change
+ * tools/codegen/core/gen_static_metadata.py, and then re-run it.
+ *
+ * See metadata.h for an explanation of the interface here, and metadata.c
+ * for an explanation of what's going on.
+ */
+
+#include "src/core/transport/static_metadata.h"
+
+grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+
+grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+gpr_uintptr grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 7, 5, 2, 4, 8, 6, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+const gpr_uint8
+ grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] = {
+ 11, 35, 10, 35, 12, 35, 12, 49, 13, 35, 14, 35, 15, 35, 16, 35, 17, 35,
+ 19, 35, 20, 35, 21, 35, 24, 35, 25, 35, 26, 35, 27, 35, 28, 35, 29, 35,
+ 30, 18, 30, 35, 31, 35, 32, 35, 36, 35, 37, 35, 38, 35, 39, 35, 42, 33,
+ 42, 34, 42, 48, 42, 53, 42, 54, 42, 55, 42, 56, 43, 33, 43, 48, 43, 53,
+ 46, 0, 46, 1, 46, 2, 50, 35, 57, 35, 58, 35, 59, 35, 60, 35, 61, 35,
+ 62, 35, 63, 35, 64, 35, 65, 35, 66, 40, 66, 68, 67, 78, 67, 79, 69, 35,
+ 70, 35, 71, 35, 72, 35, 73, 35, 74, 35, 75, 41, 75, 51, 75, 52, 76, 35,
+ 77, 35, 80, 3, 80, 4, 80, 5, 80, 6, 80, 7, 80, 8, 80, 9, 81, 35,
+ 82, 83, 84, 35, 85, 35, 86, 35, 87, 35, 88, 35};
+
+const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
+ "0", "1", "2", "200", "204", "206", "304", "400", "404", "500", "accept",
+ "accept-charset", "accept-encoding", "accept-language", "accept-ranges",
+ "access-control-allow-origin", "age", "allow", "application/grpc",
+ ":authority", "authorization", "cache-control", "census", "census-bin",
+ "content-disposition", "content-encoding", "content-language",
+ "content-length", "content-location", "content-range", "content-type",
+ "cookie", "date", "deflate", "deflate,gzip", "", "etag", "expect",
+ "expires", "from", "GET", "grpc", "grpc-accept-encoding", "grpc-encoding",
+ "grpc-internal-encoding-request", "grpc-message", "grpc-status",
+ "grpc-timeout", "gzip", "gzip, deflate", "host", "http", "https",
+ "identity", "identity,deflate", "identity,deflate,gzip", "identity,gzip",
+ "if-match", "if-modified-since", "if-none-match", "if-range",
+ "if-unmodified-since", "last-modified", "link", "location", "max-forwards",
+ ":method", ":path", "POST", "proxy-authenticate", "proxy-authorization",
+ "range", "referer", "refresh", "retry-after", ":scheme", "server",
+ "set-cookie", "/", "/index.html", ":status", "strict-transport-security",
+ "te", "trailers", "transfer-encoding", "user-agent", "vary", "via",
+ "www-authenticate"};
+
+const gpr_uint8 grpc_static_accept_encoding_metadata[8] = {0, 29, 26, 30,
+ 28, 32, 27, 31};
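
The generated tables fit together as follows: element i of grpc_static_mdelem_table has its key at grpc_static_metadata_strings[grpc_static_metadata_elem_indices[2*i]] and its value at index 2*i+1, so e.g. entry 72 resolves to "te": "trailers" from the pair (82, 83). A small sketch that dumps the table using only the declarations in static_metadata.h:

#include <stdio.h>
#include "src/core/transport/static_metadata.h"

static void dump_static_metadata(void) {
  int i;
  for (i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
    const char *key =
        grpc_static_metadata_strings[grpc_static_metadata_elem_indices[2 * i]];
    const char *value =
        grpc_static_metadata_strings[grpc_static_metadata_elem_indices[2 * i + 1]];
    printf("%2d %s: %s\n", i, key, value);
  }
}
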
diff --git a/src/core/transport/static_metadata.h b/src/core/transport/static_metadata.h
new file mode 100644
index 0000000000..0e630b1b03
--- /dev/null
+++ b/src/core/transport/static_metadata.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * WARNING: Auto-generated code.
+ *
+ * To make changes to this file, change
+ * tools/codegen/core/gen_static_metadata.py, and then re-run it.
+ *
+ * See metadata.h for an explanation of the interface here, and metadata.c
+ * for an explanation of what's going on.
+ */
+
+#ifndef GRPC_INTERNAL_CORE_TRANSPORT_STATIC_METADATA_H
+#define GRPC_INTERNAL_CORE_TRANSPORT_STATIC_METADATA_H
+
+#include "src/core/transport/metadata.h"
+
+#define GRPC_STATIC_MDSTR_COUNT 89
+extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
+/* "0" */
+#define GRPC_MDSTR_0 (&grpc_static_mdstr_table[0])
+/* "1" */
+#define GRPC_MDSTR_1 (&grpc_static_mdstr_table[1])
+/* "2" */
+#define GRPC_MDSTR_2 (&grpc_static_mdstr_table[2])
+/* "200" */
+#define GRPC_MDSTR_200 (&grpc_static_mdstr_table[3])
+/* "204" */
+#define GRPC_MDSTR_204 (&grpc_static_mdstr_table[4])
+/* "206" */
+#define GRPC_MDSTR_206 (&grpc_static_mdstr_table[5])
+/* "304" */
+#define GRPC_MDSTR_304 (&grpc_static_mdstr_table[6])
+/* "400" */
+#define GRPC_MDSTR_400 (&grpc_static_mdstr_table[7])
+/* "404" */
+#define GRPC_MDSTR_404 (&grpc_static_mdstr_table[8])
+/* "500" */
+#define GRPC_MDSTR_500 (&grpc_static_mdstr_table[9])
+/* "accept" */
+#define GRPC_MDSTR_ACCEPT (&grpc_static_mdstr_table[10])
+/* "accept-charset" */
+#define GRPC_MDSTR_ACCEPT_CHARSET (&grpc_static_mdstr_table[11])
+/* "accept-encoding" */
+#define GRPC_MDSTR_ACCEPT_ENCODING (&grpc_static_mdstr_table[12])
+/* "accept-language" */
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (&grpc_static_mdstr_table[13])
+/* "accept-ranges" */
+#define GRPC_MDSTR_ACCEPT_RANGES (&grpc_static_mdstr_table[14])
+/* "access-control-allow-origin" */
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (&grpc_static_mdstr_table[15])
+/* "age" */
+#define GRPC_MDSTR_AGE (&grpc_static_mdstr_table[16])
+/* "allow" */
+#define GRPC_MDSTR_ALLOW (&grpc_static_mdstr_table[17])
+/* "application/grpc" */
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (&grpc_static_mdstr_table[18])
+/* ":authority" */
+#define GRPC_MDSTR_AUTHORITY (&grpc_static_mdstr_table[19])
+/* "authorization" */
+#define GRPC_MDSTR_AUTHORIZATION (&grpc_static_mdstr_table[20])
+/* "cache-control" */
+#define GRPC_MDSTR_CACHE_CONTROL (&grpc_static_mdstr_table[21])
+/* "census" */
+#define GRPC_MDSTR_CENSUS (&grpc_static_mdstr_table[22])
+/* "census-bin" */
+#define GRPC_MDSTR_CENSUS_BIN (&grpc_static_mdstr_table[23])
+/* "content-disposition" */
+#define GRPC_MDSTR_CONTENT_DISPOSITION (&grpc_static_mdstr_table[24])
+/* "content-encoding" */
+#define GRPC_MDSTR_CONTENT_ENCODING (&grpc_static_mdstr_table[25])
+/* "content-language" */
+#define GRPC_MDSTR_CONTENT_LANGUAGE (&grpc_static_mdstr_table[26])
+/* "content-length" */
+#define GRPC_MDSTR_CONTENT_LENGTH (&grpc_static_mdstr_table[27])
+/* "content-location" */
+#define GRPC_MDSTR_CONTENT_LOCATION (&grpc_static_mdstr_table[28])
+/* "content-range" */
+#define GRPC_MDSTR_CONTENT_RANGE (&grpc_static_mdstr_table[29])
+/* "content-type" */
+#define GRPC_MDSTR_CONTENT_TYPE (&grpc_static_mdstr_table[30])
+/* "cookie" */
+#define GRPC_MDSTR_COOKIE (&grpc_static_mdstr_table[31])
+/* "date" */
+#define GRPC_MDSTR_DATE (&grpc_static_mdstr_table[32])
+/* "deflate" */
+#define GRPC_MDSTR_DEFLATE (&grpc_static_mdstr_table[33])
+/* "deflate,gzip" */
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (&grpc_static_mdstr_table[34])
+/* "" */
+#define GRPC_MDSTR_EMPTY (&grpc_static_mdstr_table[35])
+/* "etag" */
+#define GRPC_MDSTR_ETAG (&grpc_static_mdstr_table[36])
+/* "expect" */
+#define GRPC_MDSTR_EXPECT (&grpc_static_mdstr_table[37])
+/* "expires" */
+#define GRPC_MDSTR_EXPIRES (&grpc_static_mdstr_table[38])
+/* "from" */
+#define GRPC_MDSTR_FROM (&grpc_static_mdstr_table[39])
+/* "GET" */
+#define GRPC_MDSTR_GET (&grpc_static_mdstr_table[40])
+/* "grpc" */
+#define GRPC_MDSTR_GRPC (&grpc_static_mdstr_table[41])
+/* "grpc-accept-encoding" */
+#define GRPC_MDSTR_GRPC_ACCEPT_ENCODING (&grpc_static_mdstr_table[42])
+/* "grpc-encoding" */
+#define GRPC_MDSTR_GRPC_ENCODING (&grpc_static_mdstr_table[43])
+/* "grpc-internal-encoding-request" */
+#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (&grpc_static_mdstr_table[44])
+/* "grpc-message" */
+#define GRPC_MDSTR_GRPC_MESSAGE (&grpc_static_mdstr_table[45])
+/* "grpc-status" */
+#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[46])
+/* "grpc-timeout" */
+#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[47])
+/* "gzip" */
+#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[48])
+/* "gzip, deflate" */
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[49])
+/* "host" */
+#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[50])
+/* "http" */
+#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[51])
+/* "https" */
+#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[52])
+/* "identity" */
+#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[53])
+/* "identity,deflate" */
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[54])
+/* "identity,deflate,gzip" */
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
+ (&grpc_static_mdstr_table[55])
+/* "identity,gzip" */
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[56])
+/* "if-match" */
+#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[57])
+/* "if-modified-since" */
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[58])
+/* "if-none-match" */
+#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[59])
+/* "if-range" */
+#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[60])
+/* "if-unmodified-since" */
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[61])
+/* "last-modified" */
+#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[62])
+/* "link" */
+#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[63])
+/* "location" */
+#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[64])
+/* "max-forwards" */
+#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[65])
+/* ":method" */
+#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[66])
+/* ":path" */
+#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[67])
+/* "POST" */
+#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[68])
+/* "proxy-authenticate" */
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[69])
+/* "proxy-authorization" */
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[70])
+/* "range" */
+#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[71])
+/* "referer" */
+#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[72])
+/* "refresh" */
+#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[73])
+/* "retry-after" */
+#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[74])
+/* ":scheme" */
+#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[75])
+/* "server" */
+#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[76])
+/* "set-cookie" */
+#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[77])
+/* "/" */
+#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[78])
+/* "/index.html" */
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[79])
+/* ":status" */
+#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[80])
+/* "strict-transport-security" */
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[81])
+/* "te" */
+#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[82])
+/* "trailers" */
+#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[83])
+/* "transfer-encoding" */
+#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[84])
+/* "user-agent" */
+#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[85])
+/* "vary" */
+#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[86])
+/* "via" */
+#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[87])
+/* "www-authenticate" */
+#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[88])
+
+#define GRPC_STATIC_MDELEM_COUNT 78
+extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
+extern gpr_uintptr grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
+/* "accept-charset": "" */
+#define GRPC_MDELEM_ACCEPT_CHARSET_EMPTY (&grpc_static_mdelem_table[0])
+/* "accept": "" */
+#define GRPC_MDELEM_ACCEPT_EMPTY (&grpc_static_mdelem_table[1])
+/* "accept-encoding": "" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_EMPTY (&grpc_static_mdelem_table[2])
+/* "accept-encoding": "gzip, deflate" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_GZIP_COMMA_DEFLATE \
+ (&grpc_static_mdelem_table[3])
+/* "accept-language": "" */
+#define GRPC_MDELEM_ACCEPT_LANGUAGE_EMPTY (&grpc_static_mdelem_table[4])
+/* "accept-ranges": "" */
+#define GRPC_MDELEM_ACCEPT_RANGES_EMPTY (&grpc_static_mdelem_table[5])
+/* "access-control-allow-origin": "" */
+#define GRPC_MDELEM_ACCESS_CONTROL_ALLOW_ORIGIN_EMPTY \
+ (&grpc_static_mdelem_table[6])
+/* "age": "" */
+#define GRPC_MDELEM_AGE_EMPTY (&grpc_static_mdelem_table[7])
+/* "allow": "" */
+#define GRPC_MDELEM_ALLOW_EMPTY (&grpc_static_mdelem_table[8])
+/* ":authority": "" */
+#define GRPC_MDELEM_AUTHORITY_EMPTY (&grpc_static_mdelem_table[9])
+/* "authorization": "" */
+#define GRPC_MDELEM_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[10])
+/* "cache-control": "" */
+#define GRPC_MDELEM_CACHE_CONTROL_EMPTY (&grpc_static_mdelem_table[11])
+/* "content-disposition": "" */
+#define GRPC_MDELEM_CONTENT_DISPOSITION_EMPTY (&grpc_static_mdelem_table[12])
+/* "content-encoding": "" */
+#define GRPC_MDELEM_CONTENT_ENCODING_EMPTY (&grpc_static_mdelem_table[13])
+/* "content-language": "" */
+#define GRPC_MDELEM_CONTENT_LANGUAGE_EMPTY (&grpc_static_mdelem_table[14])
+/* "content-length": "" */
+#define GRPC_MDELEM_CONTENT_LENGTH_EMPTY (&grpc_static_mdelem_table[15])
+/* "content-location": "" */
+#define GRPC_MDELEM_CONTENT_LOCATION_EMPTY (&grpc_static_mdelem_table[16])
+/* "content-range": "" */
+#define GRPC_MDELEM_CONTENT_RANGE_EMPTY (&grpc_static_mdelem_table[17])
+/* "content-type": "application/grpc" */
+#define GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC \
+ (&grpc_static_mdelem_table[18])
+/* "content-type": "" */
+#define GRPC_MDELEM_CONTENT_TYPE_EMPTY (&grpc_static_mdelem_table[19])
+/* "cookie": "" */
+#define GRPC_MDELEM_COOKIE_EMPTY (&grpc_static_mdelem_table[20])
+/* "date": "" */
+#define GRPC_MDELEM_DATE_EMPTY (&grpc_static_mdelem_table[21])
+/* "etag": "" */
+#define GRPC_MDELEM_ETAG_EMPTY (&grpc_static_mdelem_table[22])
+/* "expect": "" */
+#define GRPC_MDELEM_EXPECT_EMPTY (&grpc_static_mdelem_table[23])
+/* "expires": "" */
+#define GRPC_MDELEM_EXPIRES_EMPTY (&grpc_static_mdelem_table[24])
+/* "from": "" */
+#define GRPC_MDELEM_FROM_EMPTY (&grpc_static_mdelem_table[25])
+/* "grpc-accept-encoding": "deflate" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE (&grpc_static_mdelem_table[26])
+/* "grpc-accept-encoding": "deflate,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
+ (&grpc_static_mdelem_table[27])
+/* "grpc-accept-encoding": "gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP (&grpc_static_mdelem_table[28])
+/* "grpc-accept-encoding": "identity" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
+ (&grpc_static_mdelem_table[29])
+/* "grpc-accept-encoding": "identity,deflate" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
+ (&grpc_static_mdelem_table[30])
+/* "grpc-accept-encoding": "identity,deflate,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
+ (&grpc_static_mdelem_table[31])
+/* "grpc-accept-encoding": "identity,gzip" */
+#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
+ (&grpc_static_mdelem_table[32])
+/* "grpc-encoding": "deflate" */
+#define GRPC_MDELEM_GRPC_ENCODING_DEFLATE (&grpc_static_mdelem_table[33])
+/* "grpc-encoding": "gzip" */
+#define GRPC_MDELEM_GRPC_ENCODING_GZIP (&grpc_static_mdelem_table[34])
+/* "grpc-encoding": "identity" */
+#define GRPC_MDELEM_GRPC_ENCODING_IDENTITY (&grpc_static_mdelem_table[35])
+/* "grpc-status": "0" */
+#define GRPC_MDELEM_GRPC_STATUS_0 (&grpc_static_mdelem_table[36])
+/* "grpc-status": "1" */
+#define GRPC_MDELEM_GRPC_STATUS_1 (&grpc_static_mdelem_table[37])
+/* "grpc-status": "2" */
+#define GRPC_MDELEM_GRPC_STATUS_2 (&grpc_static_mdelem_table[38])
+/* "host": "" */
+#define GRPC_MDELEM_HOST_EMPTY (&grpc_static_mdelem_table[39])
+/* "if-match": "" */
+#define GRPC_MDELEM_IF_MATCH_EMPTY (&grpc_static_mdelem_table[40])
+/* "if-modified-since": "" */
+#define GRPC_MDELEM_IF_MODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[41])
+/* "if-none-match": "" */
+#define GRPC_MDELEM_IF_NONE_MATCH_EMPTY (&grpc_static_mdelem_table[42])
+/* "if-range": "" */
+#define GRPC_MDELEM_IF_RANGE_EMPTY (&grpc_static_mdelem_table[43])
+/* "if-unmodified-since": "" */
+#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[44])
+/* "last-modified": "" */
+#define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
+/* "link": "" */
+#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[46])
+/* "location": "" */
+#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[47])
+/* "max-forwards": "" */
+#define GRPC_MDELEM_MAX_FORWARDS_EMPTY (&grpc_static_mdelem_table[48])
+/* ":method": "GET" */
+#define GRPC_MDELEM_METHOD_GET (&grpc_static_mdelem_table[49])
+/* ":method": "POST" */
+#define GRPC_MDELEM_METHOD_POST (&grpc_static_mdelem_table[50])
+/* ":path": "/" */
+#define GRPC_MDELEM_PATH_SLASH (&grpc_static_mdelem_table[51])
+/* ":path": "/index.html" */
+#define GRPC_MDELEM_PATH_SLASH_INDEX_DOT_HTML (&grpc_static_mdelem_table[52])
+/* "proxy-authenticate": "" */
+#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[53])
+/* "proxy-authorization": "" */
+#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[54])
+/* "range": "" */
+#define GRPC_MDELEM_RANGE_EMPTY (&grpc_static_mdelem_table[55])
+/* "referer": "" */
+#define GRPC_MDELEM_REFERER_EMPTY (&grpc_static_mdelem_table[56])
+/* "refresh": "" */
+#define GRPC_MDELEM_REFRESH_EMPTY (&grpc_static_mdelem_table[57])
+/* "retry-after": "" */
+#define GRPC_MDELEM_RETRY_AFTER_EMPTY (&grpc_static_mdelem_table[58])
+/* ":scheme": "grpc" */
+#define GRPC_MDELEM_SCHEME_GRPC (&grpc_static_mdelem_table[59])
+/* ":scheme": "http" */
+#define GRPC_MDELEM_SCHEME_HTTP (&grpc_static_mdelem_table[60])
+/* ":scheme": "https" */
+#define GRPC_MDELEM_SCHEME_HTTPS (&grpc_static_mdelem_table[61])
+/* "server": "" */
+#define GRPC_MDELEM_SERVER_EMPTY (&grpc_static_mdelem_table[62])
+/* "set-cookie": "" */
+#define GRPC_MDELEM_SET_COOKIE_EMPTY (&grpc_static_mdelem_table[63])
+/* ":status": "200" */
+#define GRPC_MDELEM_STATUS_200 (&grpc_static_mdelem_table[64])
+/* ":status": "204" */
+#define GRPC_MDELEM_STATUS_204 (&grpc_static_mdelem_table[65])
+/* ":status": "206" */
+#define GRPC_MDELEM_STATUS_206 (&grpc_static_mdelem_table[66])
+/* ":status": "304" */
+#define GRPC_MDELEM_STATUS_304 (&grpc_static_mdelem_table[67])
+/* ":status": "400" */
+#define GRPC_MDELEM_STATUS_400 (&grpc_static_mdelem_table[68])
+/* ":status": "404" */
+#define GRPC_MDELEM_STATUS_404 (&grpc_static_mdelem_table[69])
+/* ":status": "500" */
+#define GRPC_MDELEM_STATUS_500 (&grpc_static_mdelem_table[70])
+/* "strict-transport-security": "" */
+#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
+ (&grpc_static_mdelem_table[71])
+/* "te": "trailers" */
+#define GRPC_MDELEM_TE_TRAILERS (&grpc_static_mdelem_table[72])
+/* "transfer-encoding": "" */
+#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY (&grpc_static_mdelem_table[73])
+/* "user-agent": "" */
+#define GRPC_MDELEM_USER_AGENT_EMPTY (&grpc_static_mdelem_table[74])
+/* "vary": "" */
+#define GRPC_MDELEM_VARY_EMPTY (&grpc_static_mdelem_table[75])
+/* "via": "" */
+#define GRPC_MDELEM_VIA_EMPTY (&grpc_static_mdelem_table[76])
+/* "www-authenticate": "" */
+#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[77])
+
+extern const gpr_uint8
+ grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2];
+extern const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT];
+extern const gpr_uint8 grpc_static_accept_encoding_metadata[8];
+#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \
+ (&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]])
+#endif /* GRPC_INTERNAL_CORE_TRANSPORT_STATIC_METADATA_H */
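
GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS takes a bitset of accepted compression algorithms and returns the matching pre-built grpc-accept-encoding element; reading grpc_static_accept_encoding_metadata against the element indices above, bit 0 is identity, bit 1 deflate and bit 2 gzip. A hedged sketch (it assumes the GRPC_COMPRESS_* enum values from grpc/compression.h line up with those bit positions):

#include <grpc/compression.h>
#include "src/core/transport/static_metadata.h"

/* Advertise identity + gzip: bitmask 0b101 = 5, which the table above maps
   to entry 32, i.e. GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP. */
static grpc_mdelem *accept_encoding_sketch(void) {
  gpr_uint32 algs = (1u << GRPC_COMPRESS_NONE) | (1u << GRPC_COMPRESS_GZIP);
  return GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs);
}
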
diff --git a/src/core/transport/transport.c b/src/core/transport/transport.c
index 828d212cfe..2ab978be46 100644
--- a/src/core/transport/transport.c
+++ b/src/core/transport/transport.c
@@ -33,9 +33,49 @@
#include "src/core/transport/transport.h"
#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include "src/core/transport/transport_impl.h"
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type,
+ refcount, refcount->destroy.cb_arg, (int)val, (int)val + 1, reason);
+#else
+void grpc_stream_ref(grpc_stream_refcount *refcount) {
+#endif
+ gpr_ref(&refcount->refs);
+}
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
+ const char *reason) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type,
+ refcount, refcount->destroy.cb_arg, (int)val, (int)val - 1, reason);
+#else
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
+ grpc_stream_refcount *refcount) {
+#endif
+ if (gpr_unref(&refcount->refs)) {
+ grpc_exec_ctx_enqueue(exec_ctx, &refcount->destroy, 1);
+ }
+}
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg,
+ const char *object_type) {
+ refcount->object_type = object_type;
+#else
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg) {
+#endif
+ gpr_ref_init(&refcount->refs, initial_refs);
+ grpc_closure_init(&refcount->destroy, cb, cb_arg);
+}
+
size_t grpc_transport_stream_size(grpc_transport *transport) {
return transport->vtable->sizeof_stream;
}
@@ -47,10 +87,10 @@ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx,
int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
- const void *server_data,
- grpc_transport_stream_op *initial_op) {
- return transport->vtable->init_stream(exec_ctx, transport, stream,
- server_data, initial_op);
+ grpc_stream_refcount *refcount,
+ const void *server_data) {
+ return transport->vtable->init_stream(exec_ctx, transport, stream, refcount,
+ server_data);
}
void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
@@ -66,6 +106,12 @@ void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
transport->vtable->perform_op(exec_ctx, transport, op);
}
+void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
+ grpc_pollset *pollset) {
+ transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
+}
+
void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
grpc_stream *stream) {
@@ -79,9 +125,8 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op) {
- grpc_exec_ctx_enqueue(exec_ctx, op->on_done_recv, 0);
- grpc_exec_ctx_enqueue(exec_ctx, op->on_done_send, 0);
- grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, 0);
}
void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
@@ -129,9 +174,9 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
if (optional_message) {
cmd = gpr_malloc(sizeof(*cmd));
cmd->message = *optional_message;
- cmd->then_call = op->on_consumed;
+ cmd->then_call = op->on_complete;
grpc_closure_init(&cmd->closure, free_message, cmd);
- op->on_consumed = &cmd->closure;
+ op->on_complete = &cmd->closure;
op->optional_close_message = &cmd->message;
}
op->close_with_status = status;
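
A sketch of how a transport might embed the new grpc_stream_refcount (the my_stream type and callback are illustrative; in debug builds ref/unref also take a reason string, which GRPC_STREAM_REF_INIT from transport.h hides at init time):

#include <grpc/support/alloc.h>
#include "src/core/transport/transport.h"

typedef struct my_stream {
  grpc_stream_refcount refs;
  /* ... per-stream transport state ... */
} my_stream;

/* Runs via the exec_ctx once the last reference is dropped. */
static void destroy_my_stream(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  gpr_free(arg);
}

static my_stream *my_stream_create(void) {
  my_stream *s = gpr_malloc(sizeof(*s));
  /* One initial ref; the destroy closure is enqueued on the final unref. */
  GRPC_STREAM_REF_INIT(&s->refs, 1, destroy_my_stream, s, "my_stream");
  return s;
}
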
diff --git a/src/core/transport/transport.h b/src/core/transport/transport.h
index d4cee03862..f94f0ae76e 100644
--- a/src/core/transport/transport.h
+++ b/src/core/transport/transport.h
@@ -38,7 +38,8 @@
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/pollset_set.h"
-#include "src/core/transport/stream_op.h"
+#include "src/core/transport/metadata_batch.h"
+#include "src/core/transport/byte_stream.h"
#include "src/core/channel/context.h"
/* forward declarations */
@@ -49,36 +50,48 @@ typedef struct grpc_transport grpc_transport;
for a stream. */
typedef struct grpc_stream grpc_stream;
-/* Represents the send/recv closed state of a stream. */
-typedef enum grpc_stream_state {
- /* the stream is open for sends and receives */
- GRPC_STREAM_OPEN,
- /* the stream is closed for sends, but may still receive data */
- GRPC_STREAM_SEND_CLOSED,
- /* the stream is closed for receives, but may still send data */
- GRPC_STREAM_RECV_CLOSED,
- /* the stream is closed for both sends and receives */
- GRPC_STREAM_CLOSED
-} grpc_stream_state;
+/*#define GRPC_STREAM_REFCOUNT_DEBUG*/
+
+typedef struct grpc_stream_refcount {
+ gpr_refcount refs;
+ grpc_closure destroy;
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+ const char *object_type;
+#endif
+} grpc_stream_refcount;
+
+#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg,
+ const char *object_type);
+void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason);
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
+ const char *reason);
+#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
+ grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype)
+#else
+void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void *cb_arg);
+void grpc_stream_ref(grpc_stream_refcount *refcount);
+void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount);
+#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
+ grpc_stream_ref_init(rc, ir, cb, cb_arg)
+#endif
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
typedef struct grpc_transport_stream_op {
- grpc_closure *on_consumed;
+ grpc_metadata_batch *send_initial_metadata;
+ grpc_metadata_batch *send_trailing_metadata;
- grpc_stream_op_buffer *send_ops;
- int is_last_send;
- grpc_closure *on_done_send;
+ grpc_byte_stream *send_message;
- grpc_stream_op_buffer *recv_ops;
- grpc_stream_state *recv_state;
- /** The number of bytes this peer is currently prepared to receive.
- These bytes will be eventually used to replenish per-stream flow control
- windows. */
- size_t max_recv_bytes;
- grpc_closure *on_done_recv;
+ grpc_metadata_batch *recv_initial_metadata;
+ grpc_byte_stream **recv_message;
+ grpc_closure *recv_message_ready;
+ grpc_metadata_batch *recv_trailing_metadata;
- grpc_pollset *bind_pollset;
+ grpc_closure *on_complete;
/** If != GRPC_STATUS_OK, cancel this stream */
grpc_status_code cancel_with_status;
@@ -96,7 +109,7 @@ typedef struct grpc_transport_stream_op {
typedef struct grpc_transport_op {
/** called when processing of this op is done */
grpc_closure *on_consumed;
- /** connectivity monitoring */
+ /** connectivity monitoring - set connectivity_state to NULL to unsubscribe */
grpc_closure *on_connectivity_state_change;
grpc_connectivity_state *connectivity_state;
/** should the transport be disconnected */
@@ -110,8 +123,8 @@ typedef struct grpc_transport_op {
gpr_slice *goaway_message;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures */
- void (*set_accept_stream)(void *user_data, grpc_transport *transport,
- const void *server_data);
+ void (*set_accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_transport *transport, const void *server_data);
void *set_accept_stream_user_data;
/** add this transport to a pollset */
grpc_pollset *bind_pollset;
@@ -136,8 +149,12 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
supplied from the accept_stream callback function */
int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
- const void *server_data,
- grpc_transport_stream_op *initial_op);
+ grpc_stream_refcount *refcount,
+ const void *server_data);
+
+void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
+ grpc_pollset *pollset);
/* Destroy transport data for a stream.
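
Under the new layout a stream op carries whole metadata batches and byte streams rather than a stream-op buffer. A hedged sketch of issuing a send of initial metadata plus one message (the transport, stream, batch and payload are assumed to already exist; unset fields mean the op does not request that operation):

#include <string.h>
#include "src/core/transport/transport.h"

static void send_sketch(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
                        grpc_stream *stream, grpc_metadata_batch *initial_md,
                        grpc_byte_stream *payload, grpc_closure *done) {
  grpc_transport_stream_op op;
  memset(&op, 0, sizeof(op)); /* zero means GRPC_STATUS_OK, no recv, no cancel */
  op.send_initial_metadata = initial_md;
  op.send_message = payload;
  op.on_complete = done; /* invoked once the whole batch has been processed */
  grpc_transport_perform_stream_op(exec_ctx, transport, stream, &op);
}
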
diff --git a/src/core/transport/transport_impl.h b/src/core/transport/transport_impl.h
index 900c6340ff..40bfb4b13a 100644
--- a/src/core/transport/transport_impl.h
+++ b/src/core/transport/transport_impl.h
@@ -43,8 +43,12 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_init_stream */
int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, const void *server_data,
- grpc_transport_stream_op *initial_op);
+ grpc_stream *stream, grpc_stream_refcount *refcount,
+ const void *server_data);
+
+ /* implementation of grpc_transport_set_pollset */
+ void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, grpc_pollset *pollset);
/* implementation of grpc_transport_perform_stream_op */
void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
diff --git a/src/core/transport/transport_op_string.c b/src/core/transport/transport_op_string.c
index f62c340e97..98b51afc88 100644
--- a/src/core/transport/transport_op_string.c
+++ b/src/core/transport/transport_op_string.c
@@ -63,48 +63,12 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
}
if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
char *tmp;
- gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
- md.deadline.tv_nsec);
+ gpr_asprintf(&tmp, " deadline=%lld.%09d", (long long)md.deadline.tv_sec,
+ (int)md.deadline.tv_nsec);
gpr_strvec_add(b, tmp);
}
}
-char *grpc_sopb_string(grpc_stream_op_buffer *sopb) {
- char *out;
- char *tmp;
- size_t i;
- gpr_strvec b;
- gpr_strvec_init(&b);
-
- for (i = 0; i < sopb->nops; i++) {
- grpc_stream_op *op = &sopb->ops[i];
- if (i > 0) gpr_strvec_add(&b, gpr_strdup(", "));
- switch (op->type) {
- case GRPC_NO_OP:
- gpr_strvec_add(&b, gpr_strdup("NO_OP"));
- break;
- case GRPC_OP_BEGIN_MESSAGE:
- gpr_asprintf(&tmp, "BEGIN_MESSAGE:%d", op->data.begin_message.length);
- gpr_strvec_add(&b, tmp);
- break;
- case GRPC_OP_SLICE:
- gpr_asprintf(&tmp, "SLICE:%d", GPR_SLICE_LENGTH(op->data.slice));
- gpr_strvec_add(&b, tmp);
- break;
- case GRPC_OP_METADATA:
- gpr_strvec_add(&b, gpr_strdup("METADATA{"));
- put_metadata_list(&b, op->data.metadata);
- gpr_strvec_add(&b, gpr_strdup("}"));
- break;
- }
- }
-
- out = gpr_strvec_flatten(&b, NULL);
- gpr_strvec_destroy(&b);
-
- return out;
-}
-
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
char *out;
@@ -113,42 +77,52 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
gpr_strvec b;
gpr_strvec_init(&b);
- if (op->send_ops) {
+ if (op->send_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_asprintf(&tmp, "SEND%s:%p", op->is_last_send ? "_LAST" : "",
- op->on_done_send);
- gpr_strvec_add(&b, tmp);
- gpr_strvec_add(&b, gpr_strdup("["));
- gpr_strvec_add(&b, grpc_sopb_string(op->send_ops));
- gpr_strvec_add(&b, gpr_strdup("]"));
+ gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
+ put_metadata_list(&b, *op->send_initial_metadata);
+ gpr_strvec_add(&b, gpr_strdup("}"));
}
- if (op->recv_ops) {
+ if (op->send_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_asprintf(&tmp, "RECV:%p:max_recv_bytes=%d", op->on_done_recv,
- op->max_recv_bytes);
+ gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
+ op->send_message->flags, op->send_message->length);
gpr_strvec_add(&b, tmp);
}
- if (op->bind_pollset) {
+ if (op->send_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_strvec_add(&b, gpr_strdup("BIND"));
+ gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
+ put_metadata_list(&b, *op->send_trailing_metadata);
+ gpr_strvec_add(&b, gpr_strdup("}"));
}
- if (op->cancel_with_status != GRPC_STATUS_OK) {
+ if (op->recv_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_asprintf(&tmp, "CANCEL:%d", op->cancel_with_status);
- gpr_strvec_add(&b, tmp);
+ gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
+ }
+
+ if (op->recv_message != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = 0;
+ gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
- if (op->on_consumed != NULL) {
+ if (op->recv_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
- gpr_asprintf(&tmp, "ON_CONSUMED:%p", op->on_consumed);
+ gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
+ }
+
+ if (op->cancel_with_status != GRPC_STATUS_OK) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = 0;
+ gpr_asprintf(&tmp, "CANCEL:%d", op->cancel_with_status);
gpr_strvec_add(&b, tmp);
}
diff --git a/src/core/tsi/transport_security.c b/src/core/tsi/transport_security.c
index c39e584496..db219a50a6 100644
--- a/src/core/tsi/transport_security.c
+++ b/src/core/tsi/transport_security.c
@@ -145,7 +145,9 @@ void tsi_frame_protector_destroy(tsi_frame_protector *self) {
tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
unsigned char *bytes,
size_t *bytes_size) {
- if (self == NULL) return TSI_INVALID_ARGUMENT;
+ if (self == NULL || bytes == NULL || bytes_size == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->get_bytes_to_send_to_peer(self, bytes, bytes_size);
}
@@ -153,7 +155,9 @@ tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
const unsigned char *bytes,
size_t *bytes_size) {
- if (self == NULL) return TSI_INVALID_ARGUMENT;
+ if (self == NULL || bytes == NULL || bytes_size == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->process_bytes_from_peer(self, bytes, bytes_size);
}
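
With the extra argument checks, NULL output pointers now fail cleanly with TSI_INVALID_ARGUMENT instead of being dereferenced later. A small sketch against the fake handshaker (tsi_create_fake_handshaker from fake_transport_security.h is an assumption here, used only to have a concrete handshaker to call):

#include <grpc/support/log.h>
#include "src/core/tsi/fake_transport_security.h"

static void tsi_null_args_sketch(void) {
  tsi_handshaker *h = tsi_create_fake_handshaker(1 /* is_client */);
  size_t size = 0;

  /* A NULL buffer (or NULL size) is rejected up front. */
  GPR_ASSERT(tsi_handshaker_get_bytes_to_send_to_peer(h, NULL, &size) ==
             TSI_INVALID_ARGUMENT);
  GPR_ASSERT(tsi_handshaker_process_bytes_from_peer(h, NULL, NULL) ==
             TSI_INVALID_ARGUMENT);

  tsi_handshaker_destroy(h);
}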