aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar Nate Kibler <nkibler@google.com>2015-09-25 10:02:33 -0700
committerGravatar Nate Kibler <nkibler@google.com>2015-09-25 10:02:33 -0700
commit8d282a2f45b38899c7574d1304576283e95cc955 (patch)
tree2f50cc9d970776886de66070e8ed323e7250559f /src
parent6d19724a9008919eb1ef0a01790a2fa6299ac9d8 (diff)
parentff60024eaf67dea0faf899f80c099460d87fa3ad (diff)
Merge branch 'grpc/master'
Diffstat (limited to 'src')
-rw-r--r--src/core/census/context.h6
-rw-r--r--src/core/census/grpc_filter.c103
-rw-r--r--src/core/channel/channel_args.c8
-rw-r--r--src/core/channel/channel_args.h4
-rw-r--r--src/core/channel/channel_stack.c41
-rw-r--r--src/core/channel/channel_stack.h43
-rw-r--r--src/core/channel/client_channel.c292
-rw-r--r--src/core/channel/client_channel.h15
-rw-r--r--src/core/channel/compress_filter.c20
-rw-r--r--src/core/channel/connected_channel.c31
-rw-r--r--src/core/channel/connected_channel.h4
-rw-r--r--src/core/channel/http_client_filter.c71
-rw-r--r--src/core/channel/http_server_filter.c52
-rw-r--r--src/core/channel/noop_filter.c23
-rw-r--r--src/core/client_config/client_config.c8
-rw-r--r--src/core/client_config/client_config.h3
-rw-r--r--src/core/client_config/connector.c21
-rw-r--r--src/core/client_config/connector.h19
-rw-r--r--src/core/client_config/lb_policies/pick_first.c165
-rw-r--r--src/core/client_config/lb_policies/round_robin.c137
-rw-r--r--src/core/client_config/lb_policies/round_robin.h1
-rw-r--r--src/core/client_config/lb_policy.c42
-rw-r--r--src/core/client_config/lb_policy.h56
-rw-r--r--src/core/client_config/lb_policy_factory.c9
-rw-r--r--src/core/client_config/lb_policy_registry.c6
-rw-r--r--src/core/client_config/resolver.c28
-rw-r--r--src/core/client_config/resolver.h31
-rw-r--r--src/core/client_config/resolver_factory.c15
-rw-r--r--src/core/client_config/resolver_factory.h13
-rw-r--r--src/core/client_config/resolver_registry.c8
-rw-r--r--src/core/client_config/resolvers/dns_resolver.c76
-rw-r--r--src/core/client_config/resolvers/sockaddr_resolver.c85
-rw-r--r--src/core/client_config/resolvers/zookeeper_resolver.c96
-rw-r--r--src/core/client_config/subchannel.c251
-rw-r--r--src/core/client_config/subchannel.h49
-rw-r--r--src/core/client_config/subchannel_factory.c15
-rw-r--r--src/core/client_config/subchannel_factory.h11
-rw-r--r--src/core/client_config/subchannel_factory_decorators/add_channel_arg.h2
-rw-r--r--src/core/client_config/subchannel_factory_decorators/merge_channel_args.c10
-rw-r--r--src/core/client_config/subchannel_factory_decorators/merge_channel_args.h2
-rw-r--r--src/core/client_config/uri_parser.c3
-rw-r--r--src/core/compression/algorithm.c2
-rw-r--r--src/core/compression/message_compress.c18
-rw-r--r--src/core/compression/message_compress.h4
-rw-r--r--src/core/httpcli/httpcli.c187
-rw-r--r--src/core/httpcli/httpcli.h30
-rw-r--r--src/core/httpcli/httpcli_security_connector.c32
-rw-r--r--src/core/httpcli/parser.h8
-rw-r--r--src/core/iomgr/alarm.c65
-rw-r--r--src/core/iomgr/alarm.h12
-rw-r--r--src/core/iomgr/alarm_internal.h7
-rw-r--r--src/core/iomgr/closure.c71
-rw-r--r--src/core/iomgr/closure.h88
-rw-r--r--src/core/iomgr/endpoint.c36
-rw-r--r--src/core/iomgr/endpoint.h44
-rw-r--r--src/core/iomgr/exec_ctx.c62
-rw-r--r--src/core/iomgr/exec_ctx.h77
-rw-r--r--src/core/iomgr/fd_posix.c92
-rw-r--r--src/core/iomgr/fd_posix.h25
-rw-r--r--src/core/iomgr/iocp_windows.c48
-rw-r--r--src/core/iomgr/iocp_windows.h10
-rw-r--r--src/core/iomgr/iomgr.c171
-rw-r--r--src/core/iomgr/iomgr.h37
-rw-r--r--src/core/iomgr/iomgr_internal.h3
-rw-r--r--src/core/iomgr/pollset.h15
-rw-r--r--src/core/iomgr/pollset_multipoller_with_epoll.c50
-rw-r--r--src/core/iomgr/pollset_multipoller_with_poll_posix.c25
-rw-r--r--src/core/iomgr/pollset_posix.c124
-rw-r--r--src/core/iomgr/pollset_posix.h32
-rw-r--r--src/core/iomgr/pollset_set.h14
-rw-r--r--src/core/iomgr/pollset_set_posix.c16
-rw-r--r--src/core/iomgr/pollset_set_posix.h6
-rw-r--r--src/core/iomgr/pollset_set_windows.c14
-rw-r--r--src/core/iomgr/pollset_windows.c22
-rw-r--r--src/core/iomgr/resolve_address.h5
-rw-r--r--src/core/iomgr/resolve_address_posix.c12
-rw-r--r--src/core/iomgr/resolve_address_windows.c6
-rw-r--r--src/core/iomgr/sockaddr_utils.c4
-rw-r--r--src/core/iomgr/socket_windows.c3
-rw-r--r--src/core/iomgr/socket_windows.h6
-rw-r--r--src/core/iomgr/tcp_client.h5
-rw-r--r--src/core/iomgr/tcp_client_posix.c57
-rw-r--r--src/core/iomgr/tcp_client_windows.c43
-rw-r--r--src/core/iomgr/tcp_posix.c138
-rw-r--r--src/core/iomgr/tcp_server.h12
-rw-r--r--src/core/iomgr/tcp_server_posix.c63
-rw-r--r--src/core/iomgr/tcp_server_windows.c67
-rw-r--r--src/core/iomgr/tcp_windows.c92
-rw-r--r--src/core/iomgr/time_averaged_stats.c6
-rw-r--r--src/core/iomgr/time_averaged_stats.h6
-rw-r--r--src/core/iomgr/udp_server.c55
-rw-r--r--src/core/iomgr/udp_server.h9
-rw-r--r--src/core/iomgr/wakeup_fd_eventfd.c10
-rw-r--r--src/core/iomgr/wakeup_fd_pipe.c12
-rw-r--r--src/core/iomgr/wakeup_fd_posix.h16
-rw-r--r--src/core/iomgr/workqueue.h85
-rw-r--r--src/core/iomgr/workqueue_posix.c142
-rw-r--r--src/core/iomgr/workqueue_posix.h51
-rw-r--r--src/core/iomgr/workqueue_windows.c40
-rw-r--r--src/core/iomgr/workqueue_windows.h37
-rw-r--r--src/core/json/json.h22
-rw-r--r--src/core/json/json_reader.c32
-rw-r--r--src/core/json/json_reader.h36
-rw-r--r--src/core/json/json_string.c100
-rw-r--r--src/core/json/json_writer.c40
-rw-r--r--src/core/json/json_writer.h30
-rw-r--r--src/core/profiling/basic_timers.c35
-rw-r--r--src/core/profiling/stap_timers.c12
-rw-r--r--src/core/profiling/timers.h4
-rw-r--r--src/core/security/client_auth_filter.c54
-rw-r--r--src/core/security/credentials.c154
-rw-r--r--src/core/security/credentials.h19
-rw-r--r--src/core/security/google_default_credentials.c20
-rw-r--r--src/core/security/handshake.c122
-rw-r--r--src/core/security/handshake.h4
-rw-r--r--src/core/security/jwt_verifier.c21
-rw-r--r--src/core/security/jwt_verifier.h3
-rw-r--r--src/core/security/secure_endpoint.c113
-rw-r--r--src/core/security/security_connector.c55
-rw-r--r--src/core/security/security_connector.h23
-rw-r--r--src/core/security/server_auth_filter.c39
-rw-r--r--src/core/security/server_secure_chttp2.c57
-rw-r--r--src/core/statistics/census_interface.h4
-rw-r--r--src/core/statistics/census_log.c102
-rw-r--r--src/core/statistics/census_log.h6
-rw-r--r--src/core/statistics/census_rpc_stats.c77
-rw-r--r--src/core/statistics/census_rpc_stats.h16
-rw-r--r--src/core/statistics/census_tracing.c69
-rw-r--r--src/core/statistics/census_tracing.h14
-rw-r--r--src/core/statistics/hash_table.c62
-rw-r--r--src/core/statistics/hash_table.h32
-rw-r--r--src/core/statistics/window_stats.c70
-rw-r--r--src/core/statistics/window_stats.h24
-rw-r--r--src/core/support/cmdline.c60
-rw-r--r--src/core/support/host_port.c2
-rw-r--r--src/core/support/stack_lockfree.c2
-rw-r--r--src/core/support/stack_lockfree.h8
-rw-r--r--src/core/support/string_win32.c6
-rw-r--r--src/core/support/sync_posix.c22
-rw-r--r--src/core/support/sync_win32.c2
-rw-r--r--src/core/support/thd.c8
-rw-r--r--src/core/support/time_precise.h4
-rw-r--r--src/core/surface/call.c374
-rw-r--r--src/core/surface/call.h27
-rw-r--r--src/core/surface/call_details.c4
-rw-r--r--src/core/surface/channel.c28
-rw-r--r--src/core/surface/channel.h17
-rw-r--r--src/core/surface/channel_connectivity.c56
-rw-r--r--src/core/surface/channel_create.c63
-rw-r--r--src/core/surface/completion_queue.c32
-rw-r--r--src/core/surface/completion_queue.h9
-rw-r--r--src/core/surface/lame_client.c37
-rw-r--r--src/core/surface/metadata_array.c4
-rw-r--r--src/core/surface/secure_channel_create.c93
-rw-r--r--src/core/surface/server.c382
-rw-r--r--src/core/surface/server.h16
-rw-r--r--src/core/surface/server_chttp2.c41
-rw-r--r--src/core/transport/chttp2/frame_data.c5
-rw-r--r--src/core/transport/chttp2/frame_data.h4
-rw-r--r--src/core/transport/chttp2/frame_goaway.c5
-rw-r--r--src/core/transport/chttp2/frame_goaway.h4
-rw-r--r--src/core/transport/chttp2/frame_ping.c5
-rw-r--r--src/core/transport/chttp2/frame_ping.h4
-rw-r--r--src/core/transport/chttp2/frame_rst_stream.c3
-rw-r--r--src/core/transport/chttp2/frame_rst_stream.h4
-rw-r--r--src/core/transport/chttp2/frame_settings.c3
-rw-r--r--src/core/transport/chttp2/frame_settings.h4
-rw-r--r--src/core/transport/chttp2/frame_window_update.c3
-rw-r--r--src/core/transport/chttp2/frame_window_update.h4
-rw-r--r--src/core/transport/chttp2/hpack_parser.c12
-rw-r--r--src/core/transport/chttp2/hpack_parser.h4
-rw-r--r--src/core/transport/chttp2/hpack_table.c186
-rw-r--r--src/core/transport/chttp2/internal.h42
-rw-r--r--src/core/transport/chttp2/parsing.c27
-rw-r--r--src/core/transport/chttp2/stream_encoder.c2
-rw-r--r--src/core/transport/chttp2/stream_map.c3
-rw-r--r--src/core/transport/chttp2/writing.c21
-rw-r--r--src/core/transport/chttp2_transport.c334
-rw-r--r--src/core/transport/chttp2_transport.h7
-rw-r--r--src/core/transport/connectivity_state.c63
-rw-r--r--src/core/transport/connectivity_state.h20
-rw-r--r--src/core/transport/transport.c56
-rw-r--r--src/core/transport/transport.h34
-rw-r--r--src/core/transport/transport_impl.h18
-rw-r--r--src/core/tsi/fake_transport_security.c118
-rw-r--r--src/core/tsi/fake_transport_security.h6
-rw-r--r--src/core/tsi/ssl_transport_security.c360
-rw-r--r--src/core/tsi/ssl_transport_security.h34
-rw-r--r--src/core/tsi/transport_security.c68
-rw-r--r--src/core/tsi/transport_security.h74
-rw-r--r--src/core/tsi/transport_security_interface.h54
-rw-r--r--src/cpp/proto/proto_utils.cc4
-rw-r--r--src/cpp/util/time.cc8
193 files changed, 4808 insertions, 3867 deletions
diff --git a/src/core/census/context.h b/src/core/census/context.h
index d43a69f7e5..d9907d4da7 100644
--- a/src/core/census/context.h
+++ b/src/core/census/context.h
@@ -41,9 +41,9 @@
struct census_context {
gpr_uint64 op_id; /* Operation identifier - unique per-context */
gpr_uint64 trace_id; /* Globally unique trace identifier */
- /* TODO(aveitch) Add census tags:
- const census_tag_set *tags;
- */
+ /* TODO(aveitch) Add census tags:
+ const census_tag_set *tags;
+ */
};
#endif /* GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H */
diff --git a/src/core/census/grpc_filter.c b/src/core/census/grpc_filter.c
index 8b6ba1d472..1830af3feb 100644
--- a/src/core/census/grpc_filter.c
+++ b/src/core/census/grpc_filter.c
@@ -48,65 +48,67 @@
typedef struct call_data {
census_op_id op_id;
- census_context* ctxt;
+ census_context *ctxt;
gpr_timespec start_ts;
int error;
/* recv callback */
- grpc_stream_op_buffer* recv_ops;
- grpc_iomgr_closure* on_done_recv;
+ grpc_stream_op_buffer *recv_ops;
+ grpc_closure *on_done_recv;
} call_data;
typedef struct channel_data {
- grpc_mdstr* path_str; /* pointer to meta data str with key == ":path" */
+ grpc_mdstr *path_str; /* pointer to meta data str with key == ":path" */
} channel_data;
-static void extract_and_annotate_method_tag(grpc_stream_op_buffer* sopb,
- call_data* calld,
- channel_data* chand) {
- grpc_linked_mdelem* m;
+static void extract_and_annotate_method_tag(grpc_stream_op_buffer *sopb,
+ call_data *calld,
+ channel_data *chand) {
+ grpc_linked_mdelem *m;
size_t i;
for (i = 0; i < sopb->nops; i++) {
- grpc_stream_op* op = &sopb->ops[i];
+ grpc_stream_op *op = &sopb->ops[i];
if (op->type != GRPC_OP_METADATA) continue;
for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
if (m->md->key == chand->path_str) {
gpr_log(GPR_DEBUG, "%s",
- (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
+ (const char *)GPR_SLICE_START_PTR(m->md->value->slice));
/* Add method tag here */
}
}
}
}
-static void client_mutate_op(grpc_call_element* elem,
- grpc_transport_stream_op* op) {
- call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
+static void client_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
if (op->send_ops) {
extract_and_annotate_method_tag(op->send_ops, calld, chand);
}
}
-static void client_start_transport_op(grpc_call_element* elem,
- grpc_transport_stream_op* op) {
+static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
client_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
-static void server_on_done_recv(void* ptr, int success) {
- grpc_call_element* elem = ptr;
- call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
+static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
+ int success) {
+ grpc_call_element *elem = ptr;
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
if (success) {
extract_and_annotate_method_tag(calld->recv_ops, calld, chand);
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
-static void server_mutate_op(grpc_call_element* elem,
- grpc_transport_stream_op* op) {
- call_data* calld = elem->call_data;
+static void server_mutate_op(grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
if (op->recv_ops) {
/* substitute our callback for the op callback */
calld->recv_ops = op->recv_ops;
@@ -115,56 +117,63 @@ static void server_mutate_op(grpc_call_element* elem,
}
}
-static void server_start_transport_op(grpc_call_element* elem,
- grpc_transport_stream_op* op) {
- call_data* calld = elem->call_data;
+static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
server_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
-static void client_init_call_elem(grpc_call_element* elem,
- const void* server_transport_data,
- grpc_transport_stream_op* initial_op) {
- call_data* d = elem->call_data;
+static void client_init_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const void *server_transport_data,
+ grpc_transport_stream_op *initial_op) {
+ call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
if (initial_op) client_mutate_op(elem, initial_op);
}
-static void client_destroy_call_elem(grpc_call_element* elem) {
- call_data* d = elem->call_data;
+static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
}
-static void server_init_call_elem(grpc_call_element* elem,
- const void* server_transport_data,
- grpc_transport_stream_op* initial_op) {
- call_data* d = elem->call_data;
+static void server_init_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const void *server_transport_data,
+ grpc_transport_stream_op *initial_op) {
+ call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
/* TODO(hongyu): call census_tracing_start_op here. */
- grpc_iomgr_closure_init(d->on_done_recv, server_on_done_recv, elem);
+ grpc_closure_init(d->on_done_recv, server_on_done_recv, elem);
if (initial_op) server_mutate_op(elem, initial_op);
}
-static void server_destroy_call_elem(grpc_call_element* elem) {
- call_data* d = elem->call_data;
+static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
}
-static void init_channel_elem(grpc_channel_element* elem, grpc_channel* master,
- const grpc_channel_args* args, grpc_mdctx* mdctx,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
+ const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
- channel_data* chand = elem->channel_data;
+ channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
chand->path_str = grpc_mdstr_from_string(mdctx, ":path", 0);
}
-static void destroy_channel_elem(grpc_channel_element* elem) {
- channel_data* chand = elem->channel_data;
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
if (chand->path_str != NULL) {
GRPC_MDSTR_UNREF(chand->path_str);
diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c
index 591135cd6f..487db1119a 100644
--- a/src/core/channel/channel_args.c
+++ b/src/core/channel/channel_args.c
@@ -151,8 +151,8 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
/** Returns 1 if the argument for compression algorithm's enabled states bitset
* was found in \a a, returning the arg's value in \a states. Otherwise, returns
* 0. */
-static int find_compression_algorithm_states_bitset(
- const grpc_channel_args *a, int **states_arg) {
+static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
+ int **states_arg) {
if (a != NULL) {
size_t i;
for (i = 0; i < a->num_args; ++i) {
@@ -167,9 +167,7 @@ static int find_compression_algorithm_states_bitset(
}
grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
- grpc_channel_args **a,
- grpc_compression_algorithm algorithm,
- int state) {
+ grpc_channel_args **a, grpc_compression_algorithm algorithm, int state) {
int *states_arg;
grpc_channel_args *result = *a;
const int states_arg_found =
diff --git a/src/core/channel/channel_args.h b/src/core/channel/channel_args.h
index 1a6be91359..480cc9aec2 100644
--- a/src/core/channel/channel_args.h
+++ b/src/core/channel/channel_args.h
@@ -75,9 +75,7 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
* modified to point to the returned instance (which may be different from the
* input value of \a a). */
grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
- grpc_channel_args **a,
- grpc_compression_algorithm algorithm,
- int enabled);
+ grpc_channel_args **a, grpc_compression_algorithm algorithm, int enabled);
/** Returns the bitset representing the support state (true for enabled, false
* for disabled) for compression algorithms.
diff --git a/src/core/channel/channel_stack.c b/src/core/channel/channel_stack.c
index 4eb5df5de3..abd7f719e7 100644
--- a/src/core/channel/channel_stack.c
+++ b/src/core/channel/channel_stack.c
@@ -101,7 +101,8 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
-void grpc_channel_stack_init(const grpc_channel_filter **filters,
+void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
+ const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context,
@@ -123,7 +124,7 @@ void grpc_channel_stack_init(const grpc_channel_filter **filters,
for (i = 0; i < filter_count; i++) {
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
- elems[i].filter->init_channel_elem(&elems[i], master, args,
+ elems[i].filter->init_channel_elem(exec_ctx, &elems[i], master, args,
metadata_context, i == 0,
i == (filter_count - 1));
user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
@@ -137,18 +138,20 @@ void grpc_channel_stack_init(const grpc_channel_filter **filters,
stack->call_stack_size = call_size;
}
-void grpc_channel_stack_destroy(grpc_channel_stack *stack) {
+void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
/* destroy per-filter data */
for (i = 0; i < count; i++) {
- channel_elems[i].filter->destroy_channel_elem(&channel_elems[i]);
+ channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]);
}
}
-void grpc_call_stack_init(grpc_channel_stack *channel_stack,
+void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
const void *transport_server_data,
grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack) {
@@ -168,37 +171,40 @@ void grpc_call_stack_init(grpc_channel_stack *channel_stack,
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
- call_elems[i].filter->init_call_elem(&call_elems[i], transport_server_data,
- initial_op);
+ call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i],
+ transport_server_data, initial_op);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
-void grpc_call_stack_destroy(grpc_call_stack *stack) {
+void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
/* destroy per-filter data */
for (i = 0; i < count; i++) {
- elems[i].filter->destroy_call_elem(&elems[i]);
+ elems[i].filter->destroy_call_elem(exec_ctx, &elems[i]);
}
}
-void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op *op) {
+void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
grpc_call_element *next_elem = elem + 1;
- next_elem->filter->start_transport_stream_op(next_elem, op);
+ next_elem->filter->start_transport_stream_op(exec_ctx, next_elem, op);
}
-char *grpc_call_next_get_peer(grpc_call_element *elem) {
+char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
grpc_call_element *next_elem = elem + 1;
- return next_elem->filter->get_peer(next_elem);
+ return next_elem->filter->get_peer(exec_ctx, next_elem);
}
-void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op) {
+void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_transport_op *op) {
grpc_channel_element *next_elem = elem + 1;
- next_elem->filter->start_transport_op(next_elem, op);
+ next_elem->filter->start_transport_op(exec_ctx, next_elem, op);
}
grpc_channel_stack *grpc_channel_stack_from_top_element(
@@ -212,9 +218,10 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
sizeof(grpc_call_stack)));
}
-void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
+void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *cur_elem) {
grpc_transport_stream_op op;
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
- grpc_call_next_op(cur_elem, &op);
+ grpc_call_next_op(exec_ctx, cur_elem, &op);
}
diff --git a/src/core/channel/channel_stack.h b/src/core/channel/channel_stack.h
index 4a608b956e..6732cc3018 100644
--- a/src/core/channel/channel_stack.h
+++ b/src/core/channel/channel_stack.h
@@ -64,12 +64,14 @@ typedef struct grpc_call_element grpc_call_element;
typedef struct {
/* Called to eg. send/receive data on a call.
See grpc_call_next_op on how to call the next element in the stack */
- void (*start_transport_stream_op)(grpc_call_element *elem,
+ void (*start_transport_stream_op)(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op);
/* Called to handle channel level operations - e.g. new calls, or transport
closure.
See grpc_channel_next_op on how to call the next element in the stack */
- void (*start_transport_op)(grpc_channel_element *elem, grpc_transport_op *op);
+ void (*start_transport_op)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_transport_op *op);
/* sizeof(per call data) */
size_t sizeof_call_data;
@@ -80,13 +82,13 @@ typedef struct {
server_transport_data is an opaque pointer. If it is NULL, this call is
on a client; if it is non-NULL, then it points to memory owned by the
transport and is on the server. Most filters want to ignore this
- argument.*/
- void (*init_call_elem)(grpc_call_element *elem,
+ argument. */
+ void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op);
/* Destroy per call data.
The filter does not need to do any chaining */
- void (*destroy_call_elem)(grpc_call_element *elem);
+ void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* sizeof(per channel data) */
size_t sizeof_channel_data;
@@ -96,16 +98,17 @@ typedef struct {
is_first, is_last designate this elements position in the stack, and are
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining */
- void (*init_channel_elem)(grpc_channel_element *elem, grpc_channel *master,
- const grpc_channel_args *args,
+ void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_channel *master, const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last);
/* Destroy per channel data.
The filter does not need to do any chaining */
- void (*destroy_channel_elem)(grpc_channel_element *elem);
+ void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem);
/* Implement grpc_call_get_peer() */
- char *(*get_peer)(grpc_call_element *elem);
+ char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* The name of this filter */
const char *name;
@@ -153,31 +156,36 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
-void grpc_channel_stack_init(const grpc_channel_filter **filters,
+void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
+ const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context,
grpc_channel_stack *stack);
/* Destroy a channel stack */
-void grpc_channel_stack_destroy(grpc_channel_stack *stack);
+void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *stack);
/* Initialize a call stack given a channel stack. transport_server_data is
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
-void grpc_call_stack_init(grpc_channel_stack *channel_stack,
+void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
const void *transport_server_data,
grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack);
/* Destroy a call stack */
-void grpc_call_stack_destroy(grpc_call_stack *stack);
+void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack);
/* Call the next operation in a call stack */
-void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op *op);
+void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op *op);
/* Call the next operation (depending on call directionality) in a channel
stack */
-void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op);
+void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_transport_op *op);
/* Pass through a request to get_peer to the next child element */
-char *grpc_call_next_get_peer(grpc_call_element *elem);
+char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Given the top element of a channel stack, get the channel stack itself */
grpc_channel_stack *grpc_channel_stack_from_top_element(
@@ -188,7 +196,8 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op);
-void grpc_call_element_send_cancel(grpc_call_element *cur_elem);
+void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *cur_elem);
extern int grpc_trace_channel;
diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c
index 2e25033813..0193928a50 100644
--- a/src/core/channel/client_channel.c
+++ b/src/core/channel/client_channel.c
@@ -73,9 +73,9 @@ typedef struct {
guarded by mu_config */
grpc_client_config *incoming_configuration;
/** a list of closures that are all waiting for config to come in */
- grpc_iomgr_closure *waiting_for_config_closures;
+ grpc_closure_list waiting_for_config_closures;
/** resolver callback */
- grpc_iomgr_closure on_config_changed;
+ grpc_closure on_config_changed;
/** connectivity state being tracked */
grpc_connectivity_state_tracker state_tracker;
/** when an lb_policy arrives, should we try to exit idle */
@@ -91,7 +91,7 @@ typedef struct {
update the channel, and create a new watcher */
typedef struct {
channel_data *chand;
- grpc_iomgr_closure on_changed;
+ grpc_closure on_changed;
grpc_connectivity_state state;
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
@@ -115,7 +115,7 @@ struct call_data {
call_state state;
gpr_timespec deadline;
grpc_subchannel *picked_channel;
- grpc_iomgr_closure async_setup_task;
+ grpc_closure async_setup_task;
grpc_transport_stream_op waiting_op;
/* our child call stack */
grpc_subchannel_call *subchannel_call;
@@ -123,17 +123,18 @@ struct call_data {
grpc_linked_mdelem details;
};
-static grpc_iomgr_closure *merge_into_waiting_op(
- grpc_call_element *elem,
- grpc_transport_stream_op *new_op) GRPC_MUST_USE_RESULT;
+static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
+ grpc_transport_stream_op *new_op)
+ GRPC_MUST_USE_RESULT;
-static void handle_op_after_cancellation(grpc_call_element *elem,
+static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(op->on_done_send->cb_arg, 0);
+ op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops) {
char status[GPR_LTOA_MIN_BUFSIZE];
@@ -152,26 +153,28 @@ static void handle_op_after_cancellation(grpc_call_element *elem,
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);
+ op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
+ op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
}
}
typedef struct {
- grpc_iomgr_closure closure;
+ grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
-static void perform_transport_stream_op(grpc_call_element *elem,
+static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation);
-static void continue_with_pick(void *arg, int iomgr_success) {
+static void continue_with_pick(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
waiting_call *wc = arg;
call_data *calld = wc->elem->call_data;
- perform_transport_stream_op(wc->elem, &calld->waiting_op, 1);
+ perform_transport_stream_op(exec_ctx, wc->elem, &calld->waiting_op, 1);
gpr_free(wc);
}
@@ -179,10 +182,9 @@ static void add_to_lb_policy_wait_queue_locked_state_config(
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
waiting_call *wc = gpr_malloc(sizeof(*wc));
- grpc_iomgr_closure_init(&wc->closure, continue_with_pick, wc);
+ grpc_closure_init(&wc->closure, continue_with_pick, wc);
wc->elem = elem;
- wc->closure.next = chand->waiting_for_config_closures;
- chand->waiting_for_config_closures = &wc->closure;
+ grpc_closure_list_add(&chand->waiting_for_config_closures, &wc->closure, 1);
}
static int is_empty(void *p, int len) {
@@ -194,7 +196,8 @@ static int is_empty(void *p, int len) {
return 1;
}
-static void started_call(void *arg, int iomgr_success) {
+static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
@@ -204,21 +207,21 @@ static void started_call(void *arg, int iomgr_success) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
- grpc_subchannel_call_process_op(calld->subchannel_call, &op);
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
} else if (calld->state == CALL_WAITING_FOR_CALL) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (calld->subchannel_call != NULL) {
calld->state = CALL_ACTIVE;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
- grpc_subchannel_call_process_op(calld->subchannel_call,
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
&calld->waiting_op);
}
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
- handle_op_after_cancellation(calld->elem, &calld->waiting_op);
+ handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
} else {
@@ -227,36 +230,37 @@ static void started_call(void *arg, int iomgr_success) {
}
}
-static void picked_target(void *arg, int iomgr_success) {
+static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
call_data *calld = arg;
grpc_pollset *pollset;
if (calld->picked_channel == NULL) {
/* treat this like a cancellation */
calld->waiting_op.cancel_with_status = GRPC_STATUS_UNAVAILABLE;
- perform_transport_stream_op(calld->elem, &calld->waiting_op, 1);
+ perform_transport_stream_op(exec_ctx, calld->elem, &calld->waiting_op, 1);
} else {
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_CANCELLED) {
gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(calld->elem, &calld->waiting_op);
+ handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
} else {
GPR_ASSERT(calld->state == CALL_WAITING_FOR_PICK);
calld->state = CALL_WAITING_FOR_CALL;
pollset = calld->waiting_op.bind_pollset;
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->async_setup_task, started_call, calld);
- grpc_subchannel_create_call(calld->picked_channel, pollset,
+ grpc_closure_init(&calld->async_setup_task, started_call, calld);
+ grpc_subchannel_create_call(exec_ctx, calld->picked_channel, pollset,
&calld->subchannel_call,
&calld->async_setup_task);
}
}
}
-static grpc_iomgr_closure *merge_into_waiting_op(
- grpc_call_element *elem, grpc_transport_stream_op *new_op) {
+static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
+ grpc_transport_stream_op *new_op) {
call_data *calld = elem->call_data;
- grpc_iomgr_closure *consumed_op = NULL;
+ grpc_closure *consumed_op = NULL;
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
@@ -282,7 +286,7 @@ static grpc_iomgr_closure *merge_into_waiting_op(
return consumed_op;
}
-static char *cc_get_peer(grpc_call_element *elem) {
+static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
@@ -293,8 +297,8 @@ static char *cc_get_peer(grpc_call_element *elem) {
subchannel_call = calld->subchannel_call;
GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
gpr_mu_unlock(&calld->mu_state);
- result = grpc_subchannel_call_get_peer(subchannel_call);
- GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "get_peer");
+ result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
return result;
} else {
gpr_mu_unlock(&calld->mu_state);
@@ -302,7 +306,8 @@ static char *cc_get_peer(grpc_call_element *elem) {
}
}
-static void perform_transport_stream_op(grpc_call_element *elem,
+static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation) {
call_data *calld = elem->call_data;
@@ -310,7 +315,6 @@ static void perform_transport_stream_op(grpc_call_element *elem,
grpc_subchannel_call *subchannel_call;
grpc_lb_policy *lb_policy;
grpc_transport_stream_op op2;
- grpc_iomgr_closure *consumed_op = NULL;
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
@@ -320,15 +324,15 @@ static void perform_transport_stream_op(grpc_call_element *elem,
GPR_ASSERT(!continuation);
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
- grpc_subchannel_call_process_op(subchannel_call, op);
+ grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
break;
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(elem, op);
+ handle_op_after_cancellation(exec_ctx, elem, op);
break;
case CALL_WAITING_FOR_SEND:
GPR_ASSERT(!continuation);
- consumed_op = merge_into_waiting_op(elem, op);
+ grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
if (!calld->waiting_op.send_ops &&
calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
gpr_mu_unlock(&calld->mu_state);
@@ -354,10 +358,10 @@ static void perform_transport_stream_op(grpc_call_element *elem,
op2.on_consumed = NULL;
}
gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(elem, op);
- handle_op_after_cancellation(elem, &op2);
+ handle_op_after_cancellation(exec_ctx, elem, op);
+ handle_op_after_cancellation(exec_ctx, elem, &op2);
} else {
- consumed_op = merge_into_waiting_op(elem, op);
+ grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
gpr_mu_unlock(&calld->mu_state);
}
break;
@@ -367,7 +371,7 @@ static void perform_transport_stream_op(grpc_call_element *elem,
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(elem, op);
+ handle_op_after_cancellation(exec_ctx, elem, op);
} else {
calld->waiting_op = *op;
@@ -380,34 +384,33 @@ static void perform_transport_stream_op(grpc_call_element *elem,
gpr_mu_lock(&chand->mu_config);
lb_policy = chand->lb_policy;
if (lb_policy) {
- grpc_transport_stream_op *op = &calld->waiting_op;
- grpc_pollset *bind_pollset = op->bind_pollset;
+ grpc_transport_stream_op *waiting_op = &calld->waiting_op;
+ grpc_pollset *bind_pollset = waiting_op->bind_pollset;
grpc_metadata_batch *initial_metadata =
- &op->send_ops->ops[0].data.metadata;
+ &waiting_op->send_ops->ops[0].data.metadata;
GRPC_LB_POLICY_REF(lb_policy, "pick");
gpr_mu_unlock(&chand->mu_config);
calld->state = CALL_WAITING_FOR_PICK;
- GPR_ASSERT(op->bind_pollset);
- GPR_ASSERT(op->send_ops);
- GPR_ASSERT(op->send_ops->nops >= 1);
- GPR_ASSERT(op->send_ops->ops[0].type == GRPC_OP_METADATA);
+ GPR_ASSERT(waiting_op->bind_pollset);
+ GPR_ASSERT(waiting_op->send_ops);
+ GPR_ASSERT(waiting_op->send_ops->nops >= 1);
+ GPR_ASSERT(waiting_op->send_ops->ops[0].type == GRPC_OP_METADATA);
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->async_setup_task, picked_target,
- calld);
- grpc_lb_policy_pick(lb_policy, bind_pollset, initial_metadata,
- &calld->picked_channel,
+ grpc_closure_init(&calld->async_setup_task, picked_target, calld);
+ grpc_lb_policy_pick(exec_ctx, lb_policy, bind_pollset,
+ initial_metadata, &calld->picked_channel,
&calld->async_setup_task);
- GRPC_LB_POLICY_UNREF(lb_policy, "pick");
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick");
} else if (chand->resolver != NULL) {
calld->state = CALL_WAITING_FOR_CONFIG;
add_to_lb_policy_wait_queue_locked_state_config(elem);
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
chand->started_resolving = 1;
- grpc_resolver_next(chand->resolver,
+ grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
&chand->on_config_changed);
}
@@ -417,62 +420,68 @@ static void perform_transport_stream_op(grpc_call_element *elem,
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu_config);
gpr_mu_unlock(&calld->mu_state);
- handle_op_after_cancellation(elem, op);
+ handle_op_after_cancellation(exec_ctx, elem, op);
}
}
}
break;
}
-
- if (consumed_op != NULL) {
- consumed_op->cb(consumed_op->cb_arg, 1);
- }
}
-static void cc_start_transport_stream_op(grpc_call_element *elem,
+static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
- perform_transport_stream_op(elem, op, 0);
+ perform_transport_stream_op(exec_ctx, elem, op, 0);
}
-static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy,
+static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state);
-static void on_lb_policy_state_changed(void *arg, int iomgr_success) {
+static void on_lb_policy_state_changed_locked(
+ grpc_exec_ctx *exec_ctx, lb_policy_connectivity_watcher *w) {
+ /* check if the notification is for a stale policy */
+ if (w->lb_policy != w->chand->lb_policy) return;
+
+ grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, w->state,
+ "lb_changed");
+ if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
+ watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
+ }
+}
+
+static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
lb_policy_connectivity_watcher *w = arg;
gpr_mu_lock(&w->chand->mu_config);
- /* check if the notification is for a stale policy */
- if (w->lb_policy == w->chand->lb_policy) {
- grpc_connectivity_state_set(&w->chand->state_tracker, w->state,
- "lb_changed");
- if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
- watch_lb_policy(w->chand, w->lb_policy, w->state);
- }
- }
+ on_lb_policy_state_changed_locked(exec_ctx, w);
gpr_mu_unlock(&w->chand->mu_config);
- GRPC_CHANNEL_INTERNAL_UNREF(w->chand->master, "watch_lb_policy");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->chand->master, "watch_lb_policy");
gpr_free(w);
}
-static void watch_lb_policy(channel_data *chand, grpc_lb_policy *lb_policy,
+static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_INTERNAL_REF(chand->master, "watch_lb_policy");
w->chand = chand;
- grpc_iomgr_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
+ grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
w->state = current_state;
w->lb_policy = lb_policy;
- grpc_lb_policy_notify_on_state_change(lb_policy, &w->state, &w->on_changed);
+ grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
+ &w->on_changed);
}
-static void cc_on_config_changed(void *arg, int iomgr_success) {
+static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
grpc_resolver *old_resolver;
- grpc_iomgr_closure *wakeup_closures = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
int exit_idle = 0;
@@ -481,10 +490,10 @@ static void cc_on_config_changed(void *arg, int iomgr_success) {
if (lb_policy != NULL) {
GRPC_LB_POLICY_REF(lb_policy, "channel");
GRPC_LB_POLICY_REF(lb_policy, "config_change");
- state = grpc_lb_policy_check_connectivity(lb_policy);
+ state = grpc_lb_policy_check_connectivity(exec_ctx, lb_policy);
}
- grpc_client_config_unref(chand->incoming_configuration);
+ grpc_client_config_unref(exec_ctx, chand->incoming_configuration);
}
chand->incoming_configuration = NULL;
@@ -493,8 +502,7 @@ static void cc_on_config_changed(void *arg, int iomgr_success) {
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
if (lb_policy != NULL || chand->resolver == NULL /* disconnected */) {
- wakeup_closures = chand->waiting_for_config_closures;
- chand->waiting_for_config_closures = NULL;
+ grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures);
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
@@ -505,57 +513,53 @@ static void cc_on_config_changed(void *arg, int iomgr_success) {
if (iomgr_success && chand->resolver) {
grpc_resolver *resolver = chand->resolver;
GRPC_RESOLVER_REF(resolver, "channel-next");
- grpc_connectivity_state_set(&chand->state_tracker, state,
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state,
"new_lb+resolver");
+ if (lb_policy != NULL) {
+ watch_lb_policy(exec_ctx, chand, lb_policy, state);
+ }
gpr_mu_unlock(&chand->mu_config);
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
- grpc_resolver_next(resolver, &chand->incoming_configuration,
+ grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
- GRPC_RESOLVER_UNREF(resolver, "channel-next");
- if (lb_policy != NULL) {
- watch_lb_policy(chand, lb_policy, state);
- }
+ GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel-next");
} else {
old_resolver = chand->resolver;
chand->resolver = NULL;
- grpc_connectivity_state_set(&chand->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "resolver_gone");
gpr_mu_unlock(&chand->mu_config);
if (old_resolver != NULL) {
- grpc_resolver_shutdown(old_resolver);
- GRPC_RESOLVER_UNREF(old_resolver, "channel");
+ grpc_resolver_shutdown(exec_ctx, old_resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, old_resolver, "channel");
}
}
if (exit_idle) {
- grpc_lb_policy_exit_idle(lb_policy);
- GRPC_LB_POLICY_UNREF(lb_policy, "exit_idle");
+ grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
}
if (old_lb_policy != NULL) {
- grpc_lb_policy_shutdown(old_lb_policy);
- GRPC_LB_POLICY_UNREF(old_lb_policy, "channel");
- }
-
- while (wakeup_closures) {
- grpc_iomgr_closure *next = wakeup_closures->next;
- wakeup_closures->cb(wakeup_closures->cb_arg, 1);
- wakeup_closures = next;
+ grpc_lb_policy_shutdown(exec_ctx, old_lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
}
if (lb_policy != NULL) {
- GRPC_LB_POLICY_UNREF(lb_policy, "config_change");
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
}
- GRPC_CHANNEL_INTERNAL_UNREF(chand->master, "resolver");
+
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->master, "resolver");
}
-static void cc_start_transport_op(grpc_channel_element *elem,
+static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
grpc_transport_op *op) {
grpc_lb_policy *lb_policy = NULL;
channel_data *chand = elem->channel_data;
grpc_resolver *destroy_resolver = NULL;
- grpc_iomgr_closure *on_consumed = op->on_consumed;
- op->on_consumed = NULL;
+
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
GPR_ASSERT(op->set_accept_stream == NULL);
GPR_ASSERT(op->bind_pollset == NULL);
@@ -563,7 +567,7 @@ static void cc_start_transport_op(grpc_channel_element *elem,
gpr_mu_lock(&chand->mu_config);
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
- &chand->state_tracker, op->connectivity_state,
+ exec_ctx, &chand->state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
op->on_connectivity_state_change = NULL;
op->connectivity_state = NULL;
@@ -577,35 +581,31 @@ static void cc_start_transport_op(grpc_channel_element *elem,
}
if (op->disconnect && chand->resolver != NULL) {
- grpc_connectivity_state_set(&chand->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "disconnect");
destroy_resolver = chand->resolver;
chand->resolver = NULL;
if (chand->lb_policy != NULL) {
- grpc_lb_policy_shutdown(chand->lb_policy);
- GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
+ grpc_lb_policy_shutdown(exec_ctx, chand->lb_policy);
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
chand->lb_policy = NULL;
}
}
gpr_mu_unlock(&chand->mu_config);
if (destroy_resolver) {
- grpc_resolver_shutdown(destroy_resolver);
- GRPC_RESOLVER_UNREF(destroy_resolver, "channel");
+ grpc_resolver_shutdown(exec_ctx, destroy_resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, destroy_resolver, "channel");
}
if (lb_policy) {
- grpc_lb_policy_broadcast(lb_policy, op);
- GRPC_LB_POLICY_UNREF(lb_policy, "broadcast");
- }
-
- if (on_consumed) {
- grpc_iomgr_add_callback(on_consumed);
+ grpc_lb_policy_broadcast(exec_ctx, lb_policy, op);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "broadcast");
}
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
call_data *calld = elem->call_data;
@@ -622,7 +622,8 @@ static void init_call_elem(grpc_call_element *elem,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call;
@@ -634,7 +635,7 @@ static void destroy_call_elem(grpc_call_element *elem) {
case CALL_ACTIVE:
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
- GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "client_channel");
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_channel");
break;
case CALL_CREATED:
case CALL_CANCELLED:
@@ -651,7 +652,8 @@ static void destroy_call_elem(grpc_call_element *elem) {
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
@@ -666,25 +668,25 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
chand->mdctx = metadata_context;
chand->master = master;
grpc_pollset_set_init(&chand->pollset_set);
- grpc_iomgr_closure_init(&chand->on_config_changed, cc_on_config_changed,
- chand);
+ grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
}
/* Destructor for channel_data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
- grpc_resolver_shutdown(chand->resolver);
- GRPC_RESOLVER_UNREF(chand->resolver, "channel");
+ grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
if (chand->lb_policy != NULL) {
- GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
- grpc_connectivity_state_destroy(&chand->state_tracker);
+ grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(&chand->pollset_set);
gpr_mu_destroy(&chand->mu_config);
}
@@ -702,7 +704,8 @@ const grpc_channel_filter grpc_client_channel_filter = {
"client-channel",
};
-void grpc_client_channel_set_resolver(grpc_channel_stack *channel_stack,
+void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
grpc_resolver *resolver) {
/* post construction initialization: set the transport setup pointer */
grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
@@ -711,31 +714,32 @@ void grpc_client_channel_set_resolver(grpc_channel_stack *channel_stack,
GPR_ASSERT(!chand->resolver);
chand->resolver = resolver;
GRPC_RESOLVER_REF(resolver, "channel");
- if (chand->waiting_for_config_closures != NULL ||
+ if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
chand->exit_idle_when_lb_policy_arrives) {
chand->started_resolving = 1;
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
- grpc_resolver_next(resolver, &chand->incoming_configuration,
+ grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
}
gpr_mu_unlock(&chand->mu_config);
}
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
- grpc_channel_element *elem, int try_to_connect) {
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
grpc_connectivity_state out;
gpr_mu_lock(&chand->mu_config);
out = grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
if (chand->lb_policy != NULL) {
- grpc_lb_policy_exit_idle(chand->lb_policy);
+ grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
} else {
chand->exit_idle_when_lb_policy_arrives = 1;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
chand->started_resolving = 1;
- grpc_resolver_next(chand->resolver, &chand->incoming_configuration,
+ grpc_resolver_next(exec_ctx, chand->resolver,
+ &chand->incoming_configuration,
&chand->on_config_changed);
}
}
@@ -745,12 +749,12 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
}
void grpc_client_channel_watch_connectivity_state(
- grpc_channel_element *elem, grpc_connectivity_state *state,
- grpc_iomgr_closure *on_complete) {
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
gpr_mu_lock(&chand->mu_config);
- grpc_connectivity_state_notify_on_state_change(&chand->state_tracker, state,
- on_complete);
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &chand->state_tracker, state, on_complete);
gpr_mu_unlock(&chand->mu_config);
}
@@ -760,14 +764,16 @@ grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
return &chand->pollset_set;
}
-void grpc_client_channel_add_interested_party(grpc_channel_element *elem,
+void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
- grpc_pollset_set_add_pollset(&chand->pollset_set, pollset);
+ grpc_pollset_set_add_pollset(exec_ctx, &chand->pollset_set, pollset);
}
-void grpc_client_channel_del_interested_party(grpc_channel_element *elem,
+void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
- grpc_pollset_set_del_pollset(&chand->pollset_set, pollset);
+ grpc_pollset_set_del_pollset(exec_ctx, &chand->pollset_set, pollset);
}
diff --git a/src/core/channel/client_channel.h b/src/core/channel/client_channel.h
index 13681e3956..5103f07a43 100644
--- a/src/core/channel/client_channel.h
+++ b/src/core/channel/client_channel.h
@@ -49,22 +49,25 @@ extern const grpc_channel_filter grpc_client_channel_filter;
/* post-construction initializer to let the client channel know which
transport setup it should cancel upon destruction, or initiate when it needs
a connection */
-void grpc_client_channel_set_resolver(grpc_channel_stack *channel_stack,
+void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
grpc_resolver *resolver);
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
- grpc_channel_element *elem, int try_to_connect);
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_channel_watch_connectivity_state(
- grpc_channel_element *elem, grpc_connectivity_state *state,
- grpc_iomgr_closure *on_complete);
+ grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
+ grpc_connectivity_state *state, grpc_closure *on_complete);
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem);
-void grpc_client_channel_add_interested_party(grpc_channel_element *channel,
+void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *channel,
grpc_pollset *pollset);
-void grpc_client_channel_del_interested_party(grpc_channel_element *channel,
+void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *channel,
grpc_pollset *pollset);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H */
diff --git a/src/core/channel/compress_filter.c b/src/core/channel/compress_filter.c
index 7959603102..f8dbe8c817 100644
--- a/src/core/channel/compress_filter.c
+++ b/src/core/channel/compress_filter.c
@@ -48,8 +48,8 @@ typedef struct call_data {
gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
- gpr_uint32
- remaining_slice_bytes; /**< Input data to be read, as per BEGIN_MESSAGE */
+ gpr_uint32 remaining_slice_bytes;
+ /**< Input data to be read, as per BEGIN_MESSAGE */
int written_initial_metadata; /**< Already processed initial md? */
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
@@ -268,18 +268,19 @@ static void process_send_ops(grpc_call_element *elem,
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
-static void compress_start_transport_stream_op(grpc_call_element *elem,
+static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
if (op->send_ops && op->send_ops->nops > 0) {
process_send_ops(elem, op->send_ops);
}
/* pass control down the stack */
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
/* grab pointers to our data from the call element */
@@ -298,14 +299,16 @@ static void init_call_elem(grpc_call_element *elem,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
gpr_slice_buffer_destroy(&calld->slices);
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
channel_data *channeld = elem->channel_data;
@@ -369,7 +372,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
channel_data *channeld = elem->channel_data;
grpc_compression_algorithm algo_idx;
diff --git a/src/core/channel/connected_channel.c b/src/core/channel/connected_channel.c
index b95ed06f2b..ea701bc284 100644
--- a/src/core/channel/connected_channel.c
+++ b/src/core/channel/connected_channel.c
@@ -61,25 +61,27 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
/* Intercept a call operation and either push it directly up or translate it
into transport stream operations */
-static void con_start_transport_stream_op(grpc_call_element *elem,
+static void con_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- grpc_transport_perform_stream_op(chand->transport,
+ grpc_transport_perform_stream_op(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
}
-static void con_start_transport_op(grpc_channel_element *elem,
+static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
- grpc_transport_perform_op(chand->transport, op);
+ grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
call_data *calld = elem->call_data;
@@ -87,23 +89,25 @@ static void init_call_elem(grpc_call_element *elem,
int r;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- r = grpc_transport_init_stream(chand->transport,
+ r = grpc_transport_init_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
server_transport_data, initial_op);
GPR_ASSERT(r == 0);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- grpc_transport_destroy_stream(chand->transport,
+ grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld));
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
channel_data *cd = (channel_data *)elem->channel_data;
@@ -113,15 +117,16 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel_data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
- grpc_transport_destroy(cd->transport);
+ grpc_transport_destroy(exec_ctx, cd->transport);
}
-static char *con_get_peer(grpc_call_element *elem) {
+static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
- return grpc_transport_get_peer(chand->transport);
+ return grpc_transport_get_peer(exec_ctx, chand->transport);
}
const grpc_channel_filter grpc_connected_channel_filter = {
diff --git a/src/core/channel/connected_channel.h b/src/core/channel/connected_channel.h
index b615b0d350..eac6eb7ebe 100644
--- a/src/core/channel/connected_channel.h
+++ b/src/core/channel/connected_channel.h
@@ -43,7 +43,7 @@ extern const grpc_channel_filter grpc_connected_channel_filter;
/* Post construction fixup: set the transport in the connected channel.
Must be called before any call stack using this filter is used. */
-void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
- grpc_transport *transport);
+void grpc_connected_channel_bind_transport(grpc_channel_stack* channel_stack,
+ grpc_transport* transport);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H */
diff --git a/src/core/channel/http_client_filter.c b/src/core/channel/http_client_filter.c
index 5f20f8c16d..da33c956f7 100644
--- a/src/core/channel/http_client_filter.c
+++ b/src/core/channel/http_client_filter.c
@@ -50,11 +50,11 @@ typedef struct call_data {
grpc_stream_op_buffer *recv_ops;
/** Closure to call when finished with the hc_on_recv hook */
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
- grpc_iomgr_closure hc_on_recv;
+ grpc_closure hc_on_recv;
} call_data;
typedef struct channel_data {
@@ -67,16 +67,19 @@ typedef struct channel_data {
grpc_mdelem *user_agent;
} channel_data;
-/* used to silence 'variable not used' warnings */
-static void ignore_unused(void *ignored) {}
+typedef struct {
+ grpc_call_element *elem;
+ grpc_exec_ctx *exec_ctx;
+} client_recv_filter_args;
static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
+ client_recv_filter_args *a = user_data;
+ grpc_call_element *elem = a->elem;
channel_data *channeld = elem->channel_data;
if (md == channeld->status) {
return NULL;
} else if (md->key == channeld->status->key) {
- grpc_call_element_send_cancel(elem);
+ grpc_call_element_send_cancel(a->exec_ctx, elem);
return NULL;
} else if (md->key == channeld->content_type->key) {
return NULL;
@@ -84,7 +87,7 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
return md;
}
-static void hc_on_recv(void *user_data, int success) {
+static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
size_t i;
@@ -92,15 +95,16 @@ static void hc_on_recv(void *user_data, int success) {
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
+ client_recv_filter_args a;
if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1;
- grpc_metadata_batch_filter(&op->data.metadata, client_recv_filter, elem);
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(&op->data.metadata, client_recv_filter, &a);
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
-
-
static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
channel_data *channeld = elem->channel_data;
@@ -123,21 +127,25 @@ static void hc_mutate_op(grpc_call_element *elem,
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
+ grpc_stream_op *stream_op = &ops[i];
+ if (stream_op->type != GRPC_OP_METADATA) continue;
calld->sent_initial_metadata = 1;
- grpc_metadata_batch_filter(&op->data.metadata, client_strip_filter, elem);
+ grpc_metadata_batch_filter(&stream_op->data.metadata, client_strip_filter,
+ elem);
/* Send : prefixed headers, which have to be before any application
layer headers. */
- grpc_metadata_batch_add_head(&op->data.metadata, &calld->method,
+ grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->method,
GRPC_MDELEM_REF(channeld->method));
- grpc_metadata_batch_add_head(&op->data.metadata, &calld->scheme,
+ grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->scheme,
GRPC_MDELEM_REF(channeld->scheme));
- grpc_metadata_batch_add_tail(&op->data.metadata, &calld->te_trailers,
+ grpc_metadata_batch_add_tail(&stream_op->data.metadata,
+ &calld->te_trailers,
GRPC_MDELEM_REF(channeld->te_trailers));
- grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type,
+ grpc_metadata_batch_add_tail(&stream_op->data.metadata,
+ &calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
- grpc_metadata_batch_add_tail(&op->data.metadata, &calld->user_agent,
+ grpc_metadata_batch_add_tail(&stream_op->data.metadata,
+ &calld->user_agent,
GRPC_MDELEM_REF(channeld->user_agent));
break;
}
@@ -151,34 +159,29 @@ static void hc_mutate_op(grpc_call_element *elem,
}
}
-static void hc_start_transport_op(grpc_call_element *elem,
+static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
call_data *calld = elem->call_data;
calld->sent_initial_metadata = 0;
calld->got_initial_metadata = 0;
calld->on_done_recv = NULL;
- grpc_iomgr_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
+ grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
if (initial_op) hc_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(calld);
- ignore_unused(channeld);
-}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
static const char *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
@@ -243,7 +246,8 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *channel_args,
grpc_mdctx *mdctx, int is_first, int is_last) {
/* grab pointers to our data from the channel element */
@@ -268,7 +272,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
diff --git a/src/core/channel/http_server_filter.c b/src/core/channel/http_server_filter.c
index 9898efd608..549c1da580 100644
--- a/src/core/channel/http_server_filter.c
+++ b/src/core/channel/http_server_filter.c
@@ -50,11 +50,11 @@ typedef struct call_data {
grpc_stream_op_buffer *recv_ops;
/** Closure to call when finished with the hs_on_recv hook */
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
- grpc_iomgr_closure hs_on_recv;
+ grpc_closure hs_on_recv;
} call_data;
typedef struct channel_data {
@@ -74,8 +74,14 @@ typedef struct channel_data {
grpc_mdctx *mdctx;
} channel_data;
+typedef struct {
+ grpc_call_element *elem;
+ grpc_exec_ctx *exec_ctx;
+} server_filter_args;
+
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
- grpc_call_element *elem = user_data;
+ server_filter_args *a = user_data;
+ grpc_call_element *elem = a->elem;
channel_data *channeld = elem->channel_data;
call_data *calld = elem->call_data;
@@ -117,7 +123,7 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* swallow it and error everything out. */
/* TODO(klempner): We ought to generate more descriptive error messages
on the wire here. */
- grpc_call_element_send_cancel(elem);
+ grpc_call_element_send_cancel(a->exec_ctx, elem);
return NULL;
} else if (md->key == channeld->path_key) {
if (calld->seen_path) {
@@ -143,7 +149,7 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
}
}
-static void hs_on_recv(void *user_data, int success) {
+static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (success) {
@@ -152,9 +158,12 @@ static void hs_on_recv(void *user_data, int success) {
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
+ server_filter_args a;
if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1;
- grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
+ a.elem = elem;
+ a.exec_ctx = exec_ctx;
+ grpc_metadata_batch_filter(&op->data.metadata, server_filter, &a);
/* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */
@@ -179,11 +188,11 @@ static void hs_on_recv(void *user_data, int success) {
}
/* Error this call out */
success = 0;
- grpc_call_element_send_cancel(elem);
+ grpc_call_element_send_cancel(exec_ctx, elem);
}
}
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
static void hs_mutate_op(grpc_call_element *elem,
@@ -197,12 +206,13 @@ static void hs_mutate_op(grpc_call_element *elem,
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
- grpc_stream_op *op = &ops[i];
- if (op->type != GRPC_OP_METADATA) continue;
+ grpc_stream_op *stream_op = &ops[i];
+ if (stream_op->type != GRPC_OP_METADATA) continue;
calld->sent_status = 1;
- grpc_metadata_batch_add_head(&op->data.metadata, &calld->status,
+ grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->status,
GRPC_MDELEM_REF(channeld->status_ok));
- grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type,
+ grpc_metadata_batch_add_tail(&stream_op->data.metadata,
+ &calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
break;
}
@@ -216,30 +226,33 @@ static void hs_mutate_op(grpc_call_element *elem,
}
}
-static void hs_start_transport_op(grpc_call_element *elem,
+static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hs_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
memset(calld, 0, sizeof(*calld));
- grpc_iomgr_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
+ grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
if (initial_op) hs_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
/* grab pointers to our data from the channel element */
@@ -270,7 +283,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
diff --git a/src/core/channel/noop_filter.c b/src/core/channel/noop_filter.c
index d631885aaf..91b30d61ca 100644
--- a/src/core/channel/noop_filter.c
+++ b/src/core/channel/noop_filter.c
@@ -62,16 +62,17 @@ static void noop_mutate_op(grpc_call_element *elem,
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
-static void noop_start_transport_stream_op(grpc_call_element *elem,
+static void noop_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
noop_mutate_op(elem, op);
/* pass control down the stack */
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
/* grab pointers to our data from the call element */
@@ -85,17 +86,12 @@ static void init_call_elem(grpc_call_element *elem,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
-
- ignore_unused(calld);
- ignore_unused(channeld);
-}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
/* grab pointers to our data from the channel element */
@@ -112,7 +108,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
diff --git a/src/core/client_config/client_config.c b/src/core/client_config/client_config.c
index 4453824148..6ecffb3854 100644
--- a/src/core/client_config/client_config.c
+++ b/src/core/client_config/client_config.c
@@ -51,21 +51,19 @@ grpc_client_config *grpc_client_config_create() {
void grpc_client_config_ref(grpc_client_config *c) { gpr_ref(&c->refs); }
-void grpc_client_config_unref(grpc_client_config *c) {
+void grpc_client_config_unref(grpc_exec_ctx *exec_ctx, grpc_client_config *c) {
if (gpr_unref(&c->refs)) {
- GRPC_LB_POLICY_UNREF(c->lb_policy, "client_config");
+ GRPC_LB_POLICY_UNREF(exec_ctx, c->lb_policy, "client_config");
gpr_free(c);
}
}
void grpc_client_config_set_lb_policy(grpc_client_config *c,
grpc_lb_policy *lb_policy) {
+ GPR_ASSERT(c->lb_policy == NULL);
if (lb_policy) {
GRPC_LB_POLICY_REF(lb_policy, "client_config");
}
- if (c->lb_policy) {
- GRPC_LB_POLICY_UNREF(c->lb_policy, "client_config");
- }
c->lb_policy = lb_policy;
}
diff --git a/src/core/client_config/client_config.h b/src/core/client_config/client_config.h
index 47612da42c..04bf036b00 100644
--- a/src/core/client_config/client_config.h
+++ b/src/core/client_config/client_config.h
@@ -42,7 +42,8 @@ typedef struct grpc_client_config grpc_client_config;
grpc_client_config *grpc_client_config_create();
void grpc_client_config_ref(grpc_client_config *client_config);
-void grpc_client_config_unref(grpc_client_config *client_config);
+void grpc_client_config_unref(grpc_exec_ctx *exec_ctx,
+ grpc_client_config *client_config);
void grpc_client_config_set_lb_policy(grpc_client_config *client_config,
grpc_lb_policy *lb_policy);
diff --git a/src/core/client_config/connector.c b/src/core/client_config/connector.c
index c1e583e4a5..1603ffb8be 100644
--- a/src/core/client_config/connector.c
+++ b/src/core/client_config/connector.c
@@ -33,21 +33,22 @@
#include "src/core/client_config/connector.h"
-void grpc_connector_ref(grpc_connector *connector) {
+void grpc_connector_ref(grpc_connector* connector) {
connector->vtable->ref(connector);
}
-void grpc_connector_unref(grpc_connector *connector) {
- connector->vtable->unref(connector);
+void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector) {
+ connector->vtable->unref(exec_ctx, connector);
}
-void grpc_connector_connect(grpc_connector *connector,
- const grpc_connect_in_args *in_args,
- grpc_connect_out_args *out_args,
- grpc_iomgr_closure *notify) {
- connector->vtable->connect(connector, in_args, out_args, notify);
+void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ const grpc_connect_in_args* in_args,
+ grpc_connect_out_args* out_args,
+ grpc_closure* notify) {
+ connector->vtable->connect(exec_ctx, connector, in_args, out_args, notify);
}
-void grpc_connector_shutdown(grpc_connector *connector) {
- connector->vtable->shutdown(connector);
+void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_connector* connector) {
+ connector->vtable->shutdown(exec_ctx, connector);
}
diff --git a/src/core/client_config/connector.h b/src/core/client_config/connector.h
index 01aa716412..e9b8be4b53 100644
--- a/src/core/client_config/connector.h
+++ b/src/core/client_config/connector.h
@@ -55,8 +55,6 @@ typedef struct {
gpr_timespec deadline;
/** channel arguments (to be passed to transport) */
const grpc_channel_args *channel_args;
- /** metadata context */
- grpc_mdctx *metadata_context;
} grpc_connect_in_args;
typedef struct {
@@ -69,23 +67,24 @@ typedef struct {
struct grpc_connector_vtable {
void (*ref)(grpc_connector *connector);
- void (*unref)(grpc_connector *connector);
+ void (*unref)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
/** Implementation of grpc_connector_shutdown */
- void (*shutdown)(grpc_connector *connector);
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
/** Implementation of grpc_connector_connect */
- void (*connect)(grpc_connector *connector,
+ void (*connect)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
const grpc_connect_in_args *in_args,
- grpc_connect_out_args *out_args, grpc_iomgr_closure *notify);
+ grpc_connect_out_args *out_args, grpc_closure *notify);
};
void grpc_connector_ref(grpc_connector *connector);
-void grpc_connector_unref(grpc_connector *connector);
+void grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
/** Connect using the connector: max one outstanding call at a time */
-void grpc_connector_connect(grpc_connector *connector,
+void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
const grpc_connect_in_args *in_args,
grpc_connect_out_args *out_args,
- grpc_iomgr_closure *notify);
+ grpc_closure *notify);
/** Cancel any pending connection */
-void grpc_connector_shutdown(grpc_connector *connector);
+void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_connector *connector);
#endif
diff --git a/src/core/client_config/lb_policies/pick_first.c b/src/core/client_config/lb_policies/pick_first.c
index c8262e92ef..5fa1ee4418 100644
--- a/src/core/client_config/lb_policies/pick_first.c
+++ b/src/core/client_config/lb_policies/pick_first.c
@@ -43,7 +43,7 @@ typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
grpc_subchannel **target;
- grpc_iomgr_closure *on_complete;
+ grpc_closure *on_complete;
} pending_pick;
typedef struct {
@@ -53,7 +53,7 @@ typedef struct {
grpc_subchannel **subchannels;
size_t num_subchannels;
- grpc_iomgr_closure connectivity_changed;
+ grpc_closure connectivity_changed;
/** mutex protecting remaining members */
gpr_mu mu;
@@ -76,87 +76,92 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
-static void del_interested_parties_locked(pick_first_lb_policy *p) {
+static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
- grpc_subchannel_del_interested_party(p->subchannels[p->checking_subchannel],
- pp->pollset);
+ grpc_subchannel_del_interested_party(
+ exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
}
}
-static void add_interested_parties_locked(pick_first_lb_policy *p) {
+static void add_interested_parties_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
- grpc_subchannel_add_interested_party(p->subchannels[p->checking_subchannel],
- pp->pollset);
+ grpc_subchannel_add_interested_party(
+ exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
}
}
-void pf_destroy(grpc_lb_policy *pol) {
+void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
- del_interested_parties_locked(p);
+ GPR_ASSERT(p->pending_picks == NULL);
for (i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(p->subchannels[i], "pick_first");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
- grpc_connectivity_state_destroy(&p->state_tracker);
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
gpr_mu_destroy(&p->mu);
gpr_free(p);
}
-void pf_shutdown(grpc_lb_policy *pol) {
+void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
- del_interested_parties_locked(p);
+ del_interested_parties_locked(exec_ctx, p);
p->shutdown = 1;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
+ pp = p->pending_picks;
+ p->pending_picks = NULL;
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
+ gpr_mu_unlock(&p->mu);
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_iomgr_add_delayed_callback(pp->on_complete, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
+ pp = next;
}
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_FATAL_FAILURE,
- "shutdown");
- gpr_mu_unlock(&p->mu);
}
-static void start_picking(pick_first_lb_policy *p) {
+static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
p->started_picking = 1;
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
GRPC_LB_POLICY_REF(&p->base, "pick_first_connectivity");
- grpc_subchannel_notify_on_state_change(p->subchannels[p->checking_subchannel],
- &p->checking_connectivity,
- &p->connectivity_changed);
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ &p->checking_connectivity, &p->connectivity_changed);
}
-void pf_exit_idle(grpc_lb_policy *pol) {
+void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (!p->started_picking) {
- start_picking(p);
+ start_picking(exec_ctx, p);
}
gpr_mu_unlock(&p->mu);
}
-void pf_pick(grpc_lb_policy *pol, grpc_pollset *pollset,
- grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
- grpc_iomgr_closure *on_complete) {
+void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_subchannel **target, grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->selected) {
gpr_mu_unlock(&p->mu);
*target = p->selected;
- on_complete->cb(on_complete->cb_arg, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, on_complete, 1);
} else {
if (!p->started_picking) {
- start_picking(p);
+ start_picking(exec_ctx, p);
}
- grpc_subchannel_add_interested_party(p->subchannels[p->checking_subchannel],
- pollset);
+ grpc_subchannel_add_interested_party(
+ exec_ctx, p->subchannels[p->checking_subchannel], pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@@ -167,105 +172,109 @@ void pf_pick(grpc_lb_policy *pol, grpc_pollset *pollset,
}
}
-static void pf_connectivity_changed(void *arg, int iomgr_success) {
+static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
pick_first_lb_policy *p = arg;
pending_pick *pp;
- int unref = 0;
gpr_mu_lock(&p->mu);
if (p->shutdown) {
- unref = 1;
+ gpr_mu_unlock(&p->mu);
+ GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
+ return;
} else if (p->selected != NULL) {
- grpc_connectivity_state_set(&p->state_tracker, p->checking_connectivity,
- "selected_changed");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ p->checking_connectivity, "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
- grpc_subchannel_notify_on_state_change(
- p->selected, &p->checking_connectivity, &p->connectivity_changed);
+ grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
+ &p->checking_connectivity,
+ &p->connectivity_changed);
} else {
- unref = 1;
+ GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
}
} else {
loop:
switch (p->checking_connectivity) {
case GRPC_CHANNEL_READY:
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
- "connecting_ready");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, "connecting_ready");
p->selected = p->subchannels[p->checking_subchannel];
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
- grpc_subchannel_del_interested_party(p->selected, pp->pollset);
- grpc_iomgr_add_delayed_callback(pp->on_complete, 1);
+ grpc_subchannel_del_interested_party(exec_ctx, p->selected,
+ pp->pollset);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
- grpc_subchannel_notify_on_state_change(
- p->selected, &p->checking_connectivity, &p->connectivity_changed);
+ grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
+ &p->checking_connectivity,
+ &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
- del_interested_parties_locked(p);
+ del_interested_parties_locked(exec_ctx, p);
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
- add_interested_parties_locked(p);
+ add_interested_parties_locked(exec_ctx, p);
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
grpc_subchannel_notify_on_state_change(
- p->subchannels[p->checking_subchannel], &p->checking_connectivity,
- &p->connectivity_changed);
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ &p->checking_connectivity, &p->connectivity_changed);
} else {
goto loop;
}
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
- grpc_connectivity_state_set(&p->state_tracker, p->checking_connectivity,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_CONNECTING,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
- p->subchannels[p->checking_subchannel], &p->checking_connectivity,
- &p->connectivity_changed);
+ exec_ctx, p->subchannels[p->checking_subchannel],
+ &p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_FATAL_FAILURE:
- del_interested_parties_locked(p);
+ del_interested_parties_locked(exec_ctx, p);
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
- GRPC_SUBCHANNEL_UNREF(p->subchannels[p->num_subchannels], "pick_first");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
+ "pick_first");
if (p->num_subchannels == 0) {
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE,
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_iomgr_add_delayed_callback(pp->on_complete, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
- unref = 1;
+ GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
} else {
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"subchannel_failed");
p->checking_subchannel %= p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
- add_interested_parties_locked(p);
+ add_interested_parties_locked(exec_ctx, p);
goto loop;
}
}
}
gpr_mu_unlock(&p->mu);
-
- if (unref) {
- GRPC_LB_POLICY_UNREF(&p->base, "pick_first_connectivity");
- }
}
-static void pf_broadcast(grpc_lb_policy *pol, grpc_transport_op *op) {
+static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_transport_op *op) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
size_t n;
@@ -281,13 +290,14 @@ static void pf_broadcast(grpc_lb_policy *pol, grpc_transport_op *op) {
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
- grpc_subchannel_process_transport_op(subchannels[i], op);
- GRPC_SUBCHANNEL_UNREF(subchannels[i], "pf_broadcast");
+ grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pf_broadcast");
}
gpr_free(subchannels);
}
-static grpc_connectivity_state pf_check_connectivity(grpc_lb_policy *pol) {
+static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
grpc_connectivity_state st;
gpr_mu_lock(&p->mu);
@@ -296,13 +306,13 @@ static grpc_connectivity_state pf_check_connectivity(grpc_lb_policy *pol) {
return st;
}
-static void pf_notify_on_state_change(grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_iomgr_closure *notify) {
+void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
- grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
- notify);
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
gpr_mu_unlock(&p->mu);
}
@@ -325,13 +335,14 @@ static grpc_lb_policy *create_pick_first(grpc_lb_policy_factory *factory,
GPR_ASSERT(args->num_subchannels > 0);
memset(p, 0, sizeof(*p));
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
- p->subchannels = gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
+ p->subchannels =
+ gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
p->num_subchannels = args->num_subchannels;
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"pick_first");
memcpy(p->subchannels, args->subchannels,
sizeof(grpc_subchannel *) * args->num_subchannels);
- grpc_iomgr_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
+ grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
gpr_mu_init(&p->mu);
return &p->base;
}
diff --git a/src/core/client_config/lb_policies/round_robin.c b/src/core/client_config/lb_policies/round_robin.c
index 39fb2bc401..479c376724 100644
--- a/src/core/client_config/lb_policies/round_robin.c
+++ b/src/core/client_config/lb_policies/round_robin.c
@@ -47,7 +47,7 @@ typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
grpc_subchannel **target;
- grpc_iomgr_closure *on_complete;
+ grpc_closure *on_complete;
} pending_pick;
/** List of subchannels in a connectivity READY state */
@@ -59,7 +59,7 @@ typedef struct ready_list {
typedef struct {
size_t subchannel_idx; /**< Index over p->subchannels */
- void *p; /**< round_robin_lb_policy instance */
+ void *p; /**< round_robin_lb_policy instance */
} connectivity_changed_cb_arg;
typedef struct {
@@ -72,7 +72,7 @@ typedef struct {
/** Callbacks, one per subchannel being watched, to be called when their
* respective connectivity changes */
- grpc_iomgr_closure *connectivity_changed_cbs;
+ grpc_closure *connectivity_changed_cbs;
connectivity_changed_cb_arg *cb_args;
/** mutex protecting remaining members */
@@ -160,8 +160,7 @@ static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
p->ready_list.prev = new_elem;
}
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG,
- "[READYLIST] ADDING NODE %p (SC %p)", new_elem, csc);
+ gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, csc);
}
return new_elem;
}
@@ -201,30 +200,30 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
gpr_free(node);
}
-static void del_interested_parties_locked(round_robin_lb_policy *p,
+static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
+ round_robin_lb_policy *p,
const size_t subchannel_idx) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
- grpc_subchannel_del_interested_party(p->subchannels[subchannel_idx],
- pp->pollset);
+ grpc_subchannel_del_interested_party(
+ exec_ctx, p->subchannels[subchannel_idx], pp->pollset);
}
}
-
-void rr_destroy(grpc_lb_policy *pol) {
+void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
ready_list *elem;
for (i = 0; i < p->num_subchannels; i++) {
- del_interested_parties_locked(p, i);
+ del_interested_parties_locked(exec_ctx, p, i);
}
for (i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(p->subchannels[i], "round_robin");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "round_robin");
}
gpr_free(p->connectivity_changed_cbs);
gpr_free(p->subchannel_connectivity);
- grpc_connectivity_state_destroy(&p->state_tracker);
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
gpr_mu_destroy(&p->mu);
@@ -243,53 +242,53 @@ void rr_destroy(grpc_lb_policy *pol) {
gpr_free(p);
}
-void rr_shutdown(grpc_lb_policy *pol) {
+void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
for (i = 0; i < p->num_subchannels; i++) {
- del_interested_parties_locked(p, i);
+ del_interested_parties_locked(exec_ctx, p, i);
}
p->shutdown = 1;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_iomgr_add_delayed_callback(pp->on_complete, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
}
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_FATAL_FAILURE,
- "shutdown");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
gpr_mu_unlock(&p->mu);
}
-static void start_picking(round_robin_lb_policy *p) {
+static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
for (i = 0; i < p->num_subchannels; i++) {
p->subchannel_connectivity[i] = GRPC_CHANNEL_IDLE;
- grpc_subchannel_notify_on_state_change(p->subchannels[i],
+ grpc_subchannel_notify_on_state_change(exec_ctx, p->subchannels[i],
&p->subchannel_connectivity[i],
&p->connectivity_changed_cbs[i]);
GRPC_LB_POLICY_REF(&p->base, "round_robin_connectivity");
}
}
-void rr_exit_idle(grpc_lb_policy *pol) {
+void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (!p->started_picking) {
- start_picking(p);
+ start_picking(exec_ctx, p);
}
gpr_mu_unlock(&p->mu);
}
-void rr_pick(grpc_lb_policy *pol, grpc_pollset *pollset,
- grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
- grpc_iomgr_closure *on_complete) {
+void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_subchannel **target, grpc_closure *on_complete) {
size_t i;
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
@@ -304,13 +303,14 @@ void rr_pick(grpc_lb_policy *pol, grpc_pollset *pollset,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
- on_complete->cb(on_complete->cb_arg, 1);
+ on_complete->cb(exec_ctx, on_complete->cb_arg, 1);
} else {
if (!p->started_picking) {
- start_picking(p);
+ start_picking(exec_ctx, p);
}
for (i = 0; i < p->num_subchannels; i++) {
- grpc_subchannel_add_interested_party(p->subchannels[i], pollset);
+ grpc_subchannel_add_interested_party(exec_ctx, p->subchannels[i],
+ pollset);
}
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
@@ -322,7 +322,8 @@ void rr_pick(grpc_lb_policy *pol, grpc_pollset *pollset,
}
}
-static void rr_connectivity_changed(void *arg, int iomgr_success) {
+static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
connectivity_changed_cb_arg *cb_arg = arg;
round_robin_lb_policy *p = cb_arg->p;
/* index over p->subchannels of this cb's subchannel */
@@ -337,16 +338,15 @@ static void rr_connectivity_changed(void *arg, int iomgr_success) {
gpr_mu_lock(&p->mu);
- this_connectivity =
- &p->subchannel_connectivity[this_idx];
+ this_connectivity = &p->subchannel_connectivity[this_idx];
if (p->shutdown) {
unref = 1;
} else {
switch (*this_connectivity) {
case GRPC_CHANNEL_READY:
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
- "connecting_ready");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, "connecting_ready");
/* add the newly connected subchannel to the list of connected ones.
* Note that it goes to the "end of the line". */
p->subchannel_index_to_readylist_node[this_idx] =
@@ -368,79 +368,82 @@ static void rr_connectivity_changed(void *arg, int iomgr_success) {
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
- grpc_subchannel_del_interested_party(selected->subchannel,
+ grpc_subchannel_del_interested_party(exec_ctx, selected->subchannel,
pp->pollset);
- grpc_iomgr_add_delayed_callback(pp->on_complete, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
- p->subchannels[this_idx], this_connectivity,
+ exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
- grpc_connectivity_state_set(&p->state_tracker, *this_connectivity,
- "connecting_changed");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ *this_connectivity, "connecting_changed");
grpc_subchannel_notify_on_state_change(
- p->subchannels[this_idx], this_connectivity,
+ exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
- del_interested_parties_locked(p, this_idx);
+ del_interested_parties_locked(exec_ctx, p, this_idx);
/* renew state notification */
grpc_subchannel_notify_on_state_change(
- p->subchannels[this_idx], this_connectivity,
+ exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
/* remove from ready list if still present */
if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
- remove_disconnected_sc_locked(p, p->subchannel_index_to_readylist_node[this_idx]);
+ remove_disconnected_sc_locked(
+ p, p->subchannel_index_to_readylist_node[this_idx]);
p->subchannel_index_to_readylist_node[this_idx] = NULL;
}
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
break;
case GRPC_CHANNEL_FATAL_FAILURE:
- del_interested_parties_locked(p, this_idx);
+ del_interested_parties_locked(exec_ctx, p, this_idx);
if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
- remove_disconnected_sc_locked(p, p->subchannel_index_to_readylist_node[this_idx]);
+ remove_disconnected_sc_locked(
+ p, p->subchannel_index_to_readylist_node[this_idx]);
p->subchannel_index_to_readylist_node[this_idx] = NULL;
}
GPR_SWAP(grpc_subchannel *, p->subchannels[this_idx],
p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
- GRPC_SUBCHANNEL_UNREF(p->subchannels[p->num_subchannels],
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"round_robin");
if (p->num_subchannels == 0) {
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE,
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_iomgr_add_delayed_callback(pp->on_complete, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
unref = 1;
} else {
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"subchannel_failed");
}
- } /* switch */
- } /* !unref */
+ } /* switch */
+ } /* !unref */
gpr_mu_unlock(&p->mu);
if (unref) {
- GRPC_LB_POLICY_UNREF(&p->base, "round_robin_connectivity");
+ GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
}
}
-static void rr_broadcast(grpc_lb_policy *pol, grpc_transport_op *op) {
+static void rr_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_transport_op *op) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
size_t n;
@@ -456,13 +459,14 @@ static void rr_broadcast(grpc_lb_policy *pol, grpc_transport_op *op) {
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
- grpc_subchannel_process_transport_op(subchannels[i], op);
- GRPC_SUBCHANNEL_UNREF(subchannels[i], "rr_broadcast");
+ grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "rr_broadcast");
}
gpr_free(subchannels);
}
-static grpc_connectivity_state rr_check_connectivity(grpc_lb_policy *pol) {
+static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
grpc_connectivity_state st;
gpr_mu_lock(&p->mu);
@@ -471,13 +475,14 @@ static grpc_connectivity_state rr_check_connectivity(grpc_lb_policy *pol) {
return st;
}
-static void rr_notify_on_state_change(grpc_lb_policy *pol,
+static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
grpc_connectivity_state *current,
- grpc_iomgr_closure *notify) {
+ grpc_closure *notify) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
gpr_mu_lock(&p->mu);
- grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
- notify);
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
gpr_mu_unlock(&p->mu);
}
@@ -490,7 +495,6 @@ static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_check_connectivity,
rr_notify_on_state_change};
-
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
@@ -502,7 +506,8 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
GPR_ASSERT(args->num_subchannels > 0);
memset(p, 0, sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
- p->subchannels = gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
+ p->subchannels =
+ gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
p->num_subchannels = args->num_subchannels;
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
@@ -511,17 +516,17 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
gpr_mu_init(&p->mu);
p->connectivity_changed_cbs =
- gpr_malloc(sizeof(grpc_iomgr_closure) * args->num_subchannels);
+ gpr_malloc(sizeof(grpc_closure) * args->num_subchannels);
p->subchannel_connectivity =
gpr_malloc(sizeof(grpc_connectivity_state) * args->num_subchannels);
p->cb_args =
gpr_malloc(sizeof(connectivity_changed_cb_arg) * args->num_subchannels);
- for(i = 0; i < args->num_subchannels; i++) {
+ for (i = 0; i < args->num_subchannels; i++) {
p->cb_args[i].subchannel_idx = i;
p->cb_args[i].p = p;
- grpc_iomgr_closure_init(&p->connectivity_changed_cbs[i],
- rr_connectivity_changed, &p->cb_args[i]);
+ grpc_closure_init(&p->connectivity_changed_cbs[i], rr_connectivity_changed,
+ &p->cb_args[i]);
}
/* The (dummy node) root of the ready list */
diff --git a/src/core/client_config/lb_policies/round_robin.h b/src/core/client_config/lb_policies/round_robin.h
index 2c81b9ef17..cf1f69c85f 100644
--- a/src/core/client_config/lb_policies/round_robin.h
+++ b/src/core/client_config/lb_policies/round_robin.h
@@ -43,5 +43,4 @@ extern int grpc_lb_round_robin_trace;
/** Returns a load balancing factory for the round robin policy */
grpc_lb_policy_factory *grpc_round_robin_lb_factory_create();
-
#endif
diff --git a/src/core/client_config/lb_policy.c b/src/core/client_config/lb_policy.c
index 90ec44432f..c955186f7f 100644
--- a/src/core/client_config/lb_policy.c
+++ b/src/core/client_config/lb_policy.c
@@ -51,44 +51,48 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy) {
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
-void grpc_lb_policy_unref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason) {
+void grpc_lb_policy_unref(grpc_lb_policy *policy,
+ grpc_closure_list *closure_list, const char *file,
+ int line, const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p unref %d -> %d %s",
policy, (int)policy->refs.count, (int)policy->refs.count - 1, reason);
#else
-void grpc_lb_policy_unref(grpc_lb_policy *policy) {
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
#endif
if (gpr_unref(&policy->refs)) {
- policy->vtable->destroy(policy);
+ policy->vtable->destroy(exec_ctx, policy);
}
}
-void grpc_lb_policy_shutdown(grpc_lb_policy *policy) {
- policy->vtable->shutdown(policy);
+void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
+ policy->vtable->shutdown(exec_ctx, policy);
}
-void grpc_lb_policy_pick(grpc_lb_policy *policy, grpc_pollset *pollset,
+void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target,
- grpc_iomgr_closure *on_complete) {
- policy->vtable->pick(policy, pollset, initial_metadata, target, on_complete);
+ grpc_subchannel **target, grpc_closure *on_complete) {
+ policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata, target,
+ on_complete);
}
-void grpc_lb_policy_broadcast(grpc_lb_policy *policy, grpc_transport_op *op) {
- policy->vtable->broadcast(policy, op);
+void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_transport_op *op) {
+ policy->vtable->broadcast(exec_ctx, policy, op);
}
-void grpc_lb_policy_exit_idle(grpc_lb_policy *policy) {
- policy->vtable->exit_idle(policy);
+void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
+ policy->vtable->exit_idle(exec_ctx, policy);
}
-void grpc_lb_policy_notify_on_state_change(grpc_lb_policy *policy,
+void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
grpc_connectivity_state *state,
- grpc_iomgr_closure *closure) {
- policy->vtable->notify_on_state_change(policy, state, closure);
+ grpc_closure *closure) {
+ policy->vtable->notify_on_state_change(exec_ctx, policy, state, closure);
}
grpc_connectivity_state grpc_lb_policy_check_connectivity(
- grpc_lb_policy *policy) {
- return policy->vtable->check_connectivity(policy);
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
+ return policy->vtable->check_connectivity(exec_ctx, policy);
}
diff --git a/src/core/client_config/lb_policy.h b/src/core/client_config/lb_policy.h
index 3f7ca8f28d..0eefe64991 100644
--- a/src/core/client_config/lb_policy.h
+++ b/src/core/client_config/lb_policy.h
@@ -35,6 +35,7 @@
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_LB_POLICY_H
#include "src/core/client_config/subchannel.h"
+#include "src/core/transport/connectivity_state.h"
/** A load balancing policy: specified by a vtable and a struct (which
is expected to be extended to contain some parameters) */
@@ -50,45 +51,48 @@ struct grpc_lb_policy {
};
struct grpc_lb_policy_vtable {
- void (*destroy)(grpc_lb_policy *policy);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
- void (*shutdown)(grpc_lb_policy *policy);
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** implement grpc_lb_policy_pick */
- void (*pick)(grpc_lb_policy *policy, grpc_pollset *pollset,
- grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
- grpc_iomgr_closure *on_complete);
+ void (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_subchannel **target, grpc_closure *on_complete);
/** try to enter a READY connectivity state */
- void (*exit_idle)(grpc_lb_policy *policy);
+ void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** broadcast a transport op to all subchannels */
- void (*broadcast)(grpc_lb_policy *policy, grpc_transport_op *op);
+ void (*broadcast)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_transport_op *op);
/** check the current connectivity of the lb_policy */
- grpc_connectivity_state (*check_connectivity)(grpc_lb_policy *policy);
+ grpc_connectivity_state (*check_connectivity)(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy */
- void (*notify_on_state_change)(grpc_lb_policy *policy,
+ void (*notify_on_state_change)(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
grpc_connectivity_state *state,
- grpc_iomgr_closure *closure);
+ grpc_closure *closure);
};
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_LB_POLICY_UNREF(p, r) \
- grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
+ grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
-void grpc_lb_policy_unref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason);
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const char *file, int line, const char *reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
-#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
+#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
void grpc_lb_policy_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_unref(grpc_lb_policy *policy);
+void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
#endif
/** called by concrete implementations to initialize the base struct */
@@ -96,26 +100,28 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable);
/** Start shutting down (fail any pending picks) */
-void grpc_lb_policy_shutdown(grpc_lb_policy *policy);
+void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** Given initial metadata in \a initial_metadata, find an appropriate
target for this rpc, and 'return' it by calling \a on_complete after setting
\a target.
Picking can be asynchronous. Any IO should be done under \a pollset. */
-void grpc_lb_policy_pick(grpc_lb_policy *policy, grpc_pollset *pollset,
+void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
- grpc_subchannel **target,
- grpc_iomgr_closure *on_complete);
+ grpc_subchannel **target, grpc_closure *on_complete);
-void grpc_lb_policy_broadcast(grpc_lb_policy *policy, grpc_transport_op *op);
+void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_transport_op *op);
-void grpc_lb_policy_exit_idle(grpc_lb_policy *policy);
+void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
-void grpc_lb_policy_notify_on_state_change(grpc_lb_policy *policy,
+void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
grpc_connectivity_state *state,
- grpc_iomgr_closure *closure);
+ grpc_closure *closure);
grpc_connectivity_state grpc_lb_policy_check_connectivity(
- grpc_lb_policy *policy);
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
#endif /* GRPC_INTERNAL_CORE_CONFIG_LB_POLICY_H */
diff --git a/src/core/client_config/lb_policy_factory.c b/src/core/client_config/lb_policy_factory.c
index 0c097e0542..e49de544e3 100644
--- a/src/core/client_config/lb_policy_factory.c
+++ b/src/core/client_config/lb_policy_factory.c
@@ -33,15 +33,16 @@
#include "src/core/client_config/lb_policy_factory.h"
-void grpc_lb_policy_factory_ref(grpc_lb_policy_factory *factory) {
+void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
factory->vtable->ref(factory);
}
-void grpc_lb_policy_factory_unref(grpc_lb_policy_factory *factory) {
+
+void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory) {
factory->vtable->unref(factory);
}
-grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
- grpc_lb_policy_factory *factory, grpc_lb_policy_args *args) {
+grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
+ grpc_lb_policy_factory* factory, grpc_lb_policy_args* args) {
if (factory == NULL) return NULL;
return factory->vtable->create_lb_policy(factory, args);
}
diff --git a/src/core/client_config/lb_policy_registry.c b/src/core/client_config/lb_policy_registry.c
index ae4a077ef3..fc302e82d7 100644
--- a/src/core/client_config/lb_policy_registry.c
+++ b/src/core/client_config/lb_policy_registry.c
@@ -65,7 +65,7 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory;
}
-static grpc_lb_policy_factory *lookup_factory(const char* name) {
+static grpc_lb_policy_factory *lookup_factory(const char *name) {
int i;
if (name == NULL) return NULL;
@@ -82,7 +82,7 @@ static grpc_lb_policy_factory *lookup_factory(const char* name) {
grpc_lb_policy *grpc_lb_policy_create(const char *name,
grpc_lb_policy_args *args) {
grpc_lb_policy_factory *factory = lookup_factory(name);
- grpc_lb_policy *lb_policy = grpc_lb_policy_factory_create_lb_policy(
- factory, args);
+ grpc_lb_policy *lb_policy =
+ grpc_lb_policy_factory_create_lb_policy(factory, args);
return lb_policy;
}
diff --git a/src/core/client_config/resolver.c b/src/core/client_config/resolver.c
index 91e42bb684..081097eb19 100644
--- a/src/core/client_config/resolver.c
+++ b/src/core/client_config/resolver.c
@@ -40,8 +40,8 @@ void grpc_resolver_init(grpc_resolver *resolver,
}
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
-void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
- const char *reason) {
+void grpc_resolver_ref(grpc_resolver *resolver, grpc_closure_list *closure_list,
+ const char *file, int line, const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p ref %d -> %d %s",
resolver, (int)resolver->refs.count, (int)resolver->refs.count + 1,
reason);
@@ -52,32 +52,34 @@ void grpc_resolver_ref(grpc_resolver *resolver) {
}
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
-void grpc_resolver_unref(grpc_resolver *resolver, const char *file, int line,
- const char *reason) {
+void grpc_resolver_unref(grpc_resolver *resolver,
+ grpc_closure_list *closure_list, const char *file,
+ int line, const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p unref %d -> %d %s",
resolver, (int)resolver->refs.count, (int)resolver->refs.count - 1,
reason);
#else
-void grpc_resolver_unref(grpc_resolver *resolver) {
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
- resolver->vtable->destroy(resolver);
+ resolver->vtable->destroy(exec_ctx, resolver);
}
}
-void grpc_resolver_shutdown(grpc_resolver *resolver) {
- resolver->vtable->shutdown(resolver);
+void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+ resolver->vtable->shutdown(exec_ctx, resolver);
}
-void grpc_resolver_channel_saw_error(grpc_resolver *resolver,
+void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len) {
- resolver->vtable->channel_saw_error(resolver, failing_address,
+ resolver->vtable->channel_saw_error(exec_ctx, resolver, failing_address,
failing_address_len);
}
-void grpc_resolver_next(grpc_resolver *resolver,
+void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete) {
- resolver->vtable->next(resolver, target_config, on_complete);
+ grpc_closure *on_complete) {
+ resolver->vtable->next(exec_ctx, resolver, target_config, on_complete);
}
diff --git a/src/core/client_config/resolver.h b/src/core/client_config/resolver.h
index 8ad87d789b..7ba0cd5bd4 100644
--- a/src/core/client_config/resolver.h
+++ b/src/core/client_config/resolver.h
@@ -49,38 +49,39 @@ struct grpc_resolver {
};
struct grpc_resolver_vtable {
- void (*destroy)(grpc_resolver *resolver);
- void (*shutdown)(grpc_resolver *resolver);
- void (*channel_saw_error)(grpc_resolver *resolver,
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len);
- void (*next)(grpc_resolver *resolver, grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete);
+ void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_client_config **target_config, grpc_closure *on_complete);
};
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_RESOLVER_UNREF(p, r) \
- grpc_resolver_unref((p), __FILE__, __LINE__, (r))
+#define GRPC_RESOLVER_UNREF(cl, p, r) \
+ grpc_resolver_unref((cl), (p), __FILE__, __LINE__, (r))
void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
const char *reason);
-void grpc_resolver_unref(grpc_resolver *policy, const char *file, int line,
- const char *reason);
+void grpc_resolver_unref(grpc_resolver *policy, grpc_closure_list *closure_list,
+ const char *file, int line, const char *reason);
#else
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
-#define GRPC_RESOLVER_UNREF(p, r) grpc_resolver_unref((p))
+#define GRPC_RESOLVER_UNREF(cl, p, r) grpc_resolver_unref((cl), (p))
void grpc_resolver_ref(grpc_resolver *policy);
-void grpc_resolver_unref(grpc_resolver *policy);
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
#endif
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable);
-void grpc_resolver_shutdown(grpc_resolver *resolver);
+void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon. */
-void grpc_resolver_channel_saw_error(grpc_resolver *resolver,
+void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len);
@@ -90,8 +91,8 @@ void grpc_resolver_channel_saw_error(grpc_resolver *resolver,
If resolution is fatally broken, set *target_config to NULL and
schedule on_complete. */
-void grpc_resolver_next(grpc_resolver *resolver,
+void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete);
+ grpc_closure *on_complete);
#endif /* GRPC_INTERNAL_CORE_CONFIG_RESOLVER_H */
diff --git a/src/core/client_config/resolver_factory.c b/src/core/client_config/resolver_factory.c
index 5b859a8d10..e7e9196ac4 100644
--- a/src/core/client_config/resolver_factory.c
+++ b/src/core/client_config/resolver_factory.c
@@ -33,24 +33,23 @@
#include "src/core/client_config/resolver_factory.h"
-void grpc_resolver_factory_ref(grpc_resolver_factory *factory) {
+void grpc_resolver_factory_ref(grpc_resolver_factory* factory) {
factory->vtable->ref(factory);
}
-void grpc_resolver_factory_unref(grpc_resolver_factory *factory) {
+void grpc_resolver_factory_unref(grpc_resolver_factory* factory) {
factory->vtable->unref(factory);
}
/** Create a resolver instance for a name */
-grpc_resolver *grpc_resolver_factory_create_resolver(
- grpc_resolver_factory *factory, grpc_uri *uri,
- grpc_subchannel_factory *subchannel_factory) {
+grpc_resolver* grpc_resolver_factory_create_resolver(
+ grpc_resolver_factory* factory, grpc_resolver_args* args) {
if (factory == NULL) return NULL;
- return factory->vtable->create_resolver(factory, uri, subchannel_factory);
+ return factory->vtable->create_resolver(factory, args);
}
-char *grpc_resolver_factory_get_default_authority(
- grpc_resolver_factory *factory, grpc_uri *uri) {
+char* grpc_resolver_factory_get_default_authority(
+ grpc_resolver_factory* factory, grpc_uri* uri) {
if (factory == NULL) return NULL;
return factory->vtable->get_default_authority(factory, uri);
}
diff --git a/src/core/client_config/resolver_factory.h b/src/core/client_config/resolver_factory.h
index e243b23988..4c4df353f7 100644
--- a/src/core/client_config/resolver_factory.h
+++ b/src/core/client_config/resolver_factory.h
@@ -47,14 +47,18 @@ struct grpc_resolver_factory {
const grpc_resolver_factory_vtable *vtable;
};
+typedef struct grpc_resolver_args {
+ grpc_uri *uri;
+ grpc_subchannel_factory *subchannel_factory;
+} grpc_resolver_args;
+
struct grpc_resolver_factory_vtable {
void (*ref)(grpc_resolver_factory *factory);
void (*unref)(grpc_resolver_factory *factory);
/** Implementation of grpc_resolver_factory_create_resolver */
- grpc_resolver *(*create_resolver)(
- grpc_resolver_factory *factory, grpc_uri *uri,
- grpc_subchannel_factory *subchannel_factory);
+ grpc_resolver *(*create_resolver)(grpc_resolver_factory *factory,
+ grpc_resolver_args *args);
/** Implementation of grpc_resolver_factory_get_default_authority */
char *(*get_default_authority)(grpc_resolver_factory *factory, grpc_uri *uri);
@@ -68,8 +72,7 @@ void grpc_resolver_factory_unref(grpc_resolver_factory *resolver);
/** Create a resolver instance for a name */
grpc_resolver *grpc_resolver_factory_create_resolver(
- grpc_resolver_factory *factory, grpc_uri *uri,
- grpc_subchannel_factory *subchannel_factory);
+ grpc_resolver_factory *factory, grpc_resolver_args *args);
/** Return a (freshly allocated with gpr_malloc) string representing
the default authority to use for this scheme. */
diff --git a/src/core/client_config/resolver_registry.c b/src/core/client_config/resolver_registry.c
index 37979b3b86..89a945c2d3 100644
--- a/src/core/client_config/resolver_registry.c
+++ b/src/core/client_config/resolver_registry.c
@@ -118,8 +118,12 @@ grpc_resolver *grpc_resolver_create(
const char *target, grpc_subchannel_factory *subchannel_factory) {
grpc_uri *uri = NULL;
grpc_resolver_factory *factory = resolve_factory(target, &uri);
- grpc_resolver *resolver =
- grpc_resolver_factory_create_resolver(factory, uri, subchannel_factory);
+ grpc_resolver *resolver;
+ grpc_resolver_args args;
+ memset(&args, 0, sizeof(args));
+ args.uri = uri;
+ args.subchannel_factory = subchannel_factory;
+ resolver = grpc_resolver_factory_create_resolver(factory, &args);
grpc_uri_destroy(uri);
return resolver;
}
diff --git a/src/core/client_config/resolvers/dns_resolver.c b/src/core/client_config/resolvers/dns_resolver.c
index ccec07a08c..7f9dd2543f 100644
--- a/src/core/client_config/resolvers/dns_resolver.c
+++ b/src/core/client_config/resolvers/dns_resolver.c
@@ -67,40 +67,43 @@ typedef struct {
/** which version of resolved_config is current? */
int resolved_version;
/** pending next completion, or NULL */
- grpc_iomgr_closure *next_completion;
+ grpc_closure *next_completion;
/** target config address for next completion */
grpc_client_config **target_config;
/** current (fully resolved) config */
grpc_client_config *resolved_config;
} dns_resolver;
-static void dns_destroy(grpc_resolver *r);
+static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_start_resolving_locked(dns_resolver *r);
-static void dns_maybe_finish_next_locked(dns_resolver *r);
+static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ dns_resolver *r);
-static void dns_shutdown(grpc_resolver *r);
-static void dns_channel_saw_error(grpc_resolver *r,
+static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
-static void dns_next(grpc_resolver *r, grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete);
+static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
dns_destroy, dns_shutdown, dns_channel_saw_error, dns_next};
-static void dns_shutdown(grpc_resolver *resolver) {
+static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
- grpc_iomgr_add_callback(r->next_completion);
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
}
-static void dns_channel_saw_error(grpc_resolver *resolver, struct sockaddr *sa,
+static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver, struct sockaddr *sa,
int len) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
@@ -110,9 +113,9 @@ static void dns_channel_saw_error(grpc_resolver *resolver, struct sockaddr *sa,
gpr_mu_unlock(&r->mu);
}
-static void dns_next(grpc_resolver *resolver,
+static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete) {
+ grpc_closure *on_complete) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
@@ -121,12 +124,13 @@ static void dns_next(grpc_resolver *resolver,
if (r->resolved_version == 0 && !r->resolving) {
dns_start_resolving_locked(r);
} else {
- dns_maybe_finish_next_locked(r);
+ dns_maybe_finish_next_locked(exec_ctx, r);
}
gpr_mu_unlock(&r->mu);
}
-static void dns_on_resolved(void *arg, grpc_resolved_addresses *addresses) {
+static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses) {
dns_resolver *r = arg;
grpc_client_config *config = NULL;
grpc_subchannel **subchannels;
@@ -142,13 +146,14 @@ static void dns_on_resolved(void *arg, grpc_resolved_addresses *addresses) {
args.addr = (struct sockaddr *)(addresses->addrs[i].addr);
args.addr_len = (size_t)addresses->addrs[i].len;
subchannels[i] = grpc_subchannel_factory_create_subchannel(
- r->subchannel_factory, &args);
+ exec_ctx, r->subchannel_factory, &args);
}
+ memset(&lb_policy_args, 0, sizeof(lb_policy_args));
lb_policy_args.subchannels = subchannels;
lb_policy_args.num_subchannels = addresses->naddrs;
lb_policy = grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
grpc_client_config_set_lb_policy(config, lb_policy);
- GRPC_LB_POLICY_UNREF(lb_policy, "construction");
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "construction");
grpc_resolved_addresses_destroy(addresses);
gpr_free(subchannels);
}
@@ -156,14 +161,14 @@ static void dns_on_resolved(void *arg, grpc_resolved_addresses *addresses) {
GPR_ASSERT(r->resolving);
r->resolving = 0;
if (r->resolved_config) {
- grpc_client_config_unref(r->resolved_config);
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
}
r->resolved_config = config;
r->resolved_version++;
- dns_maybe_finish_next_locked(r);
+ dns_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
- GRPC_RESOLVER_UNREF(&r->base, "dns-resolving");
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
static void dns_start_resolving_locked(dns_resolver *r) {
@@ -173,41 +178,41 @@ static void dns_start_resolving_locked(dns_resolver *r) {
grpc_resolve_address(r->name, r->default_port, dns_on_resolved, r);
}
-static void dns_maybe_finish_next_locked(dns_resolver *r) {
+static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ dns_resolver *r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_config = r->resolved_config;
if (r->resolved_config) {
grpc_client_config_ref(r->resolved_config);
}
- grpc_iomgr_add_callback(r->next_completion);
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
}
-static void dns_destroy(grpc_resolver *gr) {
+static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
dns_resolver *r = (dns_resolver *)gr;
gpr_mu_destroy(&r->mu);
if (r->resolved_config) {
- grpc_client_config_unref(r->resolved_config);
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
}
- grpc_subchannel_factory_unref(r->subchannel_factory);
+ grpc_subchannel_factory_unref(exec_ctx, r->subchannel_factory);
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r->lb_policy_name);
gpr_free(r);
}
-static grpc_resolver *dns_create(
- grpc_uri *uri, const char *default_port,
- const char* lb_policy_name,
- grpc_subchannel_factory *subchannel_factory) {
+static grpc_resolver *dns_create(grpc_resolver_args *args,
+ const char *default_port,
+ const char *lb_policy_name) {
dns_resolver *r;
- const char *path = uri->path;
+ const char *path = args->uri->path;
- if (0 != strcmp(uri->authority, "")) {
- gpr_log(GPR_ERROR, "authority based uri's not supported");
+ if (0 != strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based dns uri's not supported");
return NULL;
}
@@ -220,8 +225,8 @@ static grpc_resolver *dns_create(
grpc_resolver_init(&r->base, &dns_resolver_vtable);
r->name = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
- r->subchannel_factory = subchannel_factory;
- grpc_subchannel_factory_ref(subchannel_factory);
+ r->subchannel_factory = args->subchannel_factory;
+ grpc_subchannel_factory_ref(r->subchannel_factory);
r->lb_policy_name = gpr_strdup(lb_policy_name);
return &r->base;
}
@@ -235,9 +240,8 @@ static void dns_factory_ref(grpc_resolver_factory *factory) {}
static void dns_factory_unref(grpc_resolver_factory *factory) {}
static grpc_resolver *dns_factory_create_resolver(
- grpc_resolver_factory *factory, grpc_uri *uri,
- grpc_subchannel_factory *subchannel_factory) {
- return dns_create(uri, "https", "pick_first", subchannel_factory);
+ grpc_resolver_factory *factory, grpc_resolver_args *args) {
+ return dns_create(args, "https", "pick_first");
}
char *dns_factory_get_default_host_name(grpc_resolver_factory *factory,
diff --git a/src/core/client_config/resolvers/sockaddr_resolver.c b/src/core/client_config/resolvers/sockaddr_resolver.c
index ea2df07035..0b017f06c7 100644
--- a/src/core/client_config/resolvers/sockaddr_resolver.c
+++ b/src/core/client_config/resolvers/sockaddr_resolver.c
@@ -71,54 +71,59 @@ typedef struct {
/** have we published? */
int published;
/** pending next completion, or NULL */
- grpc_iomgr_closure *next_completion;
+ grpc_closure *next_completion;
/** target config address for next completion */
grpc_client_config **target_config;
} sockaddr_resolver;
-static void sockaddr_destroy(grpc_resolver *r);
+static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void sockaddr_maybe_finish_next_locked(sockaddr_resolver *r);
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ sockaddr_resolver *r);
-static void sockaddr_shutdown(grpc_resolver *r);
-static void sockaddr_channel_saw_error(grpc_resolver *r,
+static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
-static void sockaddr_next(grpc_resolver *r, grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete);
+static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
sockaddr_destroy, sockaddr_shutdown, sockaddr_channel_saw_error,
sockaddr_next};
-static void sockaddr_shutdown(grpc_resolver *resolver) {
+static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
- /* TODO(ctiller): add delayed callback */
- grpc_iomgr_add_callback(r->next_completion);
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
}
-static void sockaddr_channel_saw_error(grpc_resolver *resolver,
+static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
struct sockaddr *sa, int len) {}
-static void sockaddr_next(grpc_resolver *resolver,
+static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete) {
+ grpc_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_config = target_config;
- sockaddr_maybe_finish_next_locked(r);
+ sockaddr_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
}
-static void sockaddr_maybe_finish_next_locked(sockaddr_resolver *r) {
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ sockaddr_resolver *r) {
grpc_client_config *cfg;
grpc_lb_policy *lb_policy;
grpc_lb_policy_args lb_policy_args;
@@ -134,26 +139,26 @@ static void sockaddr_maybe_finish_next_locked(sockaddr_resolver *r) {
args.addr = (struct sockaddr *)&r->addrs[i];
args.addr_len = r->addrs_len[i];
subchannels[i] = grpc_subchannel_factory_create_subchannel(
- r->subchannel_factory, &args);
+ exec_ctx, r->subchannel_factory, &args);
}
+ memset(&lb_policy_args, 0, sizeof(lb_policy_args));
lb_policy_args.subchannels = subchannels;
lb_policy_args.num_subchannels = r->num_addrs;
- lb_policy =
- grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
+ lb_policy = grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
gpr_free(subchannels);
grpc_client_config_set_lb_policy(cfg, lb_policy);
- GRPC_LB_POLICY_UNREF(lb_policy, "unix");
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "sockaddr");
r->published = 1;
*r->target_config = cfg;
- grpc_iomgr_add_callback(r->next_completion);
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
r->next_completion = NULL;
}
}
-static void sockaddr_destroy(grpc_resolver *gr) {
+static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
- grpc_subchannel_factory_unref(r->subchannel_factory);
+ grpc_subchannel_factory_unref(exec_ctx, r->subchannel_factory);
gpr_free(r->addrs);
gpr_free(r->addrs_len);
gpr_free(r->lb_policy_name);
@@ -277,9 +282,9 @@ done:
}
static void do_nothing(void *ignored) {}
+
static grpc_resolver *sockaddr_create(
- grpc_uri *uri, const char *default_lb_policy_name,
- grpc_subchannel_factory *subchannel_factory,
+ grpc_resolver_args *args, const char *default_lb_policy_name,
int parse(grpc_uri *uri, struct sockaddr_storage *dst, size_t *len)) {
size_t i;
int errors_found = 0; /* GPR_FALSE */
@@ -287,8 +292,9 @@ static grpc_resolver *sockaddr_create(
gpr_slice path_slice;
gpr_slice_buffer path_parts;
- if (0 != strcmp(uri->authority, "")) {
- gpr_log(GPR_ERROR, "authority based uri's not supported");
+ if (0 != strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
+ args->uri->scheme);
return NULL;
}
@@ -296,11 +302,12 @@ static grpc_resolver *sockaddr_create(
memset(r, 0, sizeof(*r));
r->lb_policy_name = NULL;
- if (0 != strcmp(uri->query, "")) {
+ if (0 != strcmp(args->uri->query, "")) {
gpr_slice query_slice;
gpr_slice_buffer query_parts;
- query_slice = gpr_slice_new(uri->query, strlen(uri->query), do_nothing);
+ query_slice =
+ gpr_slice_new(args->uri->query, strlen(args->uri->query), do_nothing);
gpr_slice_buffer_init(&query_parts);
gpr_slice_split(query_slice, "=", &query_parts);
GPR_ASSERT(query_parts.count == 2);
@@ -314,7 +321,8 @@ static grpc_resolver *sockaddr_create(
r->lb_policy_name = gpr_strdup(default_lb_policy_name);
}
- path_slice = gpr_slice_new(uri->path, strlen(uri->path), do_nothing);
+ path_slice =
+ gpr_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
gpr_slice_buffer_init(&path_parts);
gpr_slice_split(path_slice, ",", &path_parts);
@@ -322,9 +330,9 @@ static grpc_resolver *sockaddr_create(
r->addrs = gpr_malloc(sizeof(struct sockaddr_storage) * r->num_addrs);
r->addrs_len = gpr_malloc(sizeof(*r->addrs_len) * r->num_addrs);
- for(i = 0; i < r->num_addrs; i++) {
- grpc_uri ith_uri = *uri;
- char* part_str = gpr_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
+ for (i = 0; i < r->num_addrs; i++) {
+ grpc_uri ith_uri = *args->uri;
+ char *part_str = gpr_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
ith_uri.path = part_str;
if (!parse(&ith_uri, &r->addrs[i], &r->addrs_len[i])) {
errors_found = 1; /* GPR_TRUE */
@@ -343,9 +351,9 @@ static grpc_resolver *sockaddr_create(
gpr_ref_init(&r->refs, 1);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
- r->subchannel_factory = subchannel_factory;
+ r->subchannel_factory = args->subchannel_factory;
+ grpc_subchannel_factory_ref(r->subchannel_factory);
- grpc_subchannel_factory_ref(subchannel_factory);
return &r->base;
}
@@ -359,10 +367,8 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
#define DECL_FACTORY(name) \
static grpc_resolver *name##_factory_create_resolver( \
- grpc_resolver_factory *factory, grpc_uri *uri, \
- grpc_subchannel_factory *subchannel_factory) { \
- return sockaddr_create(uri, "pick_first", \
- subchannel_factory, parse_##name); \
+ grpc_resolver_factory *factory, grpc_resolver_args *args) { \
+ return sockaddr_create(args, "pick_first", parse_##name); \
} \
static const grpc_resolver_factory_vtable name##_factory_vtable = { \
sockaddr_factory_ref, sockaddr_factory_unref, \
@@ -376,5 +382,4 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
#ifdef GPR_POSIX_SOCKET
DECL_FACTORY(unix)
#endif
-DECL_FACTORY(ipv4)
-DECL_FACTORY(ipv6)
+DECL_FACTORY(ipv4) DECL_FACTORY(ipv6)
diff --git a/src/core/client_config/resolvers/zookeeper_resolver.c b/src/core/client_config/resolvers/zookeeper_resolver.c
index 2594e6fae9..f640a0084a 100644
--- a/src/core/client_config/resolvers/zookeeper_resolver.c
+++ b/src/core/client_config/resolvers/zookeeper_resolver.c
@@ -71,7 +71,7 @@ typedef struct {
/** which version of resolved_config is current? */
int resolved_version;
/** pending next completion, or NULL */
- grpc_iomgr_closure *next_completion;
+ grpc_closure *next_completion;
/** target config address for next completion */
grpc_client_config **target_config;
/** current (fully resolved) config */
@@ -87,35 +87,44 @@ typedef struct {
int resolved_num;
} zookeeper_resolver;
-static void zookeeper_destroy(grpc_resolver *r);
+static void zookeeper_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void zookeeper_start_resolving_locked(zookeeper_resolver *r);
-static void zookeeper_maybe_finish_next_locked(zookeeper_resolver *r);
+static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ zookeeper_resolver *r);
-static void zookeeper_shutdown(grpc_resolver *r);
-static void zookeeper_channel_saw_error(grpc_resolver *r,
+static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
-static void zookeeper_next(grpc_resolver *r, grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete);
+static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_client_config **target_config,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable zookeeper_resolver_vtable = {
zookeeper_destroy, zookeeper_shutdown, zookeeper_channel_saw_error,
zookeeper_next};
-static void zookeeper_shutdown(grpc_resolver *resolver) {
+static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
+ grpc_closure *call = NULL;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
- grpc_iomgr_add_callback(r->next_completion);
+ call = r->next_completion;
r->next_completion = NULL;
}
zookeeper_close(r->zookeeper_handle);
gpr_mu_unlock(&r->mu);
+ if (call != NULL) {
+ call->cb(exec_ctx, call->cb_arg, 1);
+ }
}
-static void zookeeper_channel_saw_error(grpc_resolver *resolver,
+static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
struct sockaddr *sa, int len) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
@@ -125,9 +134,9 @@ static void zookeeper_channel_saw_error(grpc_resolver *resolver,
gpr_mu_unlock(&r->mu);
}
-static void zookeeper_next(grpc_resolver *resolver,
+static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
- grpc_iomgr_closure *on_complete) {
+ grpc_closure *on_complete) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(r->next_completion == NULL);
@@ -136,7 +145,7 @@ static void zookeeper_next(grpc_resolver *resolver,
if (r->resolved_version == 0 && r->resolving == 0) {
zookeeper_start_resolving_locked(r);
} else {
- zookeeper_maybe_finish_next_locked(r);
+ zookeeper_maybe_finish_next_locked(exec_ctx, r);
}
gpr_mu_unlock(&r->mu);
}
@@ -173,7 +182,7 @@ static void zookeeper_watcher(zhandle_t *zookeeper_handle, int type, int state,
/** Callback function after getting all resolved addresses
Creates a subchannel for each address */
-static void zookeeper_on_resolved(void *arg,
+static void zookeeper_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses *addresses) {
zookeeper_resolver *r = arg;
grpc_client_config *config = NULL;
@@ -190,14 +199,13 @@ static void zookeeper_on_resolved(void *arg,
args.addr = (struct sockaddr *)(addresses->addrs[i].addr);
args.addr_len = addresses->addrs[i].len;
subchannels[i] = grpc_subchannel_factory_create_subchannel(
- r->subchannel_factory, &args);
+ exec_ctx, r->subchannel_factory, &args);
}
lb_policy_args.subchannels = subchannels;
lb_policy_args.num_subchannels = addresses->naddrs;
- lb_policy =
- grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
+ lb_policy = grpc_lb_policy_create(r->lb_policy_name, &lb_policy_args);
grpc_client_config_set_lb_policy(config, lb_policy);
- GRPC_LB_POLICY_UNREF(lb_policy, "construction");
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "construction");
grpc_resolved_addresses_destroy(addresses);
gpr_free(subchannels);
}
@@ -205,18 +213,18 @@ static void zookeeper_on_resolved(void *arg,
GPR_ASSERT(r->resolving == 1);
r->resolving = 0;
if (r->resolved_config != NULL) {
- grpc_client_config_unref(r->resolved_config);
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
}
r->resolved_config = config;
r->resolved_version++;
- zookeeper_maybe_finish_next_locked(r);
+ zookeeper_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
- GRPC_RESOLVER_UNREF(&r->base, "zookeeper-resolving");
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "zookeeper-resolving");
}
/** Callback function for each DNS resolved address */
-static void zookeeper_dns_resolved(void *arg,
+static void zookeeper_dns_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses *addresses) {
size_t i;
zookeeper_resolver *r = arg;
@@ -242,7 +250,7 @@ static void zookeeper_dns_resolved(void *arg,
resolve_done = (r->resolved_num == r->resolved_total);
gpr_mu_unlock(&r->mu);
if (resolve_done) {
- zookeeper_on_resolved(r, r->resolved_addrs);
+ zookeeper_on_resolved(exec_ctx, r, r->resolved_addrs);
}
}
@@ -291,9 +299,11 @@ static void zookeeper_get_children_node_completion(int rc, const char *value,
char *address = NULL;
zookeeper_resolver *r = (zookeeper_resolver *)arg;
int resolve_done = 0;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (rc != 0) {
gpr_log(GPR_ERROR, "Error in getting a child node of %s", r->name);
+ grpc_exec_ctx_finish(&exec_ctx);
return;
}
@@ -309,9 +319,11 @@ static void zookeeper_get_children_node_completion(int rc, const char *value,
resolve_done = (r->resolved_num == r->resolved_total);
gpr_mu_unlock(&r->mu);
if (resolve_done) {
- zookeeper_on_resolved(r, r->resolved_addrs);
+ zookeeper_on_resolved(&exec_ctx, r, r->resolved_addrs);
}
}
+
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void zookeeper_get_children_completion(
@@ -402,39 +414,39 @@ static void zookeeper_start_resolving_locked(zookeeper_resolver *r) {
zookeeper_resolve_address(r);
}
-static void zookeeper_maybe_finish_next_locked(zookeeper_resolver *r) {
+static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ zookeeper_resolver *r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_config = r->resolved_config;
if (r->resolved_config != NULL) {
grpc_client_config_ref(r->resolved_config);
}
- grpc_iomgr_add_callback(r->next_completion);
+ grpc_exec_ctx_enqueue(exec_ctx, r->next_completion, 1);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
}
-static void zookeeper_destroy(grpc_resolver *gr) {
+static void zookeeper_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
zookeeper_resolver *r = (zookeeper_resolver *)gr;
gpr_mu_destroy(&r->mu);
if (r->resolved_config != NULL) {
- grpc_client_config_unref(r->resolved_config);
+ grpc_client_config_unref(exec_ctx, r->resolved_config);
}
- grpc_subchannel_factory_unref(r->subchannel_factory);
+ grpc_subchannel_factory_unref(exec_ctx, r->subchannel_factory);
gpr_free(r->name);
gpr_free(r->lb_policy_name);
gpr_free(r);
}
-static grpc_resolver *zookeeper_create(
- grpc_uri *uri, const char *lb_policy_name,
- grpc_subchannel_factory *subchannel_factory) {
+static grpc_resolver *zookeeper_create(grpc_resolver_args *args,
+ const char *lb_policy_name) {
zookeeper_resolver *r;
size_t length;
- char *path = uri->path;
+ char *path = args->uri->path;
- if (0 == strcmp(uri->authority, "")) {
+ if (0 == strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "No authority specified in zookeeper uri");
return NULL;
}
@@ -452,14 +464,16 @@ static grpc_resolver *zookeeper_create(
grpc_resolver_init(&r->base, &zookeeper_resolver_vtable);
r->name = gpr_strdup(path);
- r->subchannel_factory = subchannel_factory;
+ r->subchannel_factory = args->subchannel_factory;
+ grpc_subchannel_factory_ref(r->subchannel_factory);
+
r->lb_policy_name = gpr_strdup(lb_policy_name);
- grpc_subchannel_factory_ref(subchannel_factory);
/** Initializes zookeeper client */
zoo_set_debug_level(ZOO_LOG_LEVEL_WARN);
- r->zookeeper_handle = zookeeper_init(uri->authority, zookeeper_global_watcher,
- GRPC_ZOOKEEPER_SESSION_TIMEOUT, 0, 0, 0);
+ r->zookeeper_handle =
+ zookeeper_init(args->uri->authority, zookeeper_global_watcher,
+ GRPC_ZOOKEEPER_SESSION_TIMEOUT, 0, 0, 0);
if (r->zookeeper_handle == NULL) {
gpr_log(GPR_ERROR, "Unable to connect to zookeeper server");
return NULL;
@@ -490,15 +504,15 @@ static char *zookeeper_factory_get_default_hostname(
}
static grpc_resolver *zookeeper_factory_create_resolver(
- grpc_resolver_factory *factory, grpc_uri *uri,
- grpc_subchannel_factory *subchannel_factory) {
- return zookeeper_create(uri, "pick_first", subchannel_factory);
+ grpc_resolver_factory *factory, grpc_resolver_args *args) {
+ return zookeeper_create(args, "pick_first");
}
static const grpc_resolver_factory_vtable zookeeper_factory_vtable = {
zookeeper_factory_ref, zookeeper_factory_unref,
zookeeper_factory_create_resolver, zookeeper_factory_get_default_hostname,
"zookeeper"};
+
static grpc_resolver_factory zookeeper_resolver_factory = {
&zookeeper_factory_vtable};
diff --git a/src/core/client_config/subchannel.c b/src/core/client_config/subchannel.c
index 876d2aa418..740389003a 100644
--- a/src/core/client_config/subchannel.c
+++ b/src/core/client_config/subchannel.c
@@ -59,7 +59,7 @@ typedef struct {
} connection;
typedef struct {
- grpc_iomgr_closure closure;
+ grpc_closure closure;
size_t version;
grpc_subchannel *subchannel;
grpc_connectivity_state connectivity_state;
@@ -67,11 +67,11 @@ typedef struct {
typedef struct waiting_for_connect {
struct waiting_for_connect *next;
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
grpc_pollset *pollset;
grpc_subchannel_call **target;
grpc_subchannel *subchannel;
- grpc_iomgr_closure continuation;
+ grpc_closure continuation;
} waiting_for_connect;
struct grpc_subchannel {
@@ -99,7 +99,7 @@ struct grpc_subchannel {
grpc_connect_out_args connecting_result;
/** callback for connection finishing */
- grpc_iomgr_closure connected;
+ grpc_closure connected;
/** pollset_set tracking who's interested in a connection
being setup - owned by the master channel (in particular the
@@ -143,12 +143,15 @@ struct grpc_subchannel_call {
#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)((con) + 1))
-static grpc_subchannel_call *create_call(connection *con);
-static void connectivity_state_changed_locked(grpc_subchannel *c,
+static grpc_subchannel_call *create_call(grpc_exec_ctx *exec_ctx,
+ connection *con);
+static void connectivity_state_changed_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c,
const char *reason);
static grpc_connectivity_state compute_connectivity_locked(grpc_subchannel *c);
static gpr_timespec compute_connect_deadline(grpc_subchannel *c);
-static void subchannel_connected(void *subchannel, int iomgr_success);
+static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
+ int iomgr_success);
static void subchannel_ref_locked(
grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
@@ -156,8 +159,9 @@ static int subchannel_unref_locked(
grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) GRPC_MUST_USE_RESULT;
static void connection_ref_locked(connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
static grpc_subchannel *connection_unref_locked(
+ grpc_exec_ctx *exec_ctx,
connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) GRPC_MUST_USE_RESULT;
-static void subchannel_destroy(grpc_subchannel *c);
+static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c);
#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
#define SUBCHANNEL_REF_LOCKED(p, r) \
@@ -166,8 +170,8 @@ static void subchannel_destroy(grpc_subchannel *c);
subchannel_unref_locked((p), __FILE__, __LINE__, (r))
#define CONNECTION_REF_LOCKED(p, r) \
connection_ref_locked((p), __FILE__, __LINE__, (r))
-#define CONNECTION_UNREF_LOCKED(p, r) \
- connection_unref_locked((p), __FILE__, __LINE__, (r))
+#define CONNECTION_UNREF_LOCKED(cl, p, r) \
+ connection_unref_locked((cl), (p), __FILE__, __LINE__, (r))
#define REF_PASS_ARGS , file, line, reason
#define REF_LOG(name, p) \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p ref %d -> %d %s", \
@@ -179,7 +183,7 @@ static void subchannel_destroy(grpc_subchannel *c);
#define SUBCHANNEL_REF_LOCKED(p, r) subchannel_ref_locked((p))
#define SUBCHANNEL_UNREF_LOCKED(p, r) subchannel_unref_locked((p))
#define CONNECTION_REF_LOCKED(p, r) connection_ref_locked((p))
-#define CONNECTION_UNREF_LOCKED(p, r) connection_unref_locked((p))
+#define CONNECTION_UNREF_LOCKED(cl, p, r) connection_unref_locked((cl), (p))
#define REF_PASS_ARGS
#define REF_LOG(name, p) \
do { \
@@ -193,9 +197,9 @@ static void subchannel_destroy(grpc_subchannel *c);
* connection implementation
*/
-static void connection_destroy(connection *c) {
+static void connection_destroy(grpc_exec_ctx *exec_ctx, connection *c) {
GPR_ASSERT(c->refs == 0);
- grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CONNECTION(c));
+ grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
@@ -207,14 +211,14 @@ static void connection_ref_locked(
}
static grpc_subchannel *connection_unref_locked(
- connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ grpc_exec_ctx *exec_ctx, connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
grpc_subchannel *destroy = NULL;
UNREF_LOG("CONNECTION", c);
if (subchannel_unref_locked(c->subchannel REF_PASS_ARGS)) {
destroy = c->subchannel;
}
if (--c->refs == 0 && c->subchannel->active != c) {
- connection_destroy(c);
+ connection_destroy(exec_ctx, c);
}
return destroy;
}
@@ -241,35 +245,38 @@ void grpc_subchannel_ref(grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_mu_unlock(&c->mu);
}
-void grpc_subchannel_unref(grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
int destroy;
gpr_mu_lock(&c->mu);
destroy = subchannel_unref_locked(c REF_PASS_ARGS);
gpr_mu_unlock(&c->mu);
- if (destroy) subchannel_destroy(c);
+ if (destroy) subchannel_destroy(exec_ctx, c);
}
-static void subchannel_destroy(grpc_subchannel *c) {
+static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
if (c->active != NULL) {
- connection_destroy(c->active);
+ connection_destroy(exec_ctx, c->active);
}
- gpr_free(c->filters);
+ gpr_free((void *)c->filters);
grpc_channel_args_destroy(c->args);
gpr_free(c->addr);
grpc_mdctx_unref(c->mdctx);
- grpc_connectivity_state_destroy(&c->state_tracker);
- grpc_connector_unref(c->connector);
+ grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
+ grpc_connector_unref(exec_ctx, c->connector);
gpr_free(c);
}
-void grpc_subchannel_add_interested_party(grpc_subchannel *c,
+void grpc_subchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c,
grpc_pollset *pollset) {
- grpc_pollset_set_add_pollset(c->pollset_set, pollset);
+ grpc_pollset_set_add_pollset(exec_ctx, c->pollset_set, pollset);
}
-void grpc_subchannel_del_interested_party(grpc_subchannel *c,
+void grpc_subchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c,
grpc_pollset *pollset) {
- grpc_pollset_set_del_pollset(c->pollset_set, pollset);
+ grpc_pollset_set_del_pollset(exec_ctx, c->pollset_set, pollset);
}
static gpr_uint32 random_seed() {
@@ -287,7 +294,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
c->filters = gpr_malloc(sizeof(grpc_channel_filter *) * c->num_filters);
- memcpy(c->filters, args->filters,
+ memcpy((void *)c->filters, args->filters,
sizeof(grpc_channel_filter *) * c->num_filters);
c->addr = gpr_malloc(args->addr_len);
memcpy(c->addr, args->addr, args->addr_len);
@@ -298,14 +305,14 @@ grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
c->pollset_set = grpc_client_channel_get_connecting_pollset_set(parent_elem);
c->random = random_seed();
grpc_mdctx_ref(c->mdctx);
- grpc_iomgr_closure_init(&c->connected, subchannel_connected, c);
+ grpc_closure_init(&c->connected, subchannel_connected, c);
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel");
gpr_mu_init(&c->mu);
return c;
}
-static void continue_connect(grpc_subchannel *c) {
+static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
@@ -313,32 +320,33 @@ static void continue_connect(grpc_subchannel *c) {
args.addr_len = c->addr_len;
args.deadline = compute_connect_deadline(c);
args.channel_args = c->args;
- args.metadata_context = c->mdctx;
- grpc_connector_connect(c->connector, &args, &c->connecting_result,
+ grpc_connector_connect(exec_ctx, c->connector, &args, &c->connecting_result,
&c->connected);
}
-static void start_connect(grpc_subchannel *c) {
+static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
c->backoff_delta = gpr_time_from_seconds(
GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS, GPR_TIMESPAN);
c->next_attempt =
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), c->backoff_delta);
- continue_connect(c);
+ continue_connect(exec_ctx, c);
}
-static void continue_creating_call(void *arg, int iomgr_success) {
+static void continue_creating_call(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
waiting_for_connect *w4c = arg;
- grpc_subchannel_del_interested_party(w4c->subchannel, w4c->pollset);
- grpc_subchannel_create_call(w4c->subchannel, w4c->pollset, w4c->target,
- w4c->notify);
- GRPC_SUBCHANNEL_UNREF(w4c->subchannel, "waiting_for_connect");
+ grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel, w4c->pollset);
+ grpc_subchannel_create_call(exec_ctx, w4c->subchannel, w4c->pollset,
+ w4c->target, w4c->notify);
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, w4c->subchannel, "waiting_for_connect");
gpr_free(w4c);
}
-void grpc_subchannel_create_call(grpc_subchannel *c, grpc_pollset *pollset,
+void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
+ grpc_pollset *pollset,
grpc_subchannel_call **target,
- grpc_iomgr_closure *notify) {
+ grpc_closure *notify) {
connection *con;
gpr_mu_lock(&c->mu);
if (c->active != NULL) {
@@ -346,8 +354,8 @@ void grpc_subchannel_create_call(grpc_subchannel *c, grpc_pollset *pollset,
CONNECTION_REF_LOCKED(con, "call");
gpr_mu_unlock(&c->mu);
- *target = create_call(con);
- notify->cb(notify->cb_arg, 1);
+ *target = create_call(exec_ctx, con);
+ notify->cb(exec_ctx, notify->cb_arg, 1);
} else {
waiting_for_connect *w4c = gpr_malloc(sizeof(*w4c));
w4c->next = c->waiting;
@@ -357,18 +365,18 @@ void grpc_subchannel_create_call(grpc_subchannel *c, grpc_pollset *pollset,
w4c->subchannel = c;
/* released when clearing w4c */
SUBCHANNEL_REF_LOCKED(c, "waiting_for_connect");
- grpc_iomgr_closure_init(&w4c->continuation, continue_creating_call, w4c);
+ grpc_closure_init(&w4c->continuation, continue_creating_call, w4c);
c->waiting = w4c;
- grpc_subchannel_add_interested_party(c, pollset);
+ grpc_subchannel_add_interested_party(exec_ctx, c, pollset);
if (!c->connecting) {
c->connecting = 1;
- connectivity_state_changed_locked(c, "create_call");
+ connectivity_state_changed_locked(exec_ctx, c, "create_call");
/* released by connection */
SUBCHANNEL_REF_LOCKED(c, "connecting");
GRPC_CHANNEL_INTERNAL_REF(c->master, "connecting");
gpr_mu_unlock(&c->mu);
- start_connect(c);
+ start_connect(exec_ctx, c);
} else {
gpr_mu_unlock(&c->mu);
}
@@ -383,69 +391,73 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c) {
return state;
}
-void grpc_subchannel_notify_on_state_change(grpc_subchannel *c,
+void grpc_subchannel_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c,
grpc_connectivity_state *state,
- grpc_iomgr_closure *notify) {
+ grpc_closure *notify) {
int do_connect = 0;
gpr_mu_lock(&c->mu);
- if (grpc_connectivity_state_notify_on_state_change(&c->state_tracker, state,
- notify)) {
+ if (grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &c->state_tracker, state, notify)) {
do_connect = 1;
c->connecting = 1;
/* released by connection */
SUBCHANNEL_REF_LOCKED(c, "connecting");
GRPC_CHANNEL_INTERNAL_REF(c->master, "connecting");
- connectivity_state_changed_locked(c, "state_change");
+ connectivity_state_changed_locked(exec_ctx, c, "state_change");
}
gpr_mu_unlock(&c->mu);
+
if (do_connect) {
- start_connect(c);
+ start_connect(exec_ctx, c);
}
}
-void grpc_subchannel_process_transport_op(grpc_subchannel *c,
+void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c,
grpc_transport_op *op) {
connection *con = NULL;
grpc_subchannel *destroy;
int cancel_alarm = 0;
gpr_mu_lock(&c->mu);
+ if (c->active != NULL) {
+ con = c->active;
+ CONNECTION_REF_LOCKED(con, "transport-op");
+ }
if (op->disconnect) {
c->disconnected = 1;
- connectivity_state_changed_locked(c, "disconnect");
+ connectivity_state_changed_locked(exec_ctx, c, "disconnect");
if (c->have_alarm) {
cancel_alarm = 1;
}
}
- if (c->active != NULL) {
- con = c->active;
- CONNECTION_REF_LOCKED(con, "transport-op");
- }
gpr_mu_unlock(&c->mu);
if (con != NULL) {
grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
grpc_channel_element *top_elem =
grpc_channel_stack_element(channel_stack, 0);
- top_elem->filter->start_transport_op(top_elem, op);
+ top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
gpr_mu_lock(&c->mu);
- destroy = CONNECTION_UNREF_LOCKED(con, "transport-op");
+ destroy = CONNECTION_UNREF_LOCKED(exec_ctx, con, "transport-op");
gpr_mu_unlock(&c->mu);
if (destroy) {
- subchannel_destroy(destroy);
+ subchannel_destroy(exec_ctx, destroy);
}
}
if (cancel_alarm) {
- grpc_alarm_cancel(&c->alarm);
+ grpc_alarm_cancel(exec_ctx, &c->alarm);
}
if (op->disconnect) {
- grpc_connector_shutdown(c->connector);
+ grpc_connector_shutdown(exec_ctx, c->connector);
}
}
-static void on_state_changed(void *p, int iomgr_success) {
+static void on_state_changed(grpc_exec_ctx *exec_ctx, void *p,
+ int iomgr_success) {
state_watcher *sw = p;
grpc_subchannel *c = sw->subchannel;
gpr_mu *mu = &c->mu;
@@ -472,7 +484,7 @@ static void on_state_changed(void *p, int iomgr_success) {
op.on_connectivity_state_change = &sw->closure;
elem = grpc_channel_stack_element(
CHANNEL_STACK_FROM_CONNECTION(c->active), 0);
- elem->filter->start_transport_op(elem, &op);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
/* early out */
gpr_mu_unlock(mu);
return;
@@ -483,27 +495,28 @@ static void on_state_changed(void *p, int iomgr_success) {
destroy_connection = sw->subchannel->active;
}
sw->subchannel->active = NULL;
- grpc_connectivity_state_set(
- &c->state_tracker, c->disconnected ? GRPC_CHANNEL_FATAL_FAILURE
- : GRPC_CHANNEL_TRANSIENT_FAILURE,
- "connection_failed");
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
+ c->disconnected
+ ? GRPC_CHANNEL_FATAL_FAILURE
+ : GRPC_CHANNEL_TRANSIENT_FAILURE,
+ "connection_failed");
break;
}
done:
- connectivity_state_changed_locked(c, "transport_state_changed");
+ connectivity_state_changed_locked(exec_ctx, c, "transport_state_changed");
destroy = SUBCHANNEL_UNREF_LOCKED(c, "state_watcher");
gpr_free(sw);
gpr_mu_unlock(mu);
if (destroy) {
- subchannel_destroy(c);
+ subchannel_destroy(exec_ctx, c);
}
if (destroy_connection != NULL) {
- connection_destroy(destroy_connection);
+ connection_destroy(exec_ctx, destroy_connection);
}
}
-static void publish_transport(grpc_subchannel *c) {
+static void publish_transport(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
size_t channel_stack_size;
connection *con;
grpc_channel_stack *stk;
@@ -518,8 +531,8 @@ static void publish_transport(grpc_subchannel *c) {
/* build final filter list */
num_filters = c->num_filters + c->connecting_result.num_filters + 1;
filters = gpr_malloc(sizeof(*filters) * num_filters);
- memcpy(filters, c->filters, sizeof(*filters) * c->num_filters);
- memcpy(filters + c->num_filters, c->connecting_result.filters,
+ memcpy((void *)filters, c->filters, sizeof(*filters) * c->num_filters);
+ memcpy((void *)(filters + c->num_filters), c->connecting_result.filters,
sizeof(*filters) * c->connecting_result.num_filters);
filters[num_filters - 1] = &grpc_connected_channel_filter;
@@ -529,15 +542,15 @@ static void publish_transport(grpc_subchannel *c) {
stk = (grpc_channel_stack *)(con + 1);
con->refs = 0;
con->subchannel = c;
- grpc_channel_stack_init(filters, num_filters, c->master, c->args, c->mdctx,
- stk);
+ grpc_channel_stack_init(exec_ctx, filters, num_filters, c->master, c->args,
+ c->mdctx, stk);
grpc_connected_channel_bind_transport(stk, c->connecting_result.transport);
- gpr_free(c->connecting_result.filters);
+ gpr_free((void *)c->connecting_result.filters);
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
sw = gpr_malloc(sizeof(*sw));
- grpc_iomgr_closure_init(&sw->closure, on_state_changed, sw);
+ grpc_closure_init(&sw->closure, on_state_changed, sw);
sw->subchannel = c;
sw->connectivity_state = GRPC_CHANNEL_READY;
@@ -546,10 +559,10 @@ static void publish_transport(grpc_subchannel *c) {
if (c->disconnected) {
gpr_mu_unlock(&c->mu);
gpr_free(sw);
- gpr_free(filters);
- grpc_channel_stack_destroy(stk);
- GRPC_CHANNEL_INTERNAL_UNREF(c->master, "connecting");
- GRPC_SUBCHANNEL_UNREF(c, "connecting");
+ gpr_free((void *)filters);
+ grpc_channel_stack_destroy(exec_ctx, stk);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, c, "connecting");
return;
}
@@ -569,25 +582,29 @@ static void publish_transport(grpc_subchannel *c) {
op.on_connectivity_state_change = &sw->closure;
op.bind_pollset_set = c->pollset_set;
SUBCHANNEL_REF_LOCKED(c, "state_watcher");
- GRPC_CHANNEL_INTERNAL_UNREF(c->master, "connecting");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
GPR_ASSERT(!SUBCHANNEL_UNREF_LOCKED(c, "connecting"));
elem =
grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(c->active), 0);
- elem->filter->start_transport_op(elem, &op);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
/* signal completion */
- connectivity_state_changed_locked(c, "connected");
- while ((w4c = c->waiting)) {
- c->waiting = w4c->next;
- grpc_iomgr_add_callback(&w4c->continuation);
- }
+ connectivity_state_changed_locked(exec_ctx, c, "connected");
+ w4c = c->waiting;
+ c->waiting = NULL;
gpr_mu_unlock(&c->mu);
- gpr_free(filters);
+ while (w4c != NULL) {
+ waiting_for_connect *next = w4c->next;
+ grpc_exec_ctx_enqueue(exec_ctx, &w4c->continuation, 1);
+ w4c = next;
+ }
+
+ gpr_free((void *)filters);
if (destroy_connection != NULL) {
- connection_destroy(destroy_connection);
+ connection_destroy(exec_ctx, destroy_connection);
}
}
@@ -620,35 +637,36 @@ static void update_reconnect_parameters(grpc_subchannel *c) {
gpr_time_add(c->next_attempt, gpr_time_from_millis(jitter, GPR_TIMESPAN));
}
-static void on_alarm(void *arg, int iomgr_success) {
+static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, int iomgr_success) {
grpc_subchannel *c = arg;
gpr_mu_lock(&c->mu);
c->have_alarm = 0;
if (c->disconnected) {
iomgr_success = 0;
}
- connectivity_state_changed_locked(c, "alarm");
+ connectivity_state_changed_locked(exec_ctx, c, "alarm");
gpr_mu_unlock(&c->mu);
if (iomgr_success) {
update_reconnect_parameters(c);
- continue_connect(c);
+ continue_connect(exec_ctx, c);
} else {
- GRPC_CHANNEL_INTERNAL_UNREF(c->master, "connecting");
- GRPC_SUBCHANNEL_UNREF(c, "connecting");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, c, "connecting");
}
}
-static void subchannel_connected(void *arg, int iomgr_success) {
+static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_success) {
grpc_subchannel *c = arg;
if (c->connecting_result.transport != NULL) {
- publish_transport(c);
+ publish_transport(exec_ctx, c);
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->have_alarm);
c->have_alarm = 1;
- connectivity_state_changed_locked(c, "connect_failed");
- grpc_alarm_init(&c->alarm, c->next_attempt, on_alarm, c, now);
+ connectivity_state_changed_locked(exec_ctx, c, "connect_failed");
+ grpc_alarm_init(exec_ctx, &c->alarm, c->next_attempt, on_alarm, c, now);
gpr_mu_unlock(&c->mu);
}
}
@@ -680,10 +698,11 @@ static grpc_connectivity_state compute_connectivity_locked(grpc_subchannel *c) {
return GRPC_CHANNEL_IDLE;
}
-static void connectivity_state_changed_locked(grpc_subchannel *c,
+static void connectivity_state_changed_locked(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *c,
const char *reason) {
grpc_connectivity_state current = compute_connectivity_locked(c);
- grpc_connectivity_state_set(&c->state_tracker, current, reason);
+ grpc_connectivity_state_set(exec_ctx, &c->state_tracker, current, reason);
}
/*
@@ -695,42 +714,46 @@ void grpc_subchannel_call_ref(
gpr_ref(&c->refs);
}
-void grpc_subchannel_call_unref(
- grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *c
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
if (gpr_unref(&c->refs)) {
gpr_mu *mu = &c->connection->subchannel->mu;
grpc_subchannel *destroy;
- grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c));
+ grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
gpr_mu_lock(mu);
- destroy = CONNECTION_UNREF_LOCKED(c->connection, "call");
+ destroy = CONNECTION_UNREF_LOCKED(exec_ctx, c->connection, "call");
gpr_mu_unlock(mu);
gpr_free(c);
if (destroy != NULL) {
- subchannel_destroy(destroy);
+ subchannel_destroy(exec_ctx, destroy);
}
}
}
-char *grpc_subchannel_call_get_peer(grpc_subchannel_call *call) {
+char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *call) {
grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- return top_elem->filter->get_peer(top_elem);
+ return top_elem->filter->get_peer(exec_ctx, top_elem);
}
-void grpc_subchannel_call_process_op(grpc_subchannel_call *call,
+void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *call,
grpc_transport_stream_op *op) {
grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- top_elem->filter->start_transport_stream_op(top_elem, op);
+ top_elem->filter->start_transport_stream_op(exec_ctx, top_elem, op);
}
-grpc_subchannel_call *create_call(connection *con) {
+static grpc_subchannel_call *create_call(grpc_exec_ctx *exec_ctx,
+ connection *con) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
grpc_subchannel_call *call =
gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call);
call->connection = con;
gpr_ref_init(&call->refs, 1);
- grpc_call_stack_init(chanstk, NULL, NULL, callstk);
+ grpc_call_stack_init(exec_ctx, chanstk, NULL, NULL, callstk);
return call;
}
diff --git a/src/core/client_config/subchannel.h b/src/core/client_config/subchannel.h
index 2e36c69134..c9e5861d9c 100644
--- a/src/core/client_config/subchannel.h
+++ b/src/core/client_config/subchannel.h
@@ -36,6 +36,7 @@
#include "src/core/channel/channel_stack.h"
#include "src/core/client_config/connector.h"
+#include "src/core/transport/connectivity_state.h"
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
@@ -46,39 +47,44 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_UNREF(p, r) \
- grpc_subchannel_unref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
+ grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \
- grpc_subchannel_call_unref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
+ grpc_subchannel_call_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS \
, const char *file, int line, const char *reason
#else
#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
-#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p))
+#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
-#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p))
+#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
+ grpc_subchannel_call_unref((cl), (p))
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
#endif
void grpc_subchannel_ref(
grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_unref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(
grpc_subchannel_call *call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_unref(
- grpc_subchannel_call *call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *call
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a call (possibly asynchronously) */
-void grpc_subchannel_create_call(grpc_subchannel *subchannel,
+void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *subchannel,
grpc_pollset *pollset,
grpc_subchannel_call **target,
- grpc_iomgr_closure *notify);
+ grpc_closure *notify);
/** process a transport level op */
-void grpc_subchannel_process_transport_op(grpc_subchannel *subchannel,
+void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *subchannel,
grpc_transport_op *op);
/** poll the current connectivity state of a channel */
@@ -87,23 +93,28 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the channel */
-void grpc_subchannel_notify_on_state_change(grpc_subchannel *channel,
+void grpc_subchannel_notify_on_state_change(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel,
grpc_connectivity_state *state,
- grpc_iomgr_closure *notify);
+ grpc_closure *notify);
/** express interest in \a channel's activities through \a pollset. */
-void grpc_subchannel_add_interested_party(grpc_subchannel *channel,
+void grpc_subchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel,
grpc_pollset *pollset);
/** stop following \a channel's activity through \a pollset. */
-void grpc_subchannel_del_interested_party(grpc_subchannel *channel,
+void grpc_subchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel *channel,
grpc_pollset *pollset);
/** continue processing a transport op */
-void grpc_subchannel_call_process_op(grpc_subchannel_call *subchannel_call,
+void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *subchannel_call,
grpc_transport_stream_op *op);
/** continue querying for peer */
-char *grpc_subchannel_call_get_peer(grpc_subchannel_call *subchannel_call);
+char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_call *subchannel_call);
struct grpc_subchannel_args {
/** Channel filters for this channel - wrapped factories will likely
diff --git a/src/core/client_config/subchannel_factory.c b/src/core/client_config/subchannel_factory.c
index f71386594c..2c64219e8b 100644
--- a/src/core/client_config/subchannel_factory.c
+++ b/src/core/client_config/subchannel_factory.c
@@ -33,14 +33,17 @@
#include "src/core/client_config/subchannel_factory.h"
-void grpc_subchannel_factory_ref(grpc_subchannel_factory *factory) {
+void grpc_subchannel_factory_ref(grpc_subchannel_factory* factory) {
factory->vtable->ref(factory);
}
-void grpc_subchannel_factory_unref(grpc_subchannel_factory *factory) {
- factory->vtable->unref(factory);
+
+void grpc_subchannel_factory_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_factory* factory) {
+ factory->vtable->unref(exec_ctx, factory);
}
-grpc_subchannel *grpc_subchannel_factory_create_subchannel(
- grpc_subchannel_factory *factory, grpc_subchannel_args *args) {
- return factory->vtable->create_subchannel(factory, args);
+grpc_subchannel* grpc_subchannel_factory_create_subchannel(
+ grpc_exec_ctx* exec_ctx, grpc_subchannel_factory* factory,
+ grpc_subchannel_args* args) {
+ return factory->vtable->create_subchannel(exec_ctx, factory, args);
}
diff --git a/src/core/client_config/subchannel_factory.h b/src/core/client_config/subchannel_factory.h
index d7eae1c964..c6d8cc90be 100644
--- a/src/core/client_config/subchannel_factory.h
+++ b/src/core/client_config/subchannel_factory.h
@@ -48,16 +48,19 @@ struct grpc_subchannel_factory {
struct grpc_subchannel_factory_vtable {
void (*ref)(grpc_subchannel_factory *factory);
- void (*unref)(grpc_subchannel_factory *factory);
- grpc_subchannel *(*create_subchannel)(grpc_subchannel_factory *factory,
+ void (*unref)(grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *factory);
+ grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *factory,
grpc_subchannel_args *args);
};
void grpc_subchannel_factory_ref(grpc_subchannel_factory *factory);
-void grpc_subchannel_factory_unref(grpc_subchannel_factory *factory);
+void grpc_subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *factory);
/** Create a new grpc_subchannel */
grpc_subchannel *grpc_subchannel_factory_create_subchannel(
- grpc_subchannel_factory *factory, grpc_subchannel_args *args);
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *factory,
+ grpc_subchannel_args *args);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_H */
diff --git a/src/core/client_config/subchannel_factory_decorators/add_channel_arg.h b/src/core/client_config/subchannel_factory_decorators/add_channel_arg.h
index 8457294000..76a535ebed 100644
--- a/src/core/client_config/subchannel_factory_decorators/add_channel_arg.h
+++ b/src/core/client_config/subchannel_factory_decorators/add_channel_arg.h
@@ -43,4 +43,4 @@ grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
grpc_subchannel_factory *input, const grpc_arg *arg);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H \
- */
+ */
diff --git a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c b/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
index c1b5507fde..cd25fdcf0f 100644
--- a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
+++ b/src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
@@ -47,23 +47,25 @@ static void merge_args_factory_ref(grpc_subchannel_factory *scf) {
gpr_ref(&f->refs);
}
-static void merge_args_factory_unref(grpc_subchannel_factory *scf) {
+static void merge_args_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
merge_args_factory *f = (merge_args_factory *)scf;
if (gpr_unref(&f->refs)) {
- grpc_subchannel_factory_unref(f->wrapped);
+ grpc_subchannel_factory_unref(exec_ctx, f->wrapped);
grpc_channel_args_destroy(f->merge_args);
gpr_free(f);
}
}
static grpc_subchannel *merge_args_factory_create_subchannel(
- grpc_subchannel_factory *scf, grpc_subchannel_args *args) {
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
merge_args_factory *f = (merge_args_factory *)scf;
grpc_channel_args *final_args =
grpc_channel_args_merge(args->args, f->merge_args);
grpc_subchannel *s;
args->args = final_args;
- s = grpc_subchannel_factory_create_subchannel(f->wrapped, args);
+ s = grpc_subchannel_factory_create_subchannel(exec_ctx, f->wrapped, args);
grpc_channel_args_destroy(final_args);
return s;
}
diff --git a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.h b/src/core/client_config/subchannel_factory_decorators/merge_channel_args.h
index f4757f0650..a9e1691871 100644
--- a/src/core/client_config/subchannel_factory_decorators/merge_channel_args.h
+++ b/src/core/client_config/subchannel_factory_decorators/merge_channel_args.h
@@ -43,4 +43,4 @@ grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
grpc_subchannel_factory *input, const grpc_channel_args *args);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H \
- */
+ */
diff --git a/src/core/client_config/uri_parser.c b/src/core/client_config/uri_parser.c
index 2738e2df57..df9f32d403 100644
--- a/src/core/client_config/uri_parser.c
+++ b/src/core/client_config/uri_parser.c
@@ -79,12 +79,11 @@ static size_t parse_pchar(const char *uri_text, size_t i) {
* unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
* pct-encoded = "%" HEXDIG HEXDIG
* sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
- / "*" / "+" / "," / ";" / "=" */
+ / "*" / "+" / "," / ";" / "=" */
char c = uri_text[i];
if (((c >= 'A') && (c <= 'Z')) || ((c >= 'a') && (c <= 'z')) ||
((c >= '0') && (c <= '9')) ||
(c == '-' || c == '.' || c == '_' || c == '~') || /* unreserved */
-
(c == '!' || c == '$' || c == '&' || c == '\'' || c == '$' || c == '&' ||
c == '(' || c == ')' || c == '*' || c == '+' || c == ',' || c == ';' ||
c == '=') /* sub-delims */) {
diff --git a/src/core/compression/algorithm.c b/src/core/compression/algorithm.c
index 76d42fde0f..8adde13b1e 100644
--- a/src/core/compression/algorithm.c
+++ b/src/core/compression/algorithm.c
@@ -106,7 +106,7 @@ grpc_compression_level grpc_compression_level_for_algorithm(
}
void grpc_compression_options_init(grpc_compression_options *opts) {
- opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT)-1;
+ opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
opts->default_compression_algorithm = GRPC_COMPRESS_NONE;
}
diff --git a/src/core/compression/message_compress.c b/src/core/compression/message_compress.c
index 01db7134c3..209c1f0ff1 100644
--- a/src/core/compression/message_compress.c
+++ b/src/core/compression/message_compress.c
@@ -42,9 +42,9 @@
#define OUTPUT_BLOCK_SIZE 1024
-static int zlib_body(z_stream *zs, gpr_slice_buffer *input,
- gpr_slice_buffer *output,
- int (*flate)(z_stream *zs, int flush)) {
+static int zlib_body(z_stream* zs, gpr_slice_buffer* input,
+ gpr_slice_buffer* output,
+ int (*flate)(z_stream* zs, int flush)) {
int r;
int flush;
size_t i;
@@ -91,7 +91,7 @@ error:
return 0;
}
-static int zlib_compress(gpr_slice_buffer *input, gpr_slice_buffer *output,
+static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
int r;
@@ -117,7 +117,7 @@ static int zlib_compress(gpr_slice_buffer *input, gpr_slice_buffer *output,
return r;
}
-static int zlib_decompress(gpr_slice_buffer *input, gpr_slice_buffer *output,
+static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
int r;
@@ -142,7 +142,7 @@ static int zlib_decompress(gpr_slice_buffer *input, gpr_slice_buffer *output,
return r;
}
-static int copy(gpr_slice_buffer *input, gpr_slice_buffer *output) {
+static int copy(gpr_slice_buffer* input, gpr_slice_buffer* output) {
size_t i;
for (i = 0; i < input->count; i++) {
gpr_slice_buffer_add(output, gpr_slice_ref(input->slices[i]));
@@ -151,7 +151,7 @@ static int copy(gpr_slice_buffer *input, gpr_slice_buffer *output) {
}
int compress_inner(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output) {
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
/* the fallback path always needs to be send uncompressed: we simply
@@ -169,7 +169,7 @@ int compress_inner(grpc_compression_algorithm algorithm,
}
int grpc_msg_compress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output) {
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
if (!compress_inner(algorithm, input, output)) {
copy(input, output);
return 0;
@@ -178,7 +178,7 @@ int grpc_msg_compress(grpc_compression_algorithm algorithm,
}
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output) {
+ gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
return copy(input, output);
diff --git a/src/core/compression/message_compress.h b/src/core/compression/message_compress.h
index b3eb8f579f..14652004b8 100644
--- a/src/core/compression/message_compress.h
+++ b/src/core/compression/message_compress.h
@@ -41,12 +41,12 @@
On success, appends compressed slices to output and returns 1.
On failure, appends uncompressed slices to output and returns 0. */
int grpc_msg_compress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output);
+ gpr_slice_buffer* input, gpr_slice_buffer* output);
/* decompress 'input' to 'output' using 'algorithm'.
On success, appends slices to output and returns 1.
On failure, output is unchanged, and returns 0. */
int grpc_msg_decompress(grpc_compression_algorithm algorithm,
- gpr_slice_buffer *input, gpr_slice_buffer *output);
+ gpr_slice_buffer* input, gpr_slice_buffer* output);
#endif /* GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H */
diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c
index 1e38479eb1..a87f1aa87b 100644
--- a/src/core/httpcli/httpcli.c
+++ b/src/core/httpcli/httpcli.c
@@ -63,18 +63,20 @@ typedef struct {
grpc_iomgr_object iomgr_obj;
gpr_slice_buffer incoming;
gpr_slice_buffer outgoing;
- grpc_iomgr_closure on_read;
- grpc_iomgr_closure done_write;
+ grpc_closure on_read;
+ grpc_closure done_write;
+ grpc_closure connected;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
static grpc_httpcli_post_override g_post_override = NULL;
-static void plaintext_handshake(void *arg, grpc_endpoint *endpoint,
- const char *host,
- void (*on_done)(void *arg,
+static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *endpoint, const char *host,
+ void (*on_done)(grpc_exec_ctx *exec_ctx,
+ void *arg,
grpc_endpoint *endpoint)) {
- on_done(arg, endpoint);
+ on_done(exec_ctx, arg, endpoint);
}
const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http",
@@ -88,17 +90,19 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context) {
grpc_pollset_set_destroy(&context->pollset_set);
}
-static void next_address(internal_request *req);
+static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req);
-static void finish(internal_request *req, int success) {
- grpc_pollset_set_del_pollset(&req->context->pollset_set, req->pollset);
- req->on_response(req->user_data, success ? &req->parser.r : NULL);
+static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
+ int success) {
+ grpc_pollset_set_del_pollset(exec_ctx, &req->context->pollset_set,
+ req->pollset);
+ req->on_response(exec_ctx, req->user_data, success ? &req->parser.r : NULL);
grpc_httpcli_parser_destroy(&req->parser);
if (req->addresses != NULL) {
grpc_resolved_addresses_destroy(req->addresses);
}
if (req->ep != NULL) {
- grpc_endpoint_destroy(req->ep);
+ grpc_endpoint_destroy(exec_ctx, req->ep);
}
gpr_slice_unref(req->request_text);
gpr_free(req->host);
@@ -108,22 +112,13 @@ static void finish(internal_request *req, int success) {
gpr_free(req);
}
-static void on_read(void *user_data, int success);
+static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success);
-static void do_read(internal_request *req) {
- switch (grpc_endpoint_read(req->ep, &req->incoming, &req->on_read)) {
- case GRPC_ENDPOINT_DONE:
- on_read(req, 1);
- break;
- case GRPC_ENDPOINT_PENDING:
- break;
- case GRPC_ENDPOINT_ERROR:
- on_read(req, 0);
- break;
- }
+static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
+ grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read);
}
-static void on_read(void *user_data, int success) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
internal_request *req = user_data;
size_t i;
@@ -131,105 +126,97 @@ static void on_read(void *user_data, int success) {
if (GPR_SLICE_LENGTH(req->incoming.slices[i])) {
req->have_read_byte = 1;
if (!grpc_httpcli_parser_parse(&req->parser, req->incoming.slices[i])) {
- finish(req, 0);
+ finish(exec_ctx, req, 0);
return;
}
}
}
if (success) {
- do_read(req);
+ do_read(exec_ctx, req);
} else if (!req->have_read_byte) {
- next_address(req);
+ next_address(exec_ctx, req);
} else {
- finish(req, grpc_httpcli_parser_eof(&req->parser));
+ finish(exec_ctx, req, grpc_httpcli_parser_eof(&req->parser));
}
}
-static void on_written(internal_request *req) { do_read(req); }
+static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
+ do_read(exec_ctx, req);
+}
-static void done_write(void *arg, int success) {
+static void done_write(grpc_exec_ctx *exec_ctx, void *arg, int success) {
internal_request *req = arg;
if (success) {
- on_written(req);
+ on_written(exec_ctx, req);
} else {
- next_address(req);
+ next_address(exec_ctx, req);
}
}
-static void start_write(internal_request *req) {
+static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
gpr_slice_ref(req->request_text);
gpr_slice_buffer_add(&req->outgoing, req->request_text);
- switch (grpc_endpoint_write(req->ep, &req->outgoing, &req->done_write)) {
- case GRPC_ENDPOINT_DONE:
- on_written(req);
- break;
- case GRPC_ENDPOINT_PENDING:
- break;
- case GRPC_ENDPOINT_ERROR:
- finish(req, 0);
- break;
- }
+ grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write);
}
-static void on_handshake_done(void *arg, grpc_endpoint *ep) {
+static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *ep) {
internal_request *req = arg;
if (!ep) {
- next_address(req);
+ next_address(exec_ctx, req);
return;
}
req->ep = ep;
- start_write(req);
+ start_write(exec_ctx, req);
}
-static void on_connected(void *arg, grpc_endpoint *tcp) {
+static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
internal_request *req = arg;
- if (!tcp) {
- next_address(req);
+ if (!req->ep) {
+ next_address(exec_ctx, req);
return;
}
- req->handshaker->handshake(req, tcp, req->host, on_handshake_done);
+ req->handshaker->handshake(exec_ctx, req, req->ep, req->host,
+ on_handshake_done);
}
-static void next_address(internal_request *req) {
+static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req) {
grpc_resolved_address *addr;
if (req->next_address == req->addresses->naddrs) {
- finish(req, 0);
+ finish(exec_ctx, req, 0);
return;
}
addr = &req->addresses->addrs[req->next_address++];
- grpc_tcp_client_connect(on_connected, req, &req->context->pollset_set,
- (struct sockaddr *)&addr->addr, addr->len,
- req->deadline);
+ grpc_closure_init(&req->connected, on_connected, req);
+ grpc_tcp_client_connect(
+ exec_ctx, &req->connected, &req->ep, &req->context->pollset_set,
+ (struct sockaddr *)&addr->addr, addr->len, req->deadline);
}
-static void on_resolved(void *arg, grpc_resolved_addresses *addresses) {
+static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses) {
internal_request *req = arg;
if (!addresses) {
- finish(req, 0);
+ finish(exec_ctx, req, 0);
return;
}
req->addresses = addresses;
req->next_address = 0;
- next_address(req);
+ next_address(exec_ctx, req);
}
-void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
- const grpc_httpcli_request *request,
- gpr_timespec deadline,
- grpc_httpcli_response_cb on_response, void *user_data) {
- internal_request *req;
- char *name;
- if (g_get_override &&
- g_get_override(request, deadline, on_response, user_data)) {
- return;
- }
- req = gpr_malloc(sizeof(internal_request));
+static void internal_request_begin(
+ grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset, const grpc_httpcli_request *request,
+ gpr_timespec deadline, grpc_httpcli_response_cb on_response,
+ void *user_data, const char *name, gpr_slice request_text) {
+ internal_request *req = gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
- req->request_text = grpc_httpcli_format_get_request(request);
+ req->request_text = request_text;
grpc_httpcli_parser_init(&req->parser);
req->on_response = on_response;
req->user_data = user_data;
@@ -238,55 +225,53 @@ void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
request->handshaker ? request->handshaker : &grpc_httpcli_plaintext;
req->context = context;
req->pollset = pollset;
- grpc_iomgr_closure_init(&req->on_read, on_read, req);
- grpc_iomgr_closure_init(&req->done_write, done_write, req);
+ grpc_closure_init(&req->on_read, on_read, req);
+ grpc_closure_init(&req->done_write, done_write, req);
gpr_slice_buffer_init(&req->incoming);
gpr_slice_buffer_init(&req->outgoing);
- gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
grpc_iomgr_register_object(&req->iomgr_obj, name);
- gpr_free(name);
req->host = gpr_strdup(request->host);
- grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset);
+ grpc_pollset_set_add_pollset(exec_ctx, &req->context->pollset_set,
+ req->pollset);
grpc_resolve_address(request->host, req->handshaker->default_port,
on_resolved, req);
}
-void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset,
+void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
+ const grpc_httpcli_request *request,
+ gpr_timespec deadline,
+ grpc_httpcli_response_cb on_response, void *user_data) {
+ char *name;
+ if (g_get_override &&
+ g_get_override(exec_ctx, request, deadline, on_response, user_data)) {
+ return;
+ }
+ gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path);
+ internal_request_begin(exec_ctx, context, pollset, request, deadline,
+ on_response, user_data, name,
+ grpc_httpcli_format_get_request(request));
+ gpr_free(name);
+}
+
+void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data) {
- internal_request *req;
char *name;
- if (g_post_override && g_post_override(request, body_bytes, body_size,
- deadline, on_response, user_data)) {
+ if (g_post_override &&
+ g_post_override(exec_ctx, request, body_bytes, body_size, deadline,
+ on_response, user_data)) {
return;
}
- req = gpr_malloc(sizeof(internal_request));
- memset(req, 0, sizeof(*req));
- req->request_text =
- grpc_httpcli_format_post_request(request, body_bytes, body_size);
- grpc_httpcli_parser_init(&req->parser);
- req->on_response = on_response;
- req->user_data = user_data;
- req->deadline = deadline;
- req->handshaker =
- request->handshaker ? request->handshaker : &grpc_httpcli_plaintext;
- req->context = context;
- req->pollset = pollset;
- grpc_iomgr_closure_init(&req->on_read, on_read, req);
- grpc_iomgr_closure_init(&req->done_write, done_write, req);
- gpr_slice_buffer_init(&req->incoming);
- gpr_slice_buffer_init(&req->outgoing);
gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->path);
- grpc_iomgr_register_object(&req->iomgr_obj, name);
+ internal_request_begin(
+ exec_ctx, context, pollset, request, deadline, on_response, user_data,
+ name, grpc_httpcli_format_post_request(request, body_bytes, body_size));
gpr_free(name);
- req->host = gpr_strdup(request->host);
-
- grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset);
- grpc_resolve_address(request->host, req->handshaker->default_port,
- on_resolved, req);
}
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
diff --git a/src/core/httpcli/httpcli.h b/src/core/httpcli/httpcli.h
index c45966714c..6469c2f03e 100644
--- a/src/core/httpcli/httpcli.h
+++ b/src/core/httpcli/httpcli.h
@@ -61,8 +61,10 @@ typedef struct grpc_httpcli_context {
typedef struct {
const char *default_port;
- void (*handshake)(void *arg, grpc_endpoint *endpoint, const char *host,
- void (*on_done)(void *arg, grpc_endpoint *endpoint));
+ void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint,
+ const char *host,
+ void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *endpoint));
} grpc_httpcli_handshaker;
extern const grpc_httpcli_handshaker grpc_httpcli_plaintext;
@@ -76,7 +78,7 @@ typedef struct grpc_httpcli_request {
char *path;
/* Additional headers: count and key/values; the following are supplied
automatically and MUST NOT be set here:
- Host, Connection, User-Agent */
+ Host, Connection, User-Agent */
size_t hdr_count;
grpc_httpcli_header *hdrs;
/* handshaker to use ssl for the request */
@@ -96,7 +98,8 @@ typedef struct grpc_httpcli_response {
} grpc_httpcli_response;
/* Callback for grpc_httpcli_get and grpc_httpcli_post. */
-typedef void (*grpc_httpcli_response_cb)(void *user_data,
+typedef void (*grpc_httpcli_response_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
const grpc_httpcli_response *response);
void grpc_httpcli_context_init(grpc_httpcli_context *context);
@@ -112,7 +115,8 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
'deadline' contains a deadline for the request (or gpr_inf_future)
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call) */
-void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
+void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
const grpc_httpcli_request *request,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data);
@@ -132,23 +136,23 @@ void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset,
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call)
Does not support ?var1=val1&var2=val2 in the path. */
-void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset,
+void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
+ grpc_pollset *pollset,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response, void *user_data);
/* override functions return 1 if they handled the request, 0 otherwise */
-typedef int (*grpc_httpcli_get_override)(const grpc_httpcli_request *request,
+typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx,
+ const grpc_httpcli_request *request,
gpr_timespec deadline,
grpc_httpcli_response_cb on_response,
void *user_data);
-typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request *request,
- const char *body_bytes,
- size_t body_size,
- gpr_timespec deadline,
- grpc_httpcli_response_cb on_response,
- void *user_data);
+typedef int (*grpc_httpcli_post_override)(
+ grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
+ const char *body_bytes, size_t body_size, gpr_timespec deadline,
+ grpc_httpcli_response_cb on_response, void *user_data);
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
grpc_httpcli_post_override post);
diff --git a/src/core/httpcli/httpcli_security_connector.c b/src/core/httpcli/httpcli_security_connector.c
index 86f34db1d0..fc6699c918 100644
--- a/src/core/httpcli/httpcli_security_connector.c
+++ b/src/core/httpcli/httpcli_security_connector.c
@@ -58,15 +58,17 @@ static void httpcli_ssl_destroy(grpc_security_connector *sc) {
gpr_free(sc);
}
-static void httpcli_ssl_do_handshake(
- grpc_security_connector *sc, grpc_endpoint *nonsecure_endpoint,
- grpc_security_handshake_done_cb cb, void *user_data) {
+static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
+ grpc_endpoint *nonsecure_endpoint,
+ grpc_security_handshake_done_cb cb,
+ void *user_data) {
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
tsi_result result = TSI_OK;
tsi_handshaker *handshaker;
if (c->handshaker_factory == NULL) {
- cb(user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
return;
}
result = tsi_ssl_handshaker_factory_create_handshaker(
@@ -74,9 +76,9 @@ static void httpcli_ssl_do_handshake(
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
- cb(user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
} else {
- grpc_do_security_handshake(handshaker, sc, nonsecure_endpoint, cb,
+ grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
}
}
@@ -141,33 +143,35 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
/* handshaker */
typedef struct {
- void (*func)(void *arg, grpc_endpoint *endpoint);
+ void (*func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint);
void *arg;
} on_done_closure;
-static void on_secure_transport_setup_done(void *rp,
+static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
on_done_closure *c = rp;
if (status != GRPC_SECURITY_OK) {
gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
- c->func(c->arg, NULL);
+ c->func(exec_ctx, c->arg, NULL);
} else {
- c->func(c->arg, secure_endpoint);
+ c->func(exec_ctx, c->arg, secure_endpoint);
}
gpr_free(c);
}
-static void ssl_handshake(void *arg, grpc_endpoint *tcp, const char *host,
- void (*on_done)(void *arg, grpc_endpoint *endpoint)) {
+static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *tcp, const char *host,
+ void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *endpoint)) {
grpc_channel_security_connector *sc = NULL;
const unsigned char *pem_root_certs = NULL;
on_done_closure *c = gpr_malloc(sizeof(*c));
size_t pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
if (pem_root_certs == NULL || pem_root_certs_size == 0) {
gpr_log(GPR_ERROR, "Could not get default pem root certs.");
- on_done(arg, NULL);
+ on_done(exec_ctx, arg, NULL);
gpr_free(c);
return;
}
@@ -176,7 +180,7 @@ static void ssl_handshake(void *arg, grpc_endpoint *tcp, const char *host,
GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
pem_root_certs, pem_root_certs_size, host, &sc) ==
GRPC_SECURITY_OK);
- grpc_security_connector_do_handshake(&sc->base, tcp,
+ grpc_security_connector_do_handshake(exec_ctx, &sc->base, tcp,
on_secure_transport_setup_done, c);
GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli");
}
diff --git a/src/core/httpcli/parser.h b/src/core/httpcli/parser.h
index 3fbb4c7479..40b28f0ee8 100644
--- a/src/core/httpcli/parser.h
+++ b/src/core/httpcli/parser.h
@@ -55,10 +55,10 @@ typedef struct {
size_t cur_line_length;
} grpc_httpcli_parser;
-void grpc_httpcli_parser_init(grpc_httpcli_parser *parser);
-void grpc_httpcli_parser_destroy(grpc_httpcli_parser *parser);
+void grpc_httpcli_parser_init(grpc_httpcli_parser* parser);
+void grpc_httpcli_parser_destroy(grpc_httpcli_parser* parser);
-int grpc_httpcli_parser_parse(grpc_httpcli_parser *parser, gpr_slice slice);
-int grpc_httpcli_parser_eof(grpc_httpcli_parser *parser);
+int grpc_httpcli_parser_parse(grpc_httpcli_parser* parser, gpr_slice slice);
+int grpc_httpcli_parser_eof(grpc_httpcli_parser* parser);
#endif /* GRPC_INTERNAL_CORE_HTTPCLI_PARSER_H */
diff --git a/src/core/iomgr/alarm.c b/src/core/iomgr/alarm.c
index 7b67fe3b1d..0ba5361606 100644
--- a/src/core/iomgr/alarm.c
+++ b/src/core/iomgr/alarm.c
@@ -44,7 +44,6 @@
#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
-#define MAX_ALARMS_PER_CHECK 128
#define ADD_DEADLINE_SCALE 0.33
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1
@@ -73,7 +72,7 @@ static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS];
-static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
+static int run_some_expired_alarms(grpc_exec_ctx *exec_ctx, gpr_timespec now,
gpr_timespec *next, int success);
static gpr_timespec compute_min_deadline(shard_type *shard) {
@@ -103,10 +102,9 @@ void grpc_alarm_list_init(gpr_timespec now) {
}
}
-void grpc_alarm_list_shutdown(void) {
+void grpc_alarm_list_shutdown(grpc_exec_ctx *exec_ctx) {
int i;
- while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL, 0))
- ;
+ run_some_expired_alarms(exec_ctx, gpr_inf_future(g_clock_type), NULL, 0);
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
@@ -172,15 +170,14 @@ static void note_deadline_change(shard_type *shard) {
}
}
-void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
- grpc_iomgr_cb_func alarm_cb, void *alarm_cb_arg,
- gpr_timespec now) {
+void grpc_alarm_init(grpc_exec_ctx *exec_ctx, grpc_alarm *alarm,
+ gpr_timespec deadline, grpc_iomgr_cb_func alarm_cb,
+ void *alarm_cb_arg, gpr_timespec now) {
int is_first_alarm = 0;
shard_type *shard = &g_shards[shard_idx(alarm)];
GPR_ASSERT(deadline.clock_type == g_clock_type);
GPR_ASSERT(now.clock_type == g_clock_type);
- alarm->cb = alarm_cb;
- alarm->cb_arg = alarm_cb_arg;
+ grpc_closure_init(&alarm->closure, alarm_cb, alarm_cb_arg);
alarm->deadline = deadline;
alarm->triggered = 0;
@@ -223,12 +220,11 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
}
}
-void grpc_alarm_cancel(grpc_alarm *alarm) {
+void grpc_alarm_cancel(grpc_exec_ctx *exec_ctx, grpc_alarm *alarm) {
shard_type *shard = &g_shards[shard_idx(alarm)];
- int triggered = 0;
gpr_mu_lock(&shard->mu);
if (!alarm->triggered) {
- triggered = 1;
+ grpc_exec_ctx_enqueue(exec_ctx, &alarm->closure, 0);
alarm->triggered = 1;
if (alarm->heap_index == INVALID_HEAP_INDEX) {
list_remove(alarm);
@@ -237,10 +233,6 @@ void grpc_alarm_cancel(grpc_alarm *alarm) {
}
}
gpr_mu_unlock(&shard->mu);
-
- if (triggered) {
- alarm->cb(alarm->cb_arg, 0);
- }
}
/* This is called when the queue is empty and "now" has reached the
@@ -291,40 +283,38 @@ static grpc_alarm *pop_one(shard_type *shard, gpr_timespec now) {
}
/* REQUIRES: shard->mu unlocked */
-static size_t pop_alarms(shard_type *shard, gpr_timespec now,
- grpc_alarm **alarms, size_t max_alarms,
- gpr_timespec *new_min_deadline) {
+static size_t pop_alarms(grpc_exec_ctx *exec_ctx, shard_type *shard,
+ gpr_timespec now, gpr_timespec *new_min_deadline,
+ int success) {
size_t n = 0;
grpc_alarm *alarm;
gpr_mu_lock(&shard->mu);
- while (n < max_alarms && (alarm = pop_one(shard, now))) {
- alarms[n++] = alarm;
+ while ((alarm = pop_one(shard, now))) {
+ grpc_exec_ctx_enqueue(exec_ctx, &alarm->closure, success);
+ n++;
}
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
return n;
}
-static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
+static int run_some_expired_alarms(grpc_exec_ctx *exec_ctx, gpr_timespec now,
gpr_timespec *next, int success) {
size_t n = 0;
- size_t i;
- grpc_alarm *alarms[MAX_ALARMS_PER_CHECK];
/* TODO(ctiller): verify that there are any alarms (atomically) here */
if (gpr_mu_trylock(&g_checker_mu)) {
gpr_mu_lock(&g_mu);
- while (n < MAX_ALARMS_PER_CHECK &&
- gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
+ while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
gpr_timespec new_min_deadline;
/* For efficiency, we pop as many available alarms as we can from the
shard. This may violate perfect alarm deadline ordering, but that
shouldn't be a big deal because we don't make ordering guarantees. */
- n += pop_alarms(g_shard_queue[0], now, alarms + n,
- MAX_ALARMS_PER_CHECK - n, &new_min_deadline);
+ n += pop_alarms(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
+ success);
/* An grpc_alarm_init() on the shard could intervene here, adding a new
alarm that is earlier than new_min_deadline. However,
@@ -343,25 +333,14 @@ static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
gpr_mu_unlock(&g_checker_mu);
}
- if (n && drop_mu) {
- gpr_mu_unlock(drop_mu);
- }
-
- for (i = 0; i < n; i++) {
- alarms[i]->cb(alarms[i]->cb_arg, success);
- }
-
- if (n && drop_mu) {
- gpr_mu_lock(drop_mu);
- }
-
return (int)n;
}
-int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next) {
+int grpc_alarm_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
+ gpr_timespec *next) {
GPR_ASSERT(now.clock_type == g_clock_type);
return run_some_expired_alarms(
- drop_mu, now, next,
+ exec_ctx, now, next,
gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}
diff --git a/src/core/iomgr/alarm.h b/src/core/iomgr/alarm.h
index 4a13527e64..94f9bc1355 100644
--- a/src/core/iomgr/alarm.h
+++ b/src/core/iomgr/alarm.h
@@ -35,6 +35,7 @@
#define GRPC_INTERNAL_CORE_IOMGR_ALARM_H
#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
@@ -44,8 +45,7 @@ typedef struct grpc_alarm {
int triggered;
struct grpc_alarm *next;
struct grpc_alarm *prev;
- grpc_iomgr_cb_func cb;
- void *cb_arg;
+ grpc_closure closure;
} grpc_alarm;
/* Initialize *alarm. When expired or canceled, alarm_cb will be called with
@@ -54,9 +54,9 @@ typedef struct grpc_alarm {
and application code should check the status to determine how it was
invoked. The application callback is also responsible for maintaining
information about when to free up any user-level state. */
-void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
- grpc_iomgr_cb_func alarm_cb, void *alarm_cb_arg,
- gpr_timespec now);
+void grpc_alarm_init(grpc_exec_ctx *exec_ctx, grpc_alarm *alarm,
+ gpr_timespec deadline, grpc_iomgr_cb_func alarm_cb,
+ void *alarm_cb_arg, gpr_timespec now);
/* Note that there is no alarm destroy function. This is because the
alarm is a one-time occurrence with a guarantee that the callback will
@@ -84,6 +84,6 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
matches this aim.
Requires: cancel() must happen after add() on a given alarm */
-void grpc_alarm_cancel(grpc_alarm *alarm);
+void grpc_alarm_cancel(grpc_exec_ctx *exec_ctx, grpc_alarm *alarm);
#endif /* GRPC_INTERNAL_CORE_IOMGR_ALARM_H */
diff --git a/src/core/iomgr/alarm_internal.h b/src/core/iomgr/alarm_internal.h
index e9f98a3444..31d840e6f9 100644
--- a/src/core/iomgr/alarm_internal.h
+++ b/src/core/iomgr/alarm_internal.h
@@ -34,6 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H
#define GRPC_INTERNAL_CORE_IOMGR_ALARM_INTERNAL_H
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -48,10 +49,10 @@
with high probability at least one thread in the system will see an update
at any time slice. */
-int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next);
-
+int grpc_alarm_check(grpc_exec_ctx* exec_ctx, gpr_timespec now,
+ gpr_timespec* next);
void grpc_alarm_list_init(gpr_timespec now);
-void grpc_alarm_list_shutdown(void);
+void grpc_alarm_list_shutdown(grpc_exec_ctx* exec_ctx);
gpr_timespec grpc_alarm_list_next_timeout(void);
diff --git a/src/core/iomgr/closure.c b/src/core/iomgr/closure.c
new file mode 100644
index 0000000000..3265425789
--- /dev/null
+++ b/src/core/iomgr/closure.c
@@ -0,0 +1,71 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/closure.h"
+
+void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg) {
+ closure->cb = cb;
+ closure->cb_arg = cb_arg;
+ closure->next = NULL;
+}
+
+void grpc_closure_list_add(grpc_closure_list *closure_list,
+ grpc_closure *closure, int success) {
+ if (closure == NULL) return;
+ closure->next = NULL;
+ closure->success = success;
+ if (closure_list->head == NULL) {
+ closure_list->head = closure;
+ } else {
+ closure_list->tail->next = closure;
+ }
+ closure_list->tail = closure;
+}
+
+int grpc_closure_list_empty(grpc_closure_list closure_list) {
+ return closure_list.head == NULL;
+}
+
+void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
+ if (src->head == NULL) {
+ return;
+ }
+ if (dst->head == NULL) {
+ *dst = *src;
+ } else {
+ dst->tail->next = src->head;
+ dst->tail = src->tail;
+ }
+ src->head = src->tail = NULL;
+}
diff --git a/src/core/iomgr/closure.h b/src/core/iomgr/closure.h
new file mode 100644
index 0000000000..982ffa4e1b
--- /dev/null
+++ b/src/core/iomgr/closure.h
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
+#define GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
+
+#include <stddef.h>
+
+struct grpc_closure;
+typedef struct grpc_closure grpc_closure;
+
+/* forward declaration for exec_ctx.h */
+struct grpc_exec_ctx;
+typedef struct grpc_exec_ctx grpc_exec_ctx;
+
+typedef struct grpc_closure_list {
+ grpc_closure *head;
+ grpc_closure *tail;
+} grpc_closure_list;
+
+/** gRPC Callback definition.
+ *
+ * \param arg Arbitrary input.
+ * \param success An indication on the state of the iomgr. On false, cleanup
+ * actions should be taken (eg, shutdown). */
+typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
+ int success);
+
+/** A closure over a grpc_iomgr_cb_func. */
+struct grpc_closure {
+ /** Bound callback. */
+ grpc_iomgr_cb_func cb;
+
+ /** Arguments to be passed to "cb". */
+ void *cb_arg;
+
+ /** Internal. A boolean indication to "cb" on the state of the iomgr.
+ * For instance, closures created during a shutdown would have this field set
+ * to false. */
+ int success;
+
+ /**< Internal. Do not touch */
+ struct grpc_closure *next;
+};
+
+/** Initializes \a closure with \a cb and \a cb_arg. */
+void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg);
+
+#define GRPC_CLOSURE_LIST_INIT \
+ { NULL, NULL }
+
+void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
+ int success);
+void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
+int grpc_closure_list_empty(grpc_closure_list list);
+
+#endif /* GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H */
diff --git a/src/core/iomgr/endpoint.c b/src/core/iomgr/endpoint.c
index a7878e31dd..bd64707669 100644
--- a/src/core/iomgr/endpoint.c
+++ b/src/core/iomgr/endpoint.c
@@ -33,31 +33,35 @@
#include "src/core/iomgr/endpoint.h"
-grpc_endpoint_op_status grpc_endpoint_read(grpc_endpoint *ep,
- gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) {
- return ep->vtable->read(ep, slices, cb);
+void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ gpr_slice_buffer* slices, grpc_closure* cb) {
+ ep->vtable->read(exec_ctx, ep, slices, cb);
}
-grpc_endpoint_op_status grpc_endpoint_write(grpc_endpoint *ep,
- gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) {
- return ep->vtable->write(ep, slices, cb);
+void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ gpr_slice_buffer* slices, grpc_closure* cb) {
+ ep->vtable->write(exec_ctx, ep, slices, cb);
}
-void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
- ep->vtable->add_to_pollset(ep, pollset);
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
+ ep->vtable->add_to_pollset(exec_ctx, ep, pollset);
}
-void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep,
- grpc_pollset_set *pollset_set) {
- ep->vtable->add_to_pollset_set(ep, pollset_set);
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set);
}
-void grpc_endpoint_shutdown(grpc_endpoint *ep) { ep->vtable->shutdown(ep); }
+void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+ ep->vtable->shutdown(exec_ctx, ep);
+}
-void grpc_endpoint_destroy(grpc_endpoint *ep) { ep->vtable->destroy(ep); }
+void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+ ep->vtable->destroy(exec_ctx, ep);
+}
-char *grpc_endpoint_get_peer(grpc_endpoint *ep) {
+char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
return ep->vtable->get_peer(ep);
}
diff --git a/src/core/iomgr/endpoint.h b/src/core/iomgr/endpoint.h
index d14d52d561..cbdc947abb 100644
--- a/src/core/iomgr/endpoint.h
+++ b/src/core/iomgr/endpoint.h
@@ -46,21 +46,17 @@
typedef struct grpc_endpoint grpc_endpoint;
typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
-typedef enum grpc_endpoint_op_status {
- GRPC_ENDPOINT_DONE, /* completed immediately, cb won't be called */
- GRPC_ENDPOINT_PENDING, /* cb will be called when completed */
- GRPC_ENDPOINT_ERROR /* write errored out, cb won't be called */
-} grpc_endpoint_op_status;
-
struct grpc_endpoint_vtable {
- grpc_endpoint_op_status (*read)(grpc_endpoint *ep, gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb);
- grpc_endpoint_op_status (*write)(grpc_endpoint *ep, gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb);
- void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset);
- void (*add_to_pollset_set)(grpc_endpoint *ep, grpc_pollset_set *pollset);
- void (*shutdown)(grpc_endpoint *ep);
- void (*destroy)(grpc_endpoint *ep);
+ void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
+ void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
+ void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset);
+ void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pollset);
+ void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
char *(*get_peer)(grpc_endpoint *ep);
};
@@ -68,9 +64,8 @@ struct grpc_endpoint_vtable {
Callback success indicates that the endpoint can accept more reads, failure
indicates the endpoint is closed.
Valid slices may be placed into \a slices even on callback success == 0. */
-grpc_endpoint_op_status grpc_endpoint_read(
- grpc_endpoint *ep, gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT;
+void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
char *grpc_endpoint_get_peer(grpc_endpoint *ep);
@@ -84,19 +79,20 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep);
No guarantee is made to the content of slices after a write EXCEPT that
it is a valid slice buffer.
*/
-grpc_endpoint_op_status grpc_endpoint_write(
- grpc_endpoint *ep, gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT;
+void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb);
/* Causes any pending read/write callbacks to run immediately with
success==0 */
-void grpc_endpoint_shutdown(grpc_endpoint *ep);
-void grpc_endpoint_destroy(grpc_endpoint *ep);
+void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
/* Add an endpoint to a pollset, so that when the pollset is polled, events from
this endpoint are considered */
-void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset);
-void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep,
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset);
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *ep,
grpc_pollset_set *pollset_set);
struct grpc_endpoint {
diff --git a/src/core/iomgr/exec_ctx.c b/src/core/iomgr/exec_ctx.c
new file mode 100644
index 0000000000..a830a27b0b
--- /dev/null
+++ b/src/core/iomgr/exec_ctx.c
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/exec_ctx.h"
+
+#include <grpc/support/log.h>
+
+void grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
+ while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_closure *c = exec_ctx->closure_list.head;
+ exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
+ while (c != NULL) {
+ grpc_closure *next = c->next;
+ c->cb(exec_ctx, c->cb_arg, c->success);
+ c = next;
+ }
+ }
+}
+
+void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
+ grpc_exec_ctx_flush(exec_ctx);
+}
+
+void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ int success) {
+ grpc_closure_list_add(&exec_ctx->closure_list, closure, success);
+}
+
+void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *list) {
+ grpc_closure_list_move(list, &exec_ctx->closure_list);
+}
diff --git a/src/core/iomgr/exec_ctx.h b/src/core/iomgr/exec_ctx.h
new file mode 100644
index 0000000000..f99aa038c5
--- /dev/null
+++ b/src/core/iomgr/exec_ctx.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_IOMGR_EXEC_CTX_H
+#define GRPC_INTERNAL_CORE_IOMGR_EXEC_CTX_H
+
+#include "src/core/iomgr/closure.h"
+
+/** Execution context.
+ * A bag of data that collects information along a callstack.
+ * Generally created at public API entry points, and passed down as
+ * pointer to child functions that manipulate it.
+ *
+ * Specific responsibilities (this may grow in the future):
+ * - track a list of work that needs to be delayed until the top of the
+ * call stack (this provides a convenient mechanism to run callbacks
+ * without worrying about locking issues)
+ *
+ * CONVENTIONS:
+ * Instance of this must ALWAYS be constructed on the stack, never
+ * heap allocated. Instances and pointers to them must always be called
+ * exec_ctx. Instances are always passed as the first argument
+ * to a function that takes it, and always as a pointer (grpc_exec_ctx
+ * is never copied).
+ */
+struct grpc_exec_ctx {
+ grpc_closure_list closure_list;
+};
+
+#define GRPC_EXEC_CTX_INIT \
+ { GRPC_CLOSURE_LIST_INIT }
+
+/** Flush any work that has been enqueued onto this grpc_exec_ctx.
+ * Caller must guarantee that no interfering locks are held. */
+void grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
+/** Finish any pending work for a grpc_exec_ctx. Must be called before
+ * the instance is destroyed, or work may be lost. */
+void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
+/** Add a closure to be executed at the next flush/finish point */
+void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ int success);
+/** Add a list of closures to be executed at the next flush/finish point.
+ * Leaves \a list empty. */
+void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *list);
+
+#endif
diff --git a/src/core/iomgr/fd_posix.c b/src/core/iomgr/fd_posix.c
index 38a543e36e..b48b7f050a 100644
--- a/src/core/iomgr/fd_posix.c
+++ b/src/core/iomgr/fd_posix.c
@@ -150,6 +150,8 @@ static void unref_by(grpc_fd *fd, int n) {
void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
void grpc_fd_global_shutdown(void) {
+ gpr_mu_lock(&fd_freelist_mu);
+ gpr_mu_unlock(&fd_freelist_mu);
while (fd_freelist != NULL) {
grpc_fd *fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
@@ -161,6 +163,9 @@ void grpc_fd_global_shutdown(void) {
grpc_fd *grpc_fd_create(int fd, const char *name) {
grpc_fd *r = alloc_fd(fd);
grpc_iomgr_register_object(&r->iomgr_object, name);
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
+#endif
return r;
}
@@ -209,7 +214,7 @@ static int has_watchers(grpc_fd *fd) {
fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}
-void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
+void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
const char *reason) {
fd->on_done_closure = on_done;
shutdown(fd->fd, SHUT_RDWR);
@@ -218,9 +223,7 @@ void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
if (!has_watchers(fd)) {
fd->closed = 1;
close(fd->fd);
- if (fd->on_done_closure) {
- grpc_iomgr_add_callback(fd->on_done_closure);
- }
+ grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
} else {
wake_all_watchers_locked(fd);
}
@@ -244,25 +247,8 @@ void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
-static void process_callback(grpc_iomgr_closure *closure, int success,
- int allow_synchronous_callback) {
- if (allow_synchronous_callback) {
- closure->cb(closure->cb_arg, success);
- } else {
- grpc_iomgr_add_delayed_callback(closure, success);
- }
-}
-
-static void process_callbacks(grpc_iomgr_closure *callbacks, size_t n,
- int success, int allow_synchronous_callback) {
- size_t i;
- for (i = 0; i < n; i++) {
- process_callback(callbacks + i, success, allow_synchronous_callback);
- }
-}
-
-static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
- int allow_synchronous_callback) {
+static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *st,
+ grpc_closure *closure) {
switch (gpr_atm_acq_load(st)) {
case NOT_READY:
/* There is no race if the descriptor is already ready, so we skip
@@ -284,8 +270,8 @@ static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
case READY:
GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
gpr_atm_rel_store(st, NOT_READY);
- process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
- allow_synchronous_callback);
+ grpc_exec_ctx_enqueue(exec_ctx, closure,
+ !gpr_atm_acq_load(&fd->shutdown));
return;
default: /* WAITING */
/* upcallptr was set to a different closure. This is an error! */
@@ -298,8 +284,8 @@ static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
abort();
}
-static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
- size_t *ncallbacks) {
+static void set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ gpr_atm *st) {
gpr_intptr state = gpr_atm_acq_load(st);
switch (state) {
@@ -318,49 +304,38 @@ static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
default: /* waiting */
GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
gpr_atm_no_barrier_load(st) != NOT_READY);
- callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
+ grpc_exec_ctx_enqueue(exec_ctx, (grpc_closure *)state,
+ !gpr_atm_acq_load(&fd->shutdown));
gpr_atm_rel_store(st, NOT_READY);
return;
}
}
-static void set_ready(grpc_fd *fd, gpr_atm *st,
- int allow_synchronous_callback) {
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *st) {
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
- int success;
- grpc_iomgr_closure *closure;
- size_t ncb = 0;
-
gpr_mu_lock(&fd->set_state_mu);
- set_ready_locked(st, &closure, &ncb);
+ set_ready_locked(exec_ctx, fd, st);
gpr_mu_unlock(&fd->set_state_mu);
- success = !gpr_atm_acq_load(&fd->shutdown);
- GPR_ASSERT(ncb <= 1);
- if (ncb > 0) {
- process_callbacks(closure, ncb, success, allow_synchronous_callback);
- }
}
-void grpc_fd_shutdown(grpc_fd *fd) {
- size_t ncb = 0;
+void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
gpr_mu_lock(&fd->set_state_mu);
GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
gpr_atm_rel_store(&fd->shutdown, 1);
- set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
- set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
+ set_ready_locked(exec_ctx, fd, &fd->readst);
+ set_ready_locked(exec_ctx, fd, &fd->writest);
gpr_mu_unlock(&fd->set_state_mu);
- GPR_ASSERT(ncb <= 2);
- process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
- 0 /* GPR_FALSE */);
}
-void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
- notify_on(fd, &fd->readst, closure, 0);
+void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ notify_on(exec_ctx, fd, &fd->readst, closure);
}
-void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
- notify_on(fd, &fd->writest, closure, 0);
+void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ notify_on(exec_ctx, fd, &fd->writest, closure);
}
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
@@ -406,7 +381,8 @@ gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
return mask;
}
-void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
+void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
+ int got_read, int got_write) {
int was_polling = 0;
int kick = 0;
grpc_fd *fd = watcher->fd;
@@ -439,21 +415,19 @@ void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
fd->closed = 1;
close(fd->fd);
- if (fd->on_done_closure != NULL) {
- grpc_iomgr_add_callback(fd->on_done_closure);
- }
+ grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
}
gpr_mu_unlock(&fd->watcher_mu);
GRPC_FD_UNREF(fd, "poll");
}
-void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
- set_ready(fd, &fd->readst, allow_synchronous_callback);
+void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ set_ready(exec_ctx, fd, &fd->readst);
}
-void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
- set_ready(fd, &fd->writest, allow_synchronous_callback);
+void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ set_ready(exec_ctx, fd, &fd->writest);
}
#endif
diff --git a/src/core/iomgr/fd_posix.h b/src/core/iomgr/fd_posix.h
index 835e9b339a..089aa4d717 100644
--- a/src/core/iomgr/fd_posix.h
+++ b/src/core/iomgr/fd_posix.h
@@ -52,8 +52,8 @@ typedef struct grpc_fd_watcher {
struct grpc_fd {
int fd;
/* refst format:
- bit0: 1=active/0=orphaned
- bit1-n: refcount
+ bit0: 1=active/0=orphaned
+ bit1-n: refcount
meaning that mostly we ref by two to avoid altering the orphaned bit,
and just unref by 1 when we're ready to flag the object as orphaned */
gpr_atm refst;
@@ -94,8 +94,8 @@ struct grpc_fd {
struct grpc_fd *freelist_next;
- grpc_iomgr_closure *on_done_closure;
- grpc_iomgr_closure *shutdown_closures[2];
+ grpc_closure *on_done_closure;
+ grpc_closure *shutdown_closures[2];
grpc_iomgr_object iomgr_object;
};
@@ -111,7 +111,7 @@ grpc_fd *grpc_fd_create(int fd, const char *name);
Requires: *fd initialized; no outstanding notify_on_read or
notify_on_write.
MUST NOT be called with a pollset lock taken */
-void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
+void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
const char *reason);
/* Begin polling on an fd.
@@ -130,13 +130,14 @@ gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_fd_watcher *rec);
/* Complete polling previously started with grpc_fd_begin_poll
MUST NOT be called with a pollset lock taken */
-void grpc_fd_end_poll(grpc_fd_watcher *rec, int got_read, int got_write);
+void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
+ int got_read, int got_write);
/* Return 1 if this fd is orphaned, 0 otherwise */
int grpc_fd_is_orphaned(grpc_fd *fd);
/* Cause any current callbacks to error out with GRPC_CALLBACK_CANCELLED. */
-void grpc_fd_shutdown(grpc_fd *fd);
+void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Register read interest, causing read_cb to be called once when fd becomes
readable, on deadline specified by deadline, or on shutdown triggered by
@@ -151,17 +152,19 @@ void grpc_fd_shutdown(grpc_fd *fd);
underlying platform. This means that users must drain fd in read_cb before
calling notify_on_read again. Users are also expected to handle spurious
events, i.e read_cb is called while nothing can be readable from fd */
-void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure);
+void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure);
/* Exactly the same semantics as above, except based on writable events. */
-void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure);
+void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure);
/* Notification from the poller to an fd that it has become readable or
writable.
If allow_synchronous_callback is 1, allow running the fd callback inline
in this callstack, otherwise register an asynchronous callback and return */
-void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback);
-void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback);
+void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
+void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Reference counting for fds */
#ifdef GRPC_FD_REF_COUNT_DEBUG
diff --git a/src/core/iomgr/iocp_windows.c b/src/core/iomgr/iocp_windows.c
index 006f8b2abf..c2f62a41b8 100644
--- a/src/core/iomgr/iocp_windows.c
+++ b/src/core/iomgr/iocp_windows.c
@@ -56,7 +56,7 @@ static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
-static void do_iocp_work() {
+static void do_iocp_work(grpc_exec_ctx *exec_ctx) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
@@ -64,8 +64,7 @@ static void do_iocp_work() {
LPOVERLAPPED overlapped;
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
- void (*f)(void *, int) = NULL;
- void *opaque = NULL;
+ grpc_closure *closure = NULL;
success = GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key,
&overlapped, INFINITE);
/* success = 0 and overlapped = NULL means the deadline got attained.
@@ -98,22 +97,27 @@ static void do_iocp_work() {
GPR_ASSERT(overlapped == &info->overlapped);
GPR_ASSERT(!info->has_pending_iocp);
gpr_mu_lock(&socket->state_mu);
- if (info->cb) {
- f = info->cb;
- opaque = info->opaque;
- info->cb = NULL;
+ if (info->closure) {
+ closure = info->closure;
+ info->closure = NULL;
} else {
info->has_pending_iocp = 1;
}
gpr_mu_unlock(&socket->state_mu);
- if (f) f(opaque, 1);
+ if (closure) {
+ closure->cb(exec_ctx, closure->cb_arg, 1);
+ }
}
static void iocp_loop(void *p) {
- while (gpr_atm_acq_load(&g_custom_events) ||
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ while (gpr_atm_acq_load(&g_custom_events) ||
!gpr_event_get(&g_shutdown_iocp)) {
- do_iocp_work();
+ do_iocp_work(&exec_ctx);
+ grpc_exec_ctx_flush(&exec_ctx);
}
+ grpc_exec_ctx_finish(&exec_ctx);
gpr_event_set(&g_iocp_done, (void *)1);
}
@@ -168,31 +172,31 @@ void grpc_iocp_add_socket(grpc_winsocket *socket) {
-) The IOCP already completed in the background, and we need to call
the callback now.
-) The IOCP hasn't completed yet, and we're queuing it for later. */
-static void socket_notify_on_iocp(grpc_winsocket *socket,
- void (*cb)(void *, int), void *opaque,
+static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *socket, grpc_closure *closure,
grpc_winsocket_callback_info *info) {
int run_now = 0;
- GPR_ASSERT(!info->cb);
+ GPR_ASSERT(info->closure == NULL);
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
run_now = 1;
info->has_pending_iocp = 0;
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
} else {
- info->cb = cb;
- info->opaque = opaque;
+ info->closure = closure;
}
gpr_mu_unlock(&socket->state_mu);
- if (run_now) cb(opaque, 1);
}
-void grpc_socket_notify_on_write(grpc_winsocket *socket,
- void (*cb)(void *, int), void *opaque) {
- socket_notify_on_iocp(socket, cb, opaque, &socket->write_info);
+void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *socket,
+ grpc_closure *closure) {
+ socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info);
}
-void grpc_socket_notify_on_read(grpc_winsocket *socket, void (*cb)(void *, int),
- void *opaque) {
- socket_notify_on_iocp(socket, cb, opaque, &socket->read_info);
+void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
+ grpc_closure *closure) {
+ socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info);
}
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/iocp_windows.h b/src/core/iomgr/iocp_windows.h
index 7d2dc45176..b0209e04e3 100644
--- a/src/core/iomgr/iocp_windows.h
+++ b/src/core/iomgr/iocp_windows.h
@@ -43,10 +43,12 @@ void grpc_iocp_kick(void);
void grpc_iocp_shutdown(void);
void grpc_iocp_add_socket(grpc_winsocket *);
-void grpc_socket_notify_on_write(grpc_winsocket *,
- void (*cb)(void *, int success), void *opaque);
+void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *winsocket,
+ grpc_closure *closure);
-void grpc_socket_notify_on_read(grpc_winsocket *,
- void (*cb)(void *, int success), void *opaque);
+void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx,
+ grpc_winsocket *winsocket,
+ grpc_closure *closure);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOCP_WINDOWS_H */
diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c
index d6ca5d1f71..612419b70e 100644
--- a/src/core/iomgr/iomgr.c
+++ b/src/core/iomgr/iomgr.c
@@ -48,39 +48,9 @@
static gpr_mu g_mu;
static gpr_cv g_rcv;
-static grpc_iomgr_closure *g_cbs_head = NULL;
-static grpc_iomgr_closure *g_cbs_tail = NULL;
static int g_shutdown;
-static gpr_event g_background_callback_executor_done;
static grpc_iomgr_object g_root_object;
-/* Execute followup callbacks continuously.
- Other threads may check in and help during pollset_work() */
-static void background_callback_executor(void *ignored) {
- gpr_mu_lock(&g_mu);
- while (!g_shutdown) {
- gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- gpr_timespec short_deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
- if (g_cbs_head) {
- grpc_iomgr_closure *closure = g_cbs_head;
- g_cbs_head = closure->next;
- if (!g_cbs_head) g_cbs_tail = NULL;
- gpr_mu_unlock(&g_mu);
- closure->cb(closure->cb_arg, closure->success);
- gpr_mu_lock(&g_mu);
- } else if (grpc_alarm_check(&g_mu, gpr_now(GPR_CLOCK_MONOTONIC),
- &deadline)) {
- } else {
- gpr_mu_unlock(&g_mu);
- gpr_sleep_until(gpr_time_min(short_deadline, deadline));
- gpr_mu_lock(&g_mu);
- }
- }
- gpr_mu_unlock(&g_mu);
- gpr_event_set(&g_background_callback_executor_done, (void *)1);
-}
-
void grpc_kick_poller(void) {
/* Empty. The background callback executor polls periodically. The activity
* the kicker is trying to draw the executor's attention to will be picked up
@@ -89,7 +59,6 @@ void grpc_kick_poller(void) {
}
void grpc_iomgr_init(void) {
- gpr_thd_id id;
g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
@@ -97,8 +66,6 @@ void grpc_iomgr_init(void) {
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
grpc_iomgr_platform_init();
- gpr_event_init(&g_background_callback_executor_done);
- gpr_thd_new(&id, background_callback_executor, NULL, NULL);
}
static size_t count_objects(void) {
@@ -118,76 +85,55 @@ static void dump_objects(const char *kind) {
}
void grpc_iomgr_shutdown(void) {
- grpc_iomgr_closure *closure;
gpr_timespec shutdown_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&g_mu);
g_shutdown = 1;
- while (g_cbs_head != NULL || g_root_object.next != &g_root_object) {
+ while (g_root_object.next != &g_root_object) {
if (gpr_time_cmp(
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
- if (g_cbs_head != NULL && g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG,
- "Waiting for %d iomgr objects to be destroyed and executing "
- "final callbacks",
- count_objects());
- } else if (g_cbs_head != NULL) {
- gpr_log(GPR_DEBUG, "Executing final iomgr callbacks");
- } else {
+ if (g_root_object.next != &g_root_object) {
gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
count_objects());
}
last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
}
- if (g_cbs_head) {
- do {
- closure = g_cbs_head;
- g_cbs_head = closure->next;
- if (!g_cbs_head) g_cbs_tail = NULL;
- gpr_mu_unlock(&g_mu);
-
- closure->cb(closure->cb_arg, 0);
- gpr_mu_lock(&g_mu);
- } while (g_cbs_head);
- continue;
- }
- if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL)) {
+ if (grpc_alarm_check(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC),
+ NULL)) {
+ gpr_mu_unlock(&g_mu);
+ grpc_exec_ctx_flush(&exec_ctx);
+ gpr_mu_lock(&g_mu);
continue;
}
if (g_root_object.next != &g_root_object) {
- int timeout = 0;
- while (g_cbs_head == NULL) {
- gpr_timespec short_deadline = gpr_time_add(
+ gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
- if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
- if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
- timeout = 1;
- break;
+ if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
+ if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
+ if (g_root_object.next != &g_root_object) {
+ gpr_log(GPR_DEBUG,
+ "Failed to free %d iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
+ count_objects());
+ dump_objects("LEAKED");
}
+ break;
}
}
- if (timeout) {
- gpr_log(GPR_DEBUG,
- "Failed to free %d iomgr objects before shutdown deadline: "
- "memory leaks are likely",
- count_objects());
- dump_objects("LEAKED");
- break;
- }
}
}
gpr_mu_unlock(&g_mu);
- memset(&g_root_object, 0, sizeof(g_root_object));
+ grpc_alarm_list_shutdown(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
- grpc_kick_poller();
- gpr_event_wait(&g_background_callback_executor_done,
- gpr_inf_future(GPR_CLOCK_REALTIME));
-
- grpc_alarm_list_shutdown();
+ /* ensure all threads have left g_mu */
+ gpr_mu_lock(&g_mu);
+ gpr_mu_unlock(&g_mu);
grpc_iomgr_platform_shutdown();
gpr_mu_destroy(&g_mu);
@@ -211,74 +157,3 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
gpr_mu_unlock(&g_mu);
gpr_free(obj->name);
}
-
-void grpc_iomgr_closure_init(grpc_iomgr_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg) {
- closure->cb = cb;
- closure->cb_arg = cb_arg;
- closure->next = NULL;
-}
-
-static void assert_not_scheduled_locked(grpc_iomgr_closure *closure) {
-#ifndef NDEBUG
- grpc_iomgr_closure *c;
-
- for (c = g_cbs_head; c; c = c->next) {
- GPR_ASSERT(c != closure);
- }
-#endif
-}
-
-void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *closure, int success) {
- closure->success = success;
- GPR_ASSERT(closure->cb);
- gpr_mu_lock(&g_mu);
- assert_not_scheduled_locked(closure);
- closure->next = NULL;
- if (!g_cbs_tail) {
- g_cbs_head = g_cbs_tail = closure;
- } else {
- g_cbs_tail->next = closure;
- g_cbs_tail = closure;
- }
- if (g_shutdown) {
- gpr_cv_signal(&g_rcv);
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void grpc_iomgr_add_callback(grpc_iomgr_closure *closure) {
- grpc_iomgr_add_delayed_callback(closure, 1 /* GPR_TRUE */);
-}
-
-int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) {
- int n = 0;
- gpr_mu *retake_mu = NULL;
- grpc_iomgr_closure *closure;
- for (;;) {
- /* check for new work */
- if (!gpr_mu_trylock(&g_mu)) {
- break;
- }
- closure = g_cbs_head;
- if (!closure) {
- gpr_mu_unlock(&g_mu);
- break;
- }
- g_cbs_head = closure->next;
- if (!g_cbs_head) g_cbs_tail = NULL;
- gpr_mu_unlock(&g_mu);
- /* if we have a mutex to drop, do so before executing work */
- if (drop_mu) {
- gpr_mu_unlock(drop_mu);
- retake_mu = drop_mu;
- drop_mu = NULL;
- }
- closure->cb(closure->cb_arg, success && closure->success);
- n++;
- }
- if (retake_mu) {
- gpr_mu_lock(retake_mu);
- }
- return n;
-}
diff --git a/src/core/iomgr/iomgr.h b/src/core/iomgr/iomgr.h
index 261c17366a..c9ea84c605 100644
--- a/src/core/iomgr/iomgr.h
+++ b/src/core/iomgr/iomgr.h
@@ -34,47 +34,10 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_IOMGR_H
#define GRPC_INTERNAL_CORE_IOMGR_IOMGR_H
-/** gRPC Callback definition.
- *
- * \param arg Arbitrary input.
- * \param success An indication on the state of the iomgr. On false, cleanup
- * actions should be taken (eg, shutdown). */
-typedef void (*grpc_iomgr_cb_func)(void *arg, int success);
-
-/** A closure over a grpc_iomgr_cb_func. */
-typedef struct grpc_iomgr_closure {
- /** Bound callback. */
- grpc_iomgr_cb_func cb;
-
- /** Arguments to be passed to "cb". */
- void *cb_arg;
-
- /** Internal. A boolean indication to "cb" on the state of the iomgr.
- * For instance, closures created during a shutdown would have this field set
- * to false. */
- int success;
-
- /**< Internal. Do not touch */
- struct grpc_iomgr_closure *next;
-} grpc_iomgr_closure;
-
-/** Initializes \a closure with \a cb and \a cb_arg. */
-void grpc_iomgr_closure_init(grpc_iomgr_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg);
-
/** Initializes the iomgr. */
void grpc_iomgr_init(void);
/** Signals the intention to shutdown the iomgr. */
void grpc_iomgr_shutdown(void);
-/** Registers a closure to be invoked at some point in the future.
- *
- * Can be called from within a callback or from anywhere else */
-void grpc_iomgr_add_callback(grpc_iomgr_closure *closure);
-
-/** As per grpc_iomgr_add_callback, with the ability to set the success
- argument. */
-void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *iocb, int success);
-
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_H */
diff --git a/src/core/iomgr/iomgr_internal.h b/src/core/iomgr/iomgr_internal.h
index 4cec973ba0..f266732c96 100644
--- a/src/core/iomgr/iomgr_internal.h
+++ b/src/core/iomgr/iomgr_internal.h
@@ -43,9 +43,6 @@ typedef struct grpc_iomgr_object {
struct grpc_iomgr_object *prev;
} grpc_iomgr_object;
-int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success);
-void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *iocb, int success);
-
void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name);
void grpc_iomgr_unregister_object(grpc_iomgr_object *obj);
diff --git a/src/core/iomgr/pollset.h b/src/core/iomgr/pollset.h
index 337596cb74..d15553a12a 100644
--- a/src/core/iomgr/pollset.h
+++ b/src/core/iomgr/pollset.h
@@ -55,9 +55,8 @@
#endif
void grpc_pollset_init(grpc_pollset *pollset);
-void grpc_pollset_shutdown(grpc_pollset *pollset,
- void (*shutdown_done)(void *arg),
- void *shutdown_done_arg);
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure);
void grpc_pollset_destroy(grpc_pollset *pollset);
/* Do some work on a pollset.
@@ -74,9 +73,13 @@ void grpc_pollset_destroy(grpc_pollset *pollset);
grpc_pollset_work, and it is guaranteed that GRPC_POLLSET_MU(pollset) will
not be released by grpc_pollset_work AFTER worker has been destroyed.
- Tries not to block past deadline. */
-void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline);
+ Tries not to block past deadline.
+ May call grpc_closure_list_run on grpc_closure_list, without holding the
+ pollset
+ lock */
+void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, gpr_timespec now,
+ gpr_timespec deadline);
/* Break one polling thread out of polling work for this pollset.
If specific_worker is GRPC_POLLSET_KICK_BROADCAST, kick ALL the workers.
diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c
index 481bdc4ede..5626b08a47 100644
--- a/src/core/iomgr/pollset_multipoller_with_epoll.c
+++ b/src/core/iomgr/pollset_multipoller_with_epoll.c
@@ -53,7 +53,7 @@ typedef struct wakeup_fd_hdl {
typedef struct {
grpc_pollset *pollset;
grpc_fd *fd;
- grpc_iomgr_closure closure;
+ grpc_closure closure;
} delayed_add;
typedef struct {
@@ -61,7 +61,8 @@ typedef struct {
wakeup_fd_hdl *free_wakeup_fds;
} pollset_hdr;
-static void finally_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
+static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
pollset_hdr *h = pollset->data.ptr;
struct epoll_event ev;
int err;
@@ -83,15 +84,15 @@ static void finally_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
}
}
}
- grpc_fd_end_poll(&watcher, 0, 0);
+ grpc_fd_end_poll(exec_ctx, &watcher, 0, 0);
}
-static void perform_delayed_add(void *arg, int iomgr_status) {
+static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_status) {
delayed_add *da = arg;
- int do_shutdown_cb = 0;
if (!grpc_fd_is_orphaned(da->fd)) {
- finally_add_fd(da->pollset, da->fd);
+ finally_add_fd(exec_ctx, da->pollset, da->fd);
}
gpr_mu_lock(&da->pollset->mu);
@@ -100,38 +101,36 @@ static void perform_delayed_add(void *arg, int iomgr_status) {
/* We don't care about this pollset anymore. */
if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
da->pollset->called_shutdown = 1;
- do_shutdown_cb = 1;
+ grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, 1);
}
}
gpr_mu_unlock(&da->pollset->mu);
GRPC_FD_UNREF(da->fd, "delayed_add");
- if (do_shutdown_cb) {
- da->pollset->shutdown_done_cb(da->pollset->shutdown_done_arg);
- }
-
gpr_free(da);
}
-static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
+static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
grpc_fd *fd,
int and_unlock_pollset) {
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
- finally_add_fd(pollset, fd);
+ finally_add_fd(exec_ctx, pollset, fd);
} else {
delayed_add *da = gpr_malloc(sizeof(*da));
da->pollset = pollset;
da->fd = fd;
GRPC_FD_REF(fd, "delayed_add");
- grpc_iomgr_closure_init(&da->closure, perform_delayed_add, da);
+ grpc_closure_init(&da->closure, perform_delayed_add, da);
pollset->in_flight_cbs++;
- grpc_iomgr_add_callback(&da->closure);
+ grpc_exec_ctx_enqueue(exec_ctx, &da->closure, 1);
}
}
-static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
+static void multipoll_with_epoll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
grpc_fd *fd,
int and_unlock_pollset) {
pollset_hdr *h = pollset->data.ptr;
@@ -153,9 +152,9 @@ static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
/* TODO(klempner): We probably want to turn this down a bit */
#define GRPC_EPOLL_MAX_EVENTS 1000
-static void multipoll_with_epoll_pollset_maybe_work(
- grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
- gpr_timespec now, int allow_synchronous_callback) {
+static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
int ep_rv;
int poll_rv;
@@ -209,18 +208,16 @@ static void multipoll_with_epoll_pollset_maybe_work(
int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write = ep_ev[i].events & EPOLLOUT;
if (read || cancel) {
- grpc_fd_become_readable(fd, allow_synchronous_callback);
+ grpc_fd_become_readable(exec_ctx, fd);
}
if (write || cancel) {
- grpc_fd_become_writable(fd, allow_synchronous_callback);
+ grpc_fd_become_writable(exec_ctx, fd);
}
}
}
} while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
}
}
-
- gpr_mu_lock(&pollset->mu);
}
static void multipoll_with_epoll_pollset_finish_shutdown(
@@ -234,11 +231,12 @@ static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd,
- multipoll_with_epoll_pollset_maybe_work,
+ multipoll_with_epoll_pollset_maybe_work_and_unlock,
multipoll_with_epoll_pollset_finish_shutdown,
multipoll_with_epoll_pollset_destroy};
-static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
+static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
size_t i;
pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
@@ -252,7 +250,7 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
abort();
}
for (i = 0; i < nfds; i++) {
- multipoll_with_epoll_pollset_add_fd(pollset, fds[i], 0);
+ multipoll_with_epoll_pollset_add_fd(exec_ctx, pollset, fds[i], 0);
}
}
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index cae260cab0..2a18cedb33 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -59,7 +59,8 @@ typedef struct {
grpc_fd **dels;
} pollset_hdr;
-static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
+static void multipoll_with_poll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
grpc_fd *fd,
int and_unlock_pollset) {
size_t i;
@@ -80,7 +81,8 @@ exit:
}
}
-static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
+static void multipoll_with_poll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
grpc_fd *fd,
int and_unlock_pollset) {
/* will get removed next poll cycle */
@@ -96,9 +98,9 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
}
}
-static void multipoll_with_poll_pollset_maybe_work(
- grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
- gpr_timespec now, int allow_synchronous_callback) {
+static void multipoll_with_poll_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
int timeout;
int r;
size_t i, j, fd_count;
@@ -148,7 +150,7 @@ static void multipoll_with_poll_pollset_maybe_work(
r = grpc_poll_function(pfds, pfd_count, timeout);
for (i = 1; i < pfd_count; i++) {
- grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
+ grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN,
pfds[i].revents & POLLOUT);
}
@@ -167,18 +169,16 @@ static void multipoll_with_poll_pollset_maybe_work(
continue;
}
if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
- grpc_fd_become_readable(watchers[i].fd, allow_synchronous_callback);
+ grpc_fd_become_readable(exec_ctx, watchers[i].fd);
}
if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
- grpc_fd_become_writable(watchers[i].fd, allow_synchronous_callback);
+ grpc_fd_become_writable(exec_ctx, watchers[i].fd);
}
}
}
gpr_free(pfds);
gpr_free(watchers);
-
- gpr_mu_lock(&pollset->mu);
}
static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
@@ -204,11 +204,12 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
static const grpc_pollset_vtable multipoll_with_poll_pollset = {
multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd,
- multipoll_with_poll_pollset_maybe_work,
+ multipoll_with_poll_pollset_maybe_work_and_unlock,
multipoll_with_poll_pollset_finish_shutdown,
multipoll_with_poll_pollset_destroy};
-void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
+void grpc_poll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
size_t i;
pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index f3e424e83c..43ad22c16d 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -136,12 +136,14 @@ void grpc_pollset_init(grpc_pollset *pollset) {
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
+ pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
become_basic_pollset(pollset, NULL);
}
-void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
+void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
- pollset->vtable->add_fd(pollset, fd, 1);
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
@@ -152,9 +154,10 @@ void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
#endif
}
-void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
+void grpc_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
- pollset->vtable->del_fd(pollset, fd, 1);
+ pollset->vtable->del_fd(exec_ctx, pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
@@ -165,23 +168,28 @@ void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
#endif
}
-static void finish_shutdown(grpc_pollset *pollset) {
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+ GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
pollset->vtable->finish_shutdown(pollset);
- pollset->shutdown_done_cb(pollset->shutdown_done_arg);
+ grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, 1);
}
-void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline) {
+void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, gpr_timespec now,
+ gpr_timespec deadline) {
/* pollset->mu already held */
int added_worker = 0;
+ int locked = 1;
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
/* TODO(ctiller): pool these */
grpc_wakeup_fd_init(&worker->wakeup_fd);
- if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1)) {
+ if (!grpc_pollset_has_workers(pollset) &&
+ !grpc_closure_list_empty(pollset->idle_jobs)) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
goto done;
}
- if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
+ if (grpc_alarm_check(exec_ctx, now, &deadline)) {
goto done;
}
if (pollset->shutting_down) {
@@ -190,19 +198,28 @@ void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
if (pollset->in_flight_cbs) {
/* Give do_promote priority so we don't starve it out */
gpr_mu_unlock(&pollset->mu);
- gpr_mu_lock(&pollset->mu);
+ locked = 0;
goto done;
}
if (!pollset->kicked_without_pollers) {
push_front_worker(pollset, worker);
added_worker = 1;
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
- pollset->vtable->maybe_work(pollset, worker, deadline, now, 1);
+ gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
+ pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker, deadline,
+ now);
+ locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
+ gpr_tls_set(&g_current_thread_worker, 0);
} else {
pollset->kicked_without_pollers = 0;
}
done:
+ if (!locked) {
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
+ locked = 1;
+ }
grpc_wakeup_fd_destroy(&worker->wakeup_fd);
if (added_worker) {
remove_worker(pollset, worker);
@@ -213,19 +230,24 @@ done:
} else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
+ grpc_exec_ctx_flush(exec_ctx);
/* Continuing to access pollset here is safe -- it is the caller's
* responsibility to not destroy when it has outstanding calls to
* grpc_pollset_work.
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
+ } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
+ gpr_mu_unlock(&pollset->mu);
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
}
}
}
-void grpc_pollset_shutdown(grpc_pollset *pollset,
- void (*shutdown_done)(void *arg),
- void *shutdown_done_arg) {
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
int call_shutdown = 0;
gpr_mu_lock(&pollset->mu);
GPR_ASSERT(!pollset->shutting_down);
@@ -235,13 +257,15 @@ void grpc_pollset_shutdown(grpc_pollset *pollset,
pollset->called_shutdown = 1;
call_shutdown = 1;
}
- pollset->shutdown_done_cb = shutdown_done;
- pollset->shutdown_done_arg = shutdown_done_arg;
+ if (!grpc_pollset_has_workers(pollset)) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
+ }
+ pollset->shutdown_done = closure;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
gpr_mu_unlock(&pollset->mu);
if (call_shutdown) {
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
}
}
@@ -279,15 +303,14 @@ typedef struct grpc_unary_promote_args {
const grpc_pollset_vtable *original_vtable;
grpc_pollset *pollset;
grpc_fd *fd;
- grpc_iomgr_closure promotion_closure;
+ grpc_closure promotion_closure;
} grpc_unary_promote_args;
-static void basic_do_promote(void *args, int success) {
+static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args, int success) {
grpc_unary_promote_args *up_args = args;
const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
grpc_pollset *pollset = up_args->pollset;
grpc_fd *fd = up_args->fd;
- int do_shutdown_cb = 0;
/*
* This is quite tricky. There are a number of cases to keep in mind here:
@@ -300,12 +323,7 @@ static void basic_do_promote(void *args, int success) {
gpr_mu_lock(&pollset->mu);
/* First we need to ensure that nobody is polling concurrently */
- if (grpc_pollset_has_workers(pollset)) {
- grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
- grpc_iomgr_add_callback(&up_args->promotion_closure);
- gpr_mu_unlock(&pollset->mu);
- return;
- }
+ GPR_ASSERT(!grpc_pollset_has_workers(pollset));
gpr_free(up_args);
/* At this point the pollset may no longer be a unary poller. In that case
@@ -317,21 +335,20 @@ static void basic_do_promote(void *args, int success) {
if (pollset->shutting_down) {
/* We don't care about this pollset anymore. */
if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
- GPR_ASSERT(!grpc_pollset_has_workers(pollset));
- pollset->called_shutdown = 1;
- do_shutdown_cb = 1;
+ finish_shutdown(exec_ctx, pollset);
}
} else if (grpc_fd_is_orphaned(fd)) {
/* Don't try to add it to anything, we'll drop our ref on it below */
} else if (pollset->vtable != original_vtable) {
- pollset->vtable->add_fd(pollset, fd, 0);
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
} else if (fd != pollset->data.ptr) {
grpc_fd *fds[2];
fds[0] = pollset->data.ptr;
fds[1] = fd;
if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
- grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds));
+ grpc_platform_become_multipoller(exec_ctx, pollset, fds,
+ GPR_ARRAY_SIZE(fds));
GRPC_FD_UNREF(fds[0], "basicpoll");
} else {
/* old fd is orphaned and we haven't cleaned it up until now, so remain a
@@ -346,16 +363,12 @@ static void basic_do_promote(void *args, int success) {
gpr_mu_unlock(&pollset->mu);
- if (do_shutdown_cb) {
- pollset->shutdown_done_cb(pollset->shutdown_done_arg);
- }
-
/* Matching ref in basic_pollset_add_fd */
GRPC_FD_UNREF(fd, "basicpoll_add");
}
-static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
- int and_unlock_pollset) {
+static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd, int and_unlock_pollset) {
grpc_unary_promote_args *up_args;
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) goto exit;
@@ -372,7 +385,8 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
pollset->data.ptr = fd;
GRPC_FD_REF(fd, "basicpoll");
} else if (!grpc_fd_is_orphaned(fds[0])) {
- grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds));
+ grpc_platform_become_multipoller(exec_ctx, pollset, fds,
+ GPR_ARRAY_SIZE(fds));
GRPC_FD_UNREF(fds[0], "basicpoll");
} else {
/* old fd is orphaned and we haven't cleaned it up until now, so remain a
@@ -389,13 +403,13 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
GRPC_FD_REF(fd, "basicpoll_add");
pollset->in_flight_cbs++;
up_args = gpr_malloc(sizeof(*up_args));
- up_args->pollset = pollset;
up_args->fd = fd;
up_args->original_vtable = pollset->vtable;
+ up_args->pollset = pollset;
up_args->promotion_closure.cb = basic_do_promote;
up_args->promotion_closure.cb_arg = up_args;
- grpc_iomgr_add_callback(&up_args->promotion_closure);
+ grpc_closure_list_add(&pollset->idle_jobs, &up_args->promotion_closure, 1);
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
exit:
@@ -404,8 +418,8 @@ exit:
}
}
-static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd,
- int and_unlock_pollset) {
+static void basic_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd, int and_unlock_pollset) {
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) {
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
@@ -417,10 +431,11 @@ static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd,
}
}
-static void basic_pollset_maybe_work(grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now,
- int allow_synchronous_callback) {
+static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ gpr_timespec deadline,
+ gpr_timespec now) {
struct pollfd pfd[2];
grpc_fd *fd;
grpc_fd_watcher fd_watcher;
@@ -457,7 +472,7 @@ static void basic_pollset_maybe_work(grpc_pollset *pollset,
GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
if (fd) {
- grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
+ grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[1].revents & POLLIN,
pfd[1].revents & POLLOUT);
}
@@ -473,15 +488,13 @@ static void basic_pollset_maybe_work(grpc_pollset *pollset,
}
if (nfds > 1) {
if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
- grpc_fd_become_readable(fd, allow_synchronous_callback);
+ grpc_fd_become_readable(exec_ctx, fd);
}
if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
- grpc_fd_become_writable(fd, allow_synchronous_callback);
+ grpc_fd_become_writable(exec_ctx, fd);
}
}
}
-
- gpr_mu_lock(&pollset->mu);
}
static void basic_pollset_destroy(grpc_pollset *pollset) {
@@ -492,8 +505,9 @@ static void basic_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable basic_pollset = {
- basic_pollset_add_fd, basic_pollset_del_fd, basic_pollset_maybe_work,
- basic_pollset_destroy, basic_pollset_destroy};
+ basic_pollset_add_fd, basic_pollset_del_fd,
+ basic_pollset_maybe_work_and_unlock, basic_pollset_destroy,
+ basic_pollset_destroy};
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
pollset->vtable = &basic_pollset;
diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h
index 69bd9cca8c..f996dd1edf 100644
--- a/src/core/iomgr/pollset_posix.h
+++ b/src/core/iomgr/pollset_posix.h
@@ -37,6 +37,8 @@
#include <poll.h>
#include <grpc/support/sync.h>
+#include "src/core/iomgr/exec_ctx.h"
+#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@@ -64,8 +66,8 @@ typedef struct grpc_pollset {
int shutting_down;
int called_shutdown;
int kicked_without_pollers;
- void (*shutdown_done_cb)(void *arg);
- void *shutdown_done_arg;
+ grpc_closure *shutdown_done;
+ grpc_closure_list idle_jobs;
union {
int fd;
void *ptr;
@@ -73,13 +75,13 @@ typedef struct grpc_pollset {
} grpc_pollset;
struct grpc_pollset_vtable {
- void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
- int and_unlock_pollset);
- void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
- int and_unlock_pollset);
- void (*maybe_work)(grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now,
- int allow_synchronous_callback);
+ void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd, int and_unlock_pollset);
+ void (*del_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd, int and_unlock_pollset);
+ void (*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now);
void (*finish_shutdown)(grpc_pollset *pollset);
void (*destroy)(grpc_pollset *pollset);
};
@@ -87,10 +89,12 @@ struct grpc_pollset_vtable {
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
/* Add an fd to a pollset */
-void grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd);
+void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd);
/* Force remove an fd from a pollset (normally they are removed on the next
poll after an fd is orphaned) */
-void grpc_pollset_del_fd(grpc_pollset *pollset, struct grpc_fd *fd);
+void grpc_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd);
/* Returns the fd to listen on for kicks */
int grpc_kick_read_fd(grpc_pollset *p);
@@ -108,12 +112,14 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now);
/* turn a pollset into a multipoller: platform specific */
-typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,
+typedef void (*grpc_platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
struct grpc_fd **fds,
size_t fd_count);
extern grpc_platform_become_multipoller_type grpc_platform_become_multipoller;
-void grpc_poll_become_multipoller(grpc_pollset *pollset, struct grpc_fd **fds,
+void grpc_poll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, struct grpc_fd **fds,
size_t fd_count);
/* Return 1 if the pollset has active threads in grpc_pollset_work (pollset must
diff --git a/src/core/iomgr/pollset_set.h b/src/core/iomgr/pollset_set.h
index 6d73951c70..0fdcba01a4 100644
--- a/src/core/iomgr/pollset_set.h
+++ b/src/core/iomgr/pollset_set.h
@@ -49,11 +49,13 @@
#include "src/core/iomgr/pollset_set_windows.h"
#endif
-void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
-void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
-void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
- grpc_pollset *pollset);
-void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
- grpc_pollset *pollset);
+void grpc_pollset_set_init(grpc_pollset_set* pollset_set);
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset);
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */
diff --git a/src/core/iomgr/pollset_set_posix.c b/src/core/iomgr/pollset_set_posix.c
index 2076ac70ef..c86ed3d5da 100644
--- a/src/core/iomgr/pollset_set_posix.c
+++ b/src/core/iomgr/pollset_set_posix.c
@@ -58,7 +58,8 @@ void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
gpr_free(pollset_set->fds);
}
-void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
+void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
size_t i, j;
gpr_mu_lock(&pollset_set->mu);
@@ -74,7 +75,7 @@ void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
} else {
- grpc_pollset_add_fd(pollset, pollset_set->fds[i]);
+ grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
pollset_set->fds[j++] = pollset_set->fds[i];
}
}
@@ -82,7 +83,8 @@ void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
gpr_mu_unlock(&pollset_set->mu);
}
-void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
@@ -97,7 +99,8 @@ void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
gpr_mu_unlock(&pollset_set->mu);
}
-void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
+void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
@@ -108,12 +111,13 @@ void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
GRPC_FD_REF(fd, "pollset_set");
pollset_set->fds[pollset_set->fd_count++] = fd;
for (i = 0; i < pollset_set->pollset_count; i++) {
- grpc_pollset_add_fd(pollset_set->pollsets[i], fd);
+ grpc_pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
-void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) {
+void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
diff --git a/src/core/iomgr/pollset_set_posix.h b/src/core/iomgr/pollset_set_posix.h
index e88740bde1..05234fb642 100644
--- a/src/core/iomgr/pollset_set_posix.h
+++ b/src/core/iomgr/pollset_set_posix.h
@@ -49,7 +49,9 @@ typedef struct grpc_pollset_set {
grpc_fd **fds;
} grpc_pollset_set;
-void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd);
-void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd);
+void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd);
+void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */
diff --git a/src/core/iomgr/pollset_set_windows.c b/src/core/iomgr/pollset_set_windows.c
index b9c209cd2c..53d5d3dcd4 100644
--- a/src/core/iomgr/pollset_set_windows.c
+++ b/src/core/iomgr/pollset_set_windows.c
@@ -37,14 +37,16 @@
#include "src/core/iomgr/pollset_set.h"
-void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {}
+void grpc_pollset_set_init(grpc_pollset_set* pollset_set) {}
-void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {}
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
-void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {}
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
-void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {}
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/pollset_windows.c b/src/core/iomgr/pollset_windows.c
index 07522c8a0c..6182eb3532 100644
--- a/src/core/iomgr/pollset_windows.c
+++ b/src/core/iomgr/pollset_windows.c
@@ -85,29 +85,26 @@ void grpc_pollset_init(grpc_pollset *pollset) {
pollset->kicked_without_pollers = 0;
}
-void grpc_pollset_shutdown(grpc_pollset *pollset,
- void (*shutdown_done)(void *arg),
- void *shutdown_done_arg) {
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
gpr_mu_lock(&pollset->mu);
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
gpr_mu_unlock(&pollset->mu);
- shutdown_done(shutdown_done_arg);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
-void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline) {
+void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, gpr_timespec now,
+ gpr_timespec deadline) {
int added_worker = 0;
worker->next = worker->prev = NULL;
gpr_cv_init(&worker->cv);
- if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
- goto done;
- }
- if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
+ if (grpc_alarm_check(exec_ctx, now, &deadline)) {
goto done;
}
if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
@@ -118,6 +115,11 @@ void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
pollset->kicked_without_pollers = 0;
}
done:
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ gpr_mu_unlock(&pollset->mu);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
+ }
gpr_cv_destroy(&worker->cv);
if (added_worker) {
remove_worker(pollset, worker);
diff --git a/src/core/iomgr/resolve_address.h b/src/core/iomgr/resolve_address.h
index 9f361cb892..01eedffa88 100644
--- a/src/core/iomgr/resolve_address.h
+++ b/src/core/iomgr/resolve_address.h
@@ -35,6 +35,8 @@
#define GRPC_INTERNAL_CORE_IOMGR_RESOLVE_ADDRESS_H
#include <stddef.h>
+#include "src/core/iomgr/exec_ctx.h"
+#include "src/core/iomgr/iomgr.h"
#define GRPC_MAX_SOCKADDR_SIZE 128
@@ -52,7 +54,8 @@ typedef struct {
On success: addresses is the result, and the callee must call
grpc_resolved_addresses_destroy when it's done with them
On failure: addresses is NULL */
-typedef void (*grpc_resolve_cb)(void *arg, grpc_resolved_addresses *addresses);
+typedef void (*grpc_resolve_cb)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_resolved_addresses *addresses);
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
/* TODO(ctiller): add a timeout here */
diff --git a/src/core/iomgr/resolve_address_posix.c b/src/core/iomgr/resolve_address_posix.c
index ce6972b797..99bd566e10 100644
--- a/src/core/iomgr/resolve_address_posix.c
+++ b/src/core/iomgr/resolve_address_posix.c
@@ -50,6 +50,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
typedef struct {
char *name;
@@ -106,8 +107,7 @@ grpc_resolved_addresses *grpc_blocking_resolve_address(
if (s != 0) {
/* Retry if well-known service name is recognized */
char *svc[][2] = {{"http", "80"}, {"https", "443"}};
- int i;
- for (i = 0; i < (int)(sizeof(svc) / sizeof(svc[0])); i++) {
+ for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
s = getaddrinfo(host, svc[i][1], &hints, &result);
break;
@@ -144,17 +144,19 @@ done:
}
/* Thread function to asynch-ify grpc_blocking_resolve_address */
-static void do_request(void *rp) {
+static void do_request_thread(void *rp) {
request *r = rp;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resolved_addresses *resolved =
grpc_blocking_resolve_address(r->name, r->default_port);
void *arg = r->arg;
grpc_resolve_cb cb = r->cb;
gpr_free(r->name);
gpr_free(r->default_port);
- cb(arg, resolved);
+ cb(&exec_ctx, arg, resolved);
grpc_iomgr_unregister_object(&r->iomgr_object);
gpr_free(r);
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
@@ -175,7 +177,7 @@ void grpc_resolve_address(const char *name, const char *default_port,
r->default_port = gpr_strdup(default_port);
r->cb = cb;
r->arg = arg;
- gpr_thd_new(&id, do_request, r, NULL);
+ gpr_thd_new(&id, do_request_thread, r, NULL);
}
#endif
diff --git a/src/core/iomgr/resolve_address_windows.c b/src/core/iomgr/resolve_address_windows.c
index fb5fd0d4f6..fcd80b3912 100644
--- a/src/core/iomgr/resolve_address_windows.c
+++ b/src/core/iomgr/resolve_address_windows.c
@@ -128,6 +128,7 @@ done:
/* Thread function to asynch-ify grpc_blocking_resolve_address */
static void do_request(void *rp) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
request *r = rp;
grpc_resolved_addresses *resolved =
grpc_blocking_resolve_address(r->name, r->default_port);
@@ -137,7 +138,8 @@ static void do_request(void *rp) {
gpr_free(r->default_port);
grpc_iomgr_unregister_object(&r->iomgr_object);
gpr_free(r);
- cb(arg, resolved);
+ cb(&exec_ctx, arg, resolved);
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
@@ -149,7 +151,7 @@ void grpc_resolve_address(const char *name, const char *default_port,
grpc_resolve_cb cb, void *arg) {
request *r = gpr_malloc(sizeof(request));
gpr_thd_id id;
- const char *label;
+ char *label;
gpr_asprintf(&label, "resolve:%s", name);
grpc_iomgr_register_object(&r->iomgr_object, label);
gpr_free(label);
diff --git a/src/core/iomgr/sockaddr_utils.c b/src/core/iomgr/sockaddr_utils.c
index 0e4bf24549..511a5d5c59 100644
--- a/src/core/iomgr/sockaddr_utils.c
+++ b/src/core/iomgr/sockaddr_utils.c
@@ -158,8 +158,10 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
ip = &addr6->sin6_addr;
port = ntohs(addr6->sin6_port);
}
+ /* Windows inet_ntop wants a mutable ip pointer */
if (ip != NULL &&
- inet_ntop(addr->sa_family, ip, ntop_buf, sizeof(ntop_buf)) != NULL) {
+ inet_ntop(addr->sa_family, (void *)ip, ntop_buf, sizeof(ntop_buf)) !=
+ NULL) {
ret = gpr_join_host_port(out, ntop_buf, port);
} else {
ret = gpr_asprintf(out, "(sockaddr family=%d)", addr->sa_family);
diff --git a/src/core/iomgr/socket_windows.c b/src/core/iomgr/socket_windows.c
index 557ca82226..fafb7b6622 100644
--- a/src/core/iomgr/socket_windows.c
+++ b/src/core/iomgr/socket_windows.c
@@ -82,7 +82,8 @@ void grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
DisconnectEx(winsocket->socket, NULL, 0, 0);
} else {
char *utf8_message = gpr_format_message(WSAGetLastError());
- gpr_log(GPR_ERROR, "Unable to retrieve DisconnectEx pointer : %s", utf8_message);
+ gpr_log(GPR_ERROR, "Unable to retrieve DisconnectEx pointer : %s",
+ utf8_message);
gpr_free(utf8_message);
}
closesocket(winsocket->socket);
diff --git a/src/core/iomgr/socket_windows.h b/src/core/iomgr/socket_windows.h
index 498921e0fd..dfbfabe1f9 100644
--- a/src/core/iomgr/socket_windows.h
+++ b/src/core/iomgr/socket_windows.h
@@ -41,6 +41,7 @@
#include <grpc/support/atm.h>
#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/exec_ctx.h"
/* This holds the data for an outstanding read or write on a socket.
The mutex to protect the concurrent access to that data is the one
@@ -54,8 +55,7 @@ typedef struct grpc_winsocket_callback_info {
OVERLAPPED overlapped;
/* The callback information for the pending operation. May be empty if the
caller hasn't registered a callback yet. */
- void (*cb)(void *opaque, int success);
- void *opaque;
+ grpc_closure *closure;
/* A boolean to describe if the IO Completion Port got a notification for
that operation. This will happen if the operation completed before the
called had time to register a callback. We could avoid that behavior
@@ -91,7 +91,7 @@ typedef struct grpc_winsocket {
This prevents that. */
int added_to_iocp;
- grpc_iomgr_closure shutdown_closure;
+ grpc_closure shutdown_closure;
/* A label for iomgr to track outstanding objects */
grpc_iomgr_object iomgr_object;
diff --git a/src/core/iomgr/tcp_client.h b/src/core/iomgr/tcp_client.h
index 12296bd55b..5e18e71ca2 100644
--- a/src/core/iomgr/tcp_client.h
+++ b/src/core/iomgr/tcp_client.h
@@ -44,8 +44,9 @@
NULL on failure).
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
-void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
- void *arg, grpc_pollset_set *interested_parties,
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
+ grpc_endpoint **endpoint,
+ grpc_pollset_set *interested_parties,
const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline);
diff --git a/src/core/iomgr/tcp_client_posix.c b/src/core/iomgr/tcp_client_posix.c
index 07fa44ad37..346566866a 100644
--- a/src/core/iomgr/tcp_client_posix.c
+++ b/src/core/iomgr/tcp_client_posix.c
@@ -57,16 +57,16 @@
extern int grpc_tcp_trace;
typedef struct {
- void (*cb)(void *arg, grpc_endpoint *tcp);
- void *cb_arg;
gpr_mu mu;
grpc_fd *fd;
gpr_timespec deadline;
grpc_alarm alarm;
int refs;
- grpc_iomgr_closure write_closure;
+ grpc_closure write_closure;
grpc_pollset_set *interested_parties;
char *addr_str;
+ grpc_endpoint **ep;
+ grpc_closure *closure;
} async_connect;
static int prepare_socket(const struct sockaddr *addr, int fd) {
@@ -91,7 +91,7 @@ error:
return 0;
}
-static void tc_on_alarm(void *acp, int success) {
+static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, int success) {
int done;
async_connect *ac = acp;
if (grpc_tcp_trace) {
@@ -100,7 +100,7 @@ static void tc_on_alarm(void *acp, int success) {
}
gpr_mu_lock(&ac->mu);
if (ac->fd != NULL) {
- grpc_fd_shutdown(ac->fd);
+ grpc_fd_shutdown(exec_ctx, ac->fd);
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
@@ -111,15 +111,14 @@ static void tc_on_alarm(void *acp, int success) {
}
}
-static void on_writable(void *acp, int success) {
+static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
async_connect *ac = acp;
int so_error = 0;
socklen_t so_error_size;
int err;
int done;
- grpc_endpoint *ep = NULL;
- void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
- void *cb_arg = ac->cb_arg;
+ grpc_endpoint **ep = ac->ep;
+ grpc_closure *closure = ac->closure;
grpc_fd *fd;
if (grpc_tcp_trace) {
@@ -133,7 +132,7 @@ static void on_writable(void *acp, int success) {
ac->fd = NULL;
gpr_mu_unlock(&ac->mu);
- grpc_alarm_cancel(&ac->alarm);
+ grpc_alarm_cancel(exec_ctx, &ac->alarm);
gpr_mu_lock(&ac->mu);
if (success) {
@@ -162,7 +161,7 @@ static void on_writable(void *acp, int success) {
don't do that! */
gpr_log(GPR_ERROR, "kernel out of buffers");
gpr_mu_unlock(&ac->mu);
- grpc_fd_notify_on_write(fd, &ac->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure);
return;
} else {
switch (so_error) {
@@ -176,8 +175,8 @@ static void on_writable(void *acp, int success) {
goto finish;
}
} else {
- grpc_pollset_set_del_fd(ac->interested_parties, fd);
- ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
+ grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
+ *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
fd = NULL;
goto finish;
}
@@ -190,8 +189,8 @@ static void on_writable(void *acp, int success) {
finish:
if (fd != NULL) {
- grpc_pollset_set_del_fd(ac->interested_parties, fd);
- grpc_fd_orphan(fd, NULL, "tcp_client_orphan");
+ grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
+ grpc_fd_orphan(exec_ctx, fd, NULL, "tcp_client_orphan");
fd = NULL;
}
done = (--ac->refs == 0);
@@ -201,11 +200,12 @@ finish:
gpr_free(ac->addr_str);
gpr_free(ac);
}
- cb(cb_arg, ep);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, *ep != NULL);
}
-void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
- void *arg, grpc_pollset_set *interested_parties,
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties,
const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline) {
int fd;
@@ -218,6 +218,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
char *name;
char *addr_str;
+ *ep = NULL;
+
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
@@ -235,7 +237,7 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
addr_len = sizeof(addr4_copy);
}
if (!prepare_socket(addr, fd)) {
- cb(arg, NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
return;
}
@@ -250,22 +252,23 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
- cb(arg, grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));
+ *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
- grpc_fd_orphan(fdobj, NULL, "tcp_client_connect_error");
- cb(arg, NULL);
+ grpc_fd_orphan(exec_ctx, fdobj, NULL, "tcp_client_connect_error");
+ grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
goto done;
}
- grpc_pollset_set_add_fd(interested_parties, fdobj);
+ grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
ac = gpr_malloc(sizeof(async_connect));
- ac->cb = cb;
- ac->cb_arg = arg;
+ ac->closure = closure;
+ ac->ep = ep;
ac->fd = fdobj;
ac->interested_parties = interested_parties;
ac->addr_str = addr_str;
@@ -281,10 +284,10 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
}
gpr_mu_lock(&ac->mu);
- grpc_alarm_init(&ac->alarm,
+ grpc_alarm_init(exec_ctx, &ac->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
- grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
done:
diff --git a/src/core/iomgr/tcp_client_windows.c b/src/core/iomgr/tcp_client_windows.c
index 6f57de0289..3540c55676 100644
--- a/src/core/iomgr/tcp_client_windows.c
+++ b/src/core/iomgr/tcp_client_windows.c
@@ -52,14 +52,15 @@
#include "src/core/iomgr/socket_windows.h"
typedef struct {
- void (*cb)(void *arg, grpc_endpoint *tcp);
- void *cb_arg;
+ grpc_closure *on_done;
gpr_mu mu;
grpc_winsocket *socket;
gpr_timespec deadline;
grpc_alarm alarm;
char *addr_name;
int refs;
+ grpc_closure on_connect;
+ grpc_endpoint **endpoint;
} async_connect;
static void async_connect_unlock_and_cleanup(async_connect *ac) {
@@ -73,7 +74,7 @@ static void async_connect_unlock_and_cleanup(async_connect *ac) {
}
}
-static void on_alarm(void *acp, int occured) {
+static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, int occured) {
async_connect *ac = acp;
gpr_mu_lock(&ac->mu);
/* If the alarm didn't occur, it got cancelled. */
@@ -83,15 +84,14 @@ static void on_alarm(void *acp, int occured) {
async_connect_unlock_and_cleanup(ac);
}
-static void on_connect(void *acp, int from_iocp) {
+static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, int from_iocp) {
async_connect *ac = acp;
SOCKET sock = ac->socket->socket;
- grpc_endpoint *ep = NULL;
+ grpc_endpoint **ep = ac->endpoint;
grpc_winsocket_callback_info *info = &ac->socket->write_info;
- void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
- void *cb_arg = ac->cb_arg;
-
- grpc_alarm_cancel(&ac->alarm);
+ grpc_closure *on_done = ac->on_done;
+
+ grpc_alarm_cancel(exec_ctx, &ac->alarm);
gpr_mu_lock(&ac->mu);
@@ -106,7 +106,7 @@ static void on_connect(void *acp, int from_iocp) {
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message);
} else {
- ep = grpc_tcp_create(ac->socket, ac->addr_name);
+ *ep = grpc_tcp_create(ac->socket, ac->addr_name);
ac->socket = NULL;
}
}
@@ -114,13 +114,14 @@ static void on_connect(void *acp, int from_iocp) {
async_connect_unlock_and_cleanup(ac);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
- cb(cb_arg, ep);
+ on_done->cb(exec_ctx, on_done->cb_arg, *ep != NULL);
}
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
-void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
- void *arg, grpc_pollset_set *interested_parties,
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
+ grpc_endpoint **endpoint,
+ grpc_pollset_set *interested_parties,
const struct sockaddr *addr, size_t addr_len,
gpr_timespec deadline) {
SOCKET sock = INVALID_SOCKET;
@@ -137,6 +138,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
char *utf8_message;
grpc_winsocket_callback_info *info;
+ *endpoint = NULL;
+
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
@@ -176,7 +179,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
- success = ConnectEx(sock, addr, (int)addr_len, NULL, 0, NULL, &info->overlapped);
+ success =
+ ConnectEx(sock, addr, (int)addr_len, NULL, 0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */
@@ -189,16 +193,17 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
}
ac = gpr_malloc(sizeof(async_connect));
- ac->cb = cb;
- ac->cb_arg = arg;
+ ac->on_done = on_done;
ac->socket = socket;
gpr_mu_init(&ac->mu);
ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
+ ac->endpoint = endpoint;
+ grpc_closure_init(&ac->on_connect, on_connect, ac);
- grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac,
+ grpc_alarm_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
gpr_now(GPR_CLOCK_MONOTONIC));
- grpc_socket_notify_on_write(socket, on_connect, ac);
+ grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
return;
failure:
@@ -210,7 +215,7 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
- cb(arg, NULL);
+ grpc_exec_ctx_enqueue(exec_ctx, on_done, 0);
}
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c
index 68f469c368..c98f0125f8 100644
--- a/src/core/iomgr/tcp_posix.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -85,39 +85,42 @@ typedef struct {
/** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */
size_t outgoing_byte_idx;
- grpc_iomgr_closure *read_cb;
- grpc_iomgr_closure *write_cb;
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
- grpc_iomgr_closure read_closure;
- grpc_iomgr_closure write_closure;
+ grpc_closure read_closure;
+ grpc_closure write_closure;
char *peer_string;
} grpc_tcp;
-static void tcp_handle_read(void *arg /* grpc_tcp */, int success);
-static void tcp_handle_write(void *arg /* grpc_tcp */, int success);
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ int success);
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ int success);
-static void tcp_shutdown(grpc_endpoint *ep) {
+static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_fd_shutdown(tcp->em_fd);
+ grpc_fd_shutdown(exec_ctx, tcp->em_fd);
}
-static void tcp_free(grpc_tcp *tcp) {
- grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
+static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ grpc_fd_orphan(exec_ctx, tcp->em_fd, NULL, "tcp_unref_orphan");
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
/*#define GRPC_TCP_REFCOUNT_DEBUG*/
#ifdef GRPC_TCP_REFCOUNT_DEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(cl, tcp, reason) \
+ tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
- int line) {
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
+ const char *reason, const char *file, int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
reason, tcp->refcount.count, tcp->refcount.count - 1);
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -128,24 +131,24 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
gpr_ref(&tcp->refcount);
}
#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
-static void tcp_destroy(grpc_endpoint *ep) {
+static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- TCP_UNREF(tcp, "destroy");
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
-static void call_read_cb(grpc_tcp *tcp, int success) {
- grpc_iomgr_closure *cb = tcp->read_cb;
+static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, int success) {
+ grpc_closure *cb = tcp->read_cb;
if (grpc_tcp_trace) {
size_t i;
@@ -160,11 +163,11 @@ static void call_read_cb(grpc_tcp *tcp, int success) {
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
- cb->cb(cb->cb_arg, success);
+ cb->cb(exec_ctx, cb->cb_arg, success);
}
#define MAX_READ_IOVEC 4
-static void tcp_continue_read(grpc_tcp *tcp) {
+static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -206,18 +209,18 @@ static void tcp_continue_read(grpc_tcp *tcp) {
tcp->iov_size /= 2;
}
/* We've consumed the edge, request a new one */
- grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
/* TODO(klempner): Log interesting errors */
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(tcp, 0);
- TCP_UNREF(tcp, "read");
+ call_read_cb(exec_ctx, tcp, 0);
+ TCP_UNREF(exec_ctx, tcp, "read");
}
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(tcp, 0);
- TCP_UNREF(tcp, "read");
+ call_read_cb(exec_ctx, tcp, 0);
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
@@ -228,29 +231,29 @@ static void tcp_continue_read(grpc_tcp *tcp) {
++tcp->iov_size;
}
GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
- call_read_cb(tcp, 1);
- TCP_UNREF(tcp, "read");
+ call_read_cb(exec_ctx, tcp, 1);
+ TCP_UNREF(exec_ctx, tcp, "read");
}
GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
}
-static void tcp_handle_read(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ int success) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
if (!success) {
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(tcp, 0);
- TCP_UNREF(tcp, "read");
+ call_read_cb(exec_ctx, tcp, 0);
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
- tcp_continue_read(tcp);
+ tcp_continue_read(exec_ctx, tcp);
}
}
-static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep,
- gpr_slice_buffer *incoming_buffer,
- grpc_iomgr_closure *cb) {
+static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *incoming_buffer, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
@@ -259,16 +262,16 @@ static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = 0;
- grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
- grpc_iomgr_add_delayed_callback(&tcp->read_closure, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, &tcp->read_closure, 1);
}
- /* TODO(ctiller): immediate return */
- return GRPC_ENDPOINT_PENDING;
}
+typedef enum { FLUSH_DONE, FLUSH_PENDING, FLUSH_ERROR } flush_result;
+
#define MAX_WRITE_IOVEC 16
-static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) {
+static flush_result tcp_flush(grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
msg_iovlen_type iov_size;
@@ -318,10 +321,10 @@ static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) {
if (errno == EAGAIN) {
tcp->outgoing_slice_idx = unwind_slice_idx;
tcp->outgoing_byte_idx = unwind_byte_idx;
- return GRPC_ENDPOINT_PENDING;
+ return FLUSH_PENDING;
} else {
/* TODO(klempner): Log some of these */
- return GRPC_ENDPOINT_ERROR;
+ return FLUSH_ERROR;
}
}
@@ -342,42 +345,42 @@ static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) {
}
if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) {
- return GRPC_ENDPOINT_DONE;
+ return FLUSH_DONE;
}
};
}
-static void tcp_handle_write(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ int success) {
grpc_tcp *tcp = (grpc_tcp *)arg;
- grpc_endpoint_op_status status;
- grpc_iomgr_closure *cb;
+ flush_result status;
+ grpc_closure *cb;
if (!success) {
cb = tcp->write_cb;
tcp->write_cb = NULL;
- cb->cb(cb->cb_arg, 0);
- TCP_UNREF(tcp, "write");
+ cb->cb(exec_ctx, cb->cb_arg, 0);
+ TCP_UNREF(exec_ctx, tcp, "write");
return;
}
GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
status = tcp_flush(tcp);
- if (status == GRPC_ENDPOINT_PENDING) {
- grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
+ if (status == FLUSH_PENDING) {
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
- cb->cb(cb->cb_arg, status == GRPC_ENDPOINT_DONE);
- TCP_UNREF(tcp, "write");
+ cb->cb(exec_ctx, cb->cb_arg, status == FLUSH_DONE);
+ TCP_UNREF(exec_ctx, tcp, "write");
}
GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}
-static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep,
- gpr_slice_buffer *buf,
- grpc_iomgr_closure *cb) {
+static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *buf, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_endpoint_op_status status;
+ flush_result status;
if (grpc_tcp_trace) {
size_t i;
@@ -395,32 +398,35 @@ static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep,
if (buf->length == 0) {
GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
- return GRPC_ENDPOINT_DONE;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, 1);
+ return;
}
tcp->outgoing_buffer = buf;
tcp->outgoing_slice_idx = 0;
tcp->outgoing_byte_idx = 0;
status = tcp_flush(tcp);
- if (status == GRPC_ENDPOINT_PENDING) {
+ if (status == FLUSH_PENDING) {
TCP_REF(tcp, "write");
tcp->write_cb = cb;
- grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ } else {
+ grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE);
}
GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
- return status;
}
-static void tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
+static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_pollset_add_fd(pollset, tcp->em_fd);
+ grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd);
}
-static void tcp_add_to_pollset_set(grpc_endpoint *ep,
+static void tcp_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset_set *pollset_set) {
grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
+ grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd);
}
static char *tcp_get_peer(grpc_endpoint *ep) {
diff --git a/src/core/iomgr/tcp_server.h b/src/core/iomgr/tcp_server.h
index 9303975781..882635f638 100644
--- a/src/core/iomgr/tcp_server.h
+++ b/src/core/iomgr/tcp_server.h
@@ -40,14 +40,15 @@
typedef struct grpc_tcp_server grpc_tcp_server;
/* Called for newly connected TCP connections. */
-typedef void (*grpc_tcp_server_cb)(void *arg, grpc_endpoint *ep);
+typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_endpoint *ep);
/* Create a server, initially not bound to any ports */
grpc_tcp_server *grpc_tcp_server_create(void);
/* Start listening to bound ports */
-void grpc_tcp_server_start(grpc_tcp_server *server, grpc_pollset **pollsets,
- size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
+ grpc_pollset **pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb, void *cb_arg);
/* Add a port to the server, returning port number on success, or negative
@@ -71,8 +72,7 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
up when grpc_tcp_server_destroy is called. */
int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index);
-void grpc_tcp_server_destroy(grpc_tcp_server *server,
- void (*shutdown_done)(void *shutdown_done_arg),
- void *shutdown_done_arg);
+void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
+ grpc_closure *closure);
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_SERVER_H */
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index f7b692a76b..a582f4a7c3 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -84,8 +84,8 @@ typedef struct {
struct sockaddr_un un;
} addr;
size_t addr_len;
- grpc_iomgr_closure read_closure;
- grpc_iomgr_closure destroyed_closure;
+ grpc_closure read_closure;
+ grpc_closure destroyed_closure;
} server_port;
static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
@@ -118,8 +118,7 @@ struct grpc_tcp_server {
size_t port_capacity;
/* shutdown callback */
- void (*shutdown_complete)(void *);
- void *shutdown_complete_arg;
+ grpc_closure *shutdown_complete;
/* all pollsets interested in new connections */
grpc_pollset **pollsets;
@@ -141,9 +140,8 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
return s;
}
-static void finish_shutdown(grpc_tcp_server *s) {
- s->shutdown_complete(s->shutdown_complete_arg);
- s->shutdown_complete = NULL;
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
gpr_mu_destroy(&s->mu);
@@ -151,25 +149,23 @@ static void finish_shutdown(grpc_tcp_server *s) {
gpr_free(s);
}
-static void destroyed_port(void *server, int success) {
+static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
grpc_tcp_server *s = server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
} else {
GPR_ASSERT(s->destroyed_ports < s->nports);
gpr_mu_unlock(&s->mu);
}
}
-static void dont_care_about_shutdown_completion(void *ignored) {}
-
/* called when all listening endpoints have been shutdown, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_tcp_server *s) {
+static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
size_t i;
/* delete ALL the things */
@@ -188,38 +184,35 @@ static void deactivated_all_ports(grpc_tcp_server *s) {
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
- grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, "tcp_listener_shutdown");
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
+ "tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
-void grpc_tcp_server_destroy(
- grpc_tcp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg),
- void *shutdown_complete_arg) {
+void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
+ grpc_closure *closure) {
size_t i;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
s->shutdown = 1;
- s->shutdown_complete = shutdown_complete
- ? shutdown_complete
- : dont_care_about_shutdown_completion;
- s->shutdown_complete_arg = shutdown_complete_arg;
+ s->shutdown_complete = closure;
/* shutdown all fd's */
if (s->active_ports) {
for (i = 0; i < s->nports; i++) {
- grpc_fd_shutdown(s->ports[i].emfd);
+ grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- deactivated_all_ports(s);
+ deactivated_all_ports(exec_ctx, s);
}
}
@@ -304,7 +297,7 @@ error:
}
/* event manager callback when reads are ready */
-static void on_read(void *arg, int success) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
server_port *sp = arg;
grpc_fd *fdobj;
size_t i;
@@ -320,14 +313,14 @@ static void on_read(void *arg, int success) {
char *addr_str;
char *name;
/* Note: If we ever decide to return this address to the user, remember to
- strip off the ::ffff:0.0.0.0/96 prefix first. */
+ strip off the ::ffff:0.0.0.0/96 prefix first. */
int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
if (fd < 0) {
switch (errno) {
case EINTR:
continue;
case EAGAIN:
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
return;
default:
gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
@@ -349,10 +342,10 @@ static void on_read(void *arg, int success) {
of channels -- we certainly should not be automatically adding every
incoming channel to every pollset owned by the server */
for (i = 0; i < sp->server->pollset_count; i++) {
- grpc_pollset_add_fd(sp->server->pollsets[i], fdobj);
+ grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
}
sp->server->on_accept_cb(
- sp->server->on_accept_cb_arg,
+ exec_ctx, sp->server->on_accept_cb_arg,
grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));
gpr_free(name);
@@ -365,7 +358,7 @@ error:
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
- deactivated_all_ports(sp->server);
+ deactivated_all_ports(exec_ctx, sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
@@ -489,9 +482,10 @@ int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].fd : -1;
}
-void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets, size_t
- pollset_count, grpc_tcp_server_cb on_accept_cb, void
- *on_accept_cb_arg) {
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb,
+ void *on_accept_cb_arg) {
size_t i, j;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
@@ -503,11 +497,12 @@ void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets, size_t
s->pollset_count = pollset_count;
for (i = 0; i < s->nports; i++) {
for (j = 0; j < pollset_count; j++) {
- grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
+ grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
}
s->ports[i].read_closure.cb = on_read;
s->ports[i].read_closure.cb_arg = &s->ports[i];
- grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
+ grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
+ &s->ports[i].read_closure);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
diff --git a/src/core/iomgr/tcp_server_windows.c b/src/core/iomgr/tcp_server_windows.c
index c42e5e7527..4b11ab0f06 100644
--- a/src/core/iomgr/tcp_server_windows.c
+++ b/src/core/iomgr/tcp_server_windows.c
@@ -67,6 +67,8 @@ typedef struct server_port {
/* The cached AcceptEx for that port. */
LPFN_ACCEPTEX AcceptEx;
int shutting_down;
+ /* closure for socket notification of accept being ready */
+ grpc_closure on_accept;
} server_port;
/* the overall server */
@@ -86,8 +88,7 @@ struct grpc_tcp_server {
size_t port_capacity;
/* shutdown callback */
- void(*shutdown_complete)(void *);
- void *shutdown_complete_arg;
+ grpc_closure *shutdown_complete;
};
/* Public function. Allocates the proper data structures to hold a
@@ -107,14 +108,14 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
static void dont_care_about_shutdown_completion(void *arg) {}
-static void finish_shutdown(grpc_tcp_server *s) {
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
size_t i;
- s->shutdown_complete(s->shutdown_complete_arg);
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
/* Now that the accepts have been aborted, we can destroy the sockets.
- The IOCP won't get notified on these, so we can flag them as already
- closed by the system. */
+ The IOCP won't get notified on these, so we can flag them as already
+ closed by the system. */
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
grpc_winsocket_destroy(sp->socket);
@@ -124,17 +125,13 @@ static void finish_shutdown(grpc_tcp_server *s) {
}
/* Public function. Stops and destroys a grpc_tcp_server. */
-void grpc_tcp_server_destroy(grpc_tcp_server *s,
- void (*shutdown_complete)(void *shutdown_done_arg),
- void *shutdown_complete_arg) {
+void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
+ grpc_closure *shutdown_complete) {
size_t i;
int immediately_done = 0;
gpr_mu_lock(&s->mu);
- s->shutdown_complete = shutdown_complete
- ? shutdown_complete
- : dont_care_about_shutdown_completion;
- s->shutdown_complete_arg = shutdown_complete_arg;
+ s->shutdown_complete = shutdown_complete;
/* First, shutdown all fd's. This will queue abortion calls for all
of the pending accepts due to the normal operation mechanism. */
@@ -149,7 +146,7 @@ void grpc_tcp_server_destroy(grpc_tcp_server *s,
gpr_mu_unlock(&s->mu);
if (immediately_done) {
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
@@ -201,26 +198,25 @@ error:
return -1;
}
-static void decrement_active_ports_and_notify(server_port *sp) {
+static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
+ server_port *sp) {
int notify = 0;
sp->shutting_down = 0;
gpr_mu_lock(&sp->server->mu);
GPR_ASSERT(sp->server->active_ports > 0);
- if (0 == --sp->server->active_ports && sp->server->shutdown_complete != NULL) {
+ if (0 == --sp->server->active_ports &&
+ sp->server->shutdown_complete != NULL) {
notify = 1;
}
gpr_mu_unlock(&sp->server->mu);
if (notify) {
- finish_shutdown(sp->server);
+ finish_shutdown(exec_ctx, sp->server);
}
}
-/* start_accept will reference that for the IOCP notification request. */
-static void on_accept(void *arg, int from_iocp);
-
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
-static void start_accept(server_port *port) {
+static void start_accept(grpc_exec_ctx *exec_ctx, server_port *port) {
SOCKET sock = INVALID_SOCKET;
char *message;
char *utf8_message;
@@ -259,7 +255,7 @@ static void start_accept(server_port *port) {
/* We're ready to do the accept. Calling grpc_socket_notify_on_read may
immediately process an accept that happened in the meantime. */
port->new_socket = sock;
- grpc_socket_notify_on_read(port->socket, on_accept, port);
+ grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept);
return;
failure:
@@ -269,7 +265,7 @@ failure:
change is not seen by on_accept and we proceed to trying new accept,
but we fail there because the listening port has been closed in the
meantime. */
- decrement_active_ports_and_notify(port);
+ decrement_active_ports_and_notify(exec_ctx, port);
return;
}
utf8_message = gpr_format_message(WSAGetLastError());
@@ -279,7 +275,7 @@ failure:
}
/* Event manager callback when reads are ready. */
-static void on_accept(void *arg, int from_iocp) {
+static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
server_port *sp = arg;
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
@@ -301,7 +297,7 @@ static void on_accept(void *arg, int from_iocp) {
}
/* The IOCP notified us of a completed operation. Let's grab the results,
- and act accordingly. */
+ and act accordingly. */
transfered_bytes = 0;
wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
&transfered_bytes, FALSE, &flags);
@@ -309,7 +305,7 @@ static void on_accept(void *arg, int from_iocp) {
if (sp->shutting_down) {
/* During the shutdown case, we ARE expecting an error. So that's well,
and we can wake up the shutdown thread. */
- decrement_active_ports_and_notify(sp);
+ decrement_active_ports_and_notify(exec_ctx, sp);
return;
} else {
char *utf8_message = gpr_format_message(WSAGetLastError());
@@ -345,12 +341,12 @@ static void on_accept(void *arg, int from_iocp) {
/* The only time we should call our callback, is where we successfully
managed to accept a connection, and created an endpoint. */
- if (ep) sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep);
+ if (ep) sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep);
/* As we were notified from the IOCP of one and exactly one accept,
the former socked we created has now either been destroy or assigned
to the new connection. We need to create a new one for the next
connection. */
- start_accept(sp);
+ start_accept(exec_ctx, sp);
}
static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
@@ -384,8 +380,9 @@ static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
/* append it to the list under a lock */
if (s->nports == s->port_capacity) {
- s->port_capacity *= 2;
- s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
+ /* too many ports, and we need to store their address in a closure */
+ /* TODO(ctiller): make server_port a linked list */
+ abort();
}
sp = &s->ports[s->nports++];
sp->server = s;
@@ -393,6 +390,7 @@ static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
sp->shutting_down = 0;
sp->AcceptEx = AcceptEx;
sp->new_socket = INVALID_SOCKET;
+ grpc_closure_init(&sp->on_accept, on_accept, sp);
GPR_ASSERT(sp->socket);
gpr_mu_unlock(&s->mu);
}
@@ -458,12 +456,13 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
return allocated_port;
}
-SOCKET grpc_tcp_server_get_socket(grpc_tcp_server *s, unsigned index) {
+SOCKET
+grpc_tcp_server_get_socket(grpc_tcp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].socket->socket : INVALID_SOCKET;
}
-void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollset,
- size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
+ grpc_pollset **pollset, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void *on_accept_cb_arg) {
size_t i;
@@ -474,7 +473,7 @@ void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollset,
s->on_accept_cb = on_accept_cb;
s->on_accept_cb_arg = on_accept_cb_arg;
for (i = 0; i < s->nports; i++) {
- start_accept(s->ports + i);
+ start_accept(exec_ctx, s->ports + i);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c
index 725c18e6cc..b67683dbfd 100644
--- a/src/core/iomgr/tcp_windows.c
+++ b/src/core/iomgr/tcp_windows.c
@@ -82,8 +82,11 @@ typedef struct grpc_tcp {
/* Refcounting how many operations are in progress. */
gpr_refcount refcount;
- grpc_iomgr_closure *read_cb;
- grpc_iomgr_closure *write_cb;
+ grpc_closure on_read;
+ grpc_closure on_write;
+
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
gpr_slice read_slice;
gpr_slice_buffer *write_slices;
gpr_slice_buffer *read_slices;
@@ -108,18 +111,18 @@ static void tcp_free(grpc_tcp *tcp) {
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
- int line) {
+ int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count - 1);
+ reason, tcp->refcount.count, tcp->refcount.count - 1);
if (gpr_unref(&tcp->refcount)) {
tcp_free(tcp);
}
}
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
- int line) {
+ int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count + 1);
+ reason, tcp->refcount.count, tcp->refcount.count + 1);
gpr_ref(&tcp->refcount);
}
#else
@@ -135,7 +138,9 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
/* Asynchronous callback from the IOCP, or the background thread. */
-static int on_read(grpc_tcp *tcp, int success) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, int success) {
+ grpc_tcp *tcp = tcpp;
+ grpc_closure *cb = tcp->read_cb;
grpc_winsocket *socket = tcp->socket;
gpr_slice sub;
gpr_slice *slice = NULL;
@@ -164,23 +169,15 @@ static int on_read(grpc_tcp *tcp, int success) {
}
}
- return success;
-}
-
-static void on_read_cb(void *tcpp, int from_iocp) {
- grpc_tcp *tcp = tcpp;
- grpc_iomgr_closure *cb = tcp->read_cb;
- int success = on_read(tcp, from_iocp);
tcp->read_cb = NULL;
TCP_UNREF(tcp, "read");
if (cb) {
- cb->cb(cb->cb_arg, success);
+ cb->cb(exec_ctx, cb->cb_arg, success);
}
}
-static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
- gpr_slice_buffer *read_slices,
- grpc_iomgr_closure *cb) {
+static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *read_slices, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->read_info;
@@ -190,7 +187,8 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
WSABUF buffer;
if (tcp->shutting_down) {
- return GRPC_ENDPOINT_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
+ return;
}
tcp->read_cb = cb;
@@ -202,6 +200,8 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
+ TCP_REF(tcp, "read");
+
/* First let's try a synchronous, non-blocking read. */
status =
WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
@@ -209,14 +209,11 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
/* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
- int ok;
info->bytes_transfered = bytes_read;
- ok = on_read(tcp, 1);
- return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, 1);
+ return;
}
- TCP_REF(tcp, "read");
-
/* Otherwise, let's retry, by queuing a read. */
memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
@@ -225,23 +222,21 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
if (status != 0) {
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
- int ok;
info->wsa_error = wsa_error;
- ok = on_read(tcp, 1);
- return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, &tcp->on_read, 0);
+ return;
}
}
- grpc_socket_notify_on_read(tcp->socket, on_read_cb, tcp);
- return GRPC_ENDPOINT_PENDING;
+ grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_write(void *tcpp, int success) {
+static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, int success) {
grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->write_info;
- grpc_iomgr_closure *cb;
+ grpc_closure *cb;
int do_abort = 0;
gpr_mu_lock(&tcp->mu);
@@ -263,13 +258,12 @@ static void on_write(void *tcpp, int success) {
}
TCP_UNREF(tcp, "write");
- cb->cb(cb->cb_arg, success);
+ cb->cb(exec_ctx, cb->cb_arg, success);
}
/* Initiates a write. */
-static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
- gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) {
+static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *slices, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *socket = tcp->socket;
grpc_winsocket_callback_info *info = &socket->write_info;
@@ -281,7 +275,8 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
WSABUF *buffers = local_buffers;
if (tcp->shutting_down) {
- return GRPC_ENDPOINT_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
+ return;
}
tcp->write_cb = cb;
@@ -306,9 +301,9 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
connection that has its send queue filled up. But if we don't, then we can
avoid doing an async write operation at all. */
if (info->wsa_error != WSAEWOULDBLOCK) {
- grpc_endpoint_op_status ret = GRPC_ENDPOINT_ERROR;
+ int ok = 0;
if (status == 0) {
- ret = GRPC_ENDPOINT_DONE;
+ ok = 1;
GPR_ASSERT(bytes_sent == tcp->write_slices->length);
} else {
if (socket->read_info.wsa_error != WSAECONNRESET) {
@@ -318,7 +313,8 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
}
}
if (allocated) gpr_free(allocated);
- return ret;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, ok);
+ return;
}
TCP_REF(tcp, "write");
@@ -334,24 +330,26 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
TCP_UNREF(tcp, "write");
- return GRPC_ENDPOINT_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
+ return;
}
}
/* As all is now setup, we can now ask for the IOCP notification. It may
trigger the callback immediately however, but no matter. */
- grpc_socket_notify_on_write(socket, on_write, tcp);
- return GRPC_ENDPOINT_PENDING;
+ grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write);
}
-static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) {
+static void win_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *ps) {
grpc_tcp *tcp;
(void)ps;
tcp = (grpc_tcp *)ep;
grpc_iocp_add_socket(tcp->socket);
}
-static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) {
+static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pss) {
grpc_tcp *tcp;
(void)pss;
tcp = (grpc_tcp *)ep;
@@ -364,7 +362,7 @@ static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) {
we're not going to protect against these. However the IO Completion Port
callback will happen from another thread, so we need to protect against
concurrent access of the data structure in that regard. */
-static void win_shutdown(grpc_endpoint *ep) {
+static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
gpr_mu_lock(&tcp->mu);
/* At that point, what may happen is that we're already inside the IOCP
@@ -374,7 +372,7 @@ static void win_shutdown(grpc_endpoint *ep) {
gpr_mu_unlock(&tcp->mu);
}
-static void win_destroy(grpc_endpoint *ep) {
+static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
TCP_UNREF(tcp, "destroy");
}
@@ -395,6 +393,8 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
tcp->socket = socket;
gpr_mu_init(&tcp->mu);
gpr_ref_init(&tcp->refcount, 1);
+ grpc_closure_init(&tcp->on_read, on_read, tcp);
+ grpc_closure_init(&tcp->on_write, on_write, tcp);
tcp->peer_string = gpr_strdup(peer_string);
return &tcp->base;
}
diff --git a/src/core/iomgr/time_averaged_stats.c b/src/core/iomgr/time_averaged_stats.c
index f881dde9fc..e075db4373 100644
--- a/src/core/iomgr/time_averaged_stats.c
+++ b/src/core/iomgr/time_averaged_stats.c
@@ -33,7 +33,7 @@
#include "src/core/iomgr/time_averaged_stats.h"
-void grpc_time_averaged_stats_init(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_init(grpc_time_averaged_stats* stats,
double init_avg, double regress_weight,
double persistence_factor) {
stats->init_avg = init_avg;
@@ -45,14 +45,14 @@ void grpc_time_averaged_stats_init(grpc_time_averaged_stats *stats,
stats->aggregate_weighted_avg = init_avg;
}
-void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats* stats,
double value) {
stats->batch_total_value += value;
++stats->batch_num_samples;
}
double grpc_time_averaged_stats_update_average(
- grpc_time_averaged_stats *stats) {
+ grpc_time_averaged_stats* stats) {
/* Start with the current batch: */
double weighted_sum = stats->batch_total_value;
double total_weight = stats->batch_num_samples;
diff --git a/src/core/iomgr/time_averaged_stats.h b/src/core/iomgr/time_averaged_stats.h
index e6dec1b4cd..4e9e3956c2 100644
--- a/src/core/iomgr/time_averaged_stats.h
+++ b/src/core/iomgr/time_averaged_stats.h
@@ -75,14 +75,14 @@ typedef struct {
/* See the comments on the members above for an explanation of init_avg,
regress_weight, and persistence_factor. */
-void grpc_time_averaged_stats_init(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_init(grpc_time_averaged_stats* stats,
double init_avg, double regress_weight,
double persistence_factor);
/* Add a sample to the current batch. */
-void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats *stats,
+void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats* stats,
double value);
/* Complete a batch and compute the new estimate of the average sample
value. */
-double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats *stats);
+double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats* stats);
#endif /* GRPC_INTERNAL_CORE_IOMGR_TIME_AVERAGED_STATS_H */
diff --git a/src/core/iomgr/udp_server.c b/src/core/iomgr/udp_server.c
index 7957066598..ae7c889d0c 100644
--- a/src/core/iomgr/udp_server.c
+++ b/src/core/iomgr/udp_server.c
@@ -79,8 +79,8 @@ typedef struct {
struct sockaddr_un un;
} addr;
size_t addr_len;
- grpc_iomgr_closure read_closure;
- grpc_iomgr_closure destroyed_closure;
+ grpc_closure read_closure;
+ grpc_closure destroyed_closure;
grpc_udp_server_read_cb read_cb;
} server_port;
@@ -111,8 +111,7 @@ struct grpc_udp_server {
size_t port_capacity;
/* shutdown callback */
- void (*shutdown_complete)(void *);
- void *shutdown_complete_arg;
+ grpc_closure *shutdown_complete;
/* all pollsets interested in new connections */
grpc_pollset **pollsets;
@@ -134,8 +133,8 @@ grpc_udp_server *grpc_udp_server_create(void) {
return s;
}
-static void finish_shutdown(grpc_udp_server *s) {
- s->shutdown_complete(s->shutdown_complete_arg);
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
+ grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
gpr_mu_destroy(&s->mu);
gpr_cv_destroy(&s->cv);
@@ -144,24 +143,22 @@ static void finish_shutdown(grpc_udp_server *s) {
gpr_free(s);
}
-static void destroyed_port(void *server, int success) {
+static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
grpc_udp_server *s = server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
} else {
gpr_mu_unlock(&s->mu);
}
}
-static void dont_care_about_shutdown_completion(void *ignored) {}
-
/* called when all listening endpoints have been shutdown, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_udp_server *s) {
+static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
size_t i;
/* delete ALL the things */
@@ -180,38 +177,35 @@ static void deactivated_all_ports(grpc_udp_server *s) {
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
- grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, "udp_listener_shutdown");
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
+ "udp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
-void grpc_udp_server_destroy(
- grpc_udp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg),
- void *shutdown_complete_arg) {
+void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
+ grpc_closure *on_done) {
size_t i;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
s->shutdown = 1;
- s->shutdown_complete = shutdown_complete
- ? shutdown_complete
- : dont_care_about_shutdown_completion;
- s->shutdown_complete_arg = shutdown_complete_arg;
+ s->shutdown_complete = on_done;
/* shutdown all fd's */
if (s->active_ports) {
for (i = 0; i < s->nports; i++) {
- grpc_fd_shutdown(s->ports[i].emfd);
+ grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- deactivated_all_ports(s);
+ deactivated_all_ports(exec_ctx, s);
}
}
@@ -266,14 +260,14 @@ error:
}
/* event manager callback when reads are ready */
-static void on_read(void *arg, int success) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
server_port *sp = arg;
if (success == 0) {
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
- deactivated_all_ports(sp->server);
+ deactivated_all_ports(exec_ctx, sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
@@ -285,7 +279,7 @@ static void on_read(void *arg, int success) {
sp->read_cb(sp->fd);
/* Re-arm the notification event so we get another chance to read. */
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
}
static int add_socket_to_server(grpc_udp_server *s, int fd,
@@ -300,6 +294,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
if (port >= 0) {
grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
+ gpr_free(addr_str);
gpr_mu_lock(&s->mu);
/* append it to the list under a lock */
if (s->nports == s->port_capacity) {
@@ -315,6 +310,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
sp->read_cb = read_cb;
GPR_ASSERT(sp->emfd);
gpr_mu_unlock(&s->mu);
+ gpr_free(name);
}
return port;
@@ -405,19 +401,20 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].fd : -1;
}
-void grpc_udp_server_start(grpc_udp_server *s, grpc_pollset **pollsets,
- size_t pollset_count) {
+void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
+ grpc_pollset **pollsets, size_t pollset_count) {
size_t i, j;
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->active_ports == 0);
s->pollsets = pollsets;
for (i = 0; i < s->nports; i++) {
for (j = 0; j < pollset_count; j++) {
- grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
+ grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
}
s->ports[i].read_closure.cb = on_read;
s->ports[i].read_closure.cb_arg = &s->ports[i];
- grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
+ grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
+ &s->ports[i].read_closure);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
diff --git a/src/core/iomgr/udp_server.h b/src/core/iomgr/udp_server.h
index c930e81cbc..76082d7761 100644
--- a/src/core/iomgr/udp_server.h
+++ b/src/core/iomgr/udp_server.h
@@ -46,8 +46,8 @@ typedef void (*grpc_udp_server_read_cb)(int fd);
grpc_udp_server *grpc_udp_server_create(void);
/* Start listening to bound ports */
-void grpc_udp_server_start(grpc_udp_server *udp_server, grpc_pollset **pollsets,
- size_t pollset_count);
+void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
+ grpc_pollset **pollsets, size_t pollset_count);
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
@@ -64,9 +64,8 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
size_t addr_len, grpc_udp_server_read_cb read_cb);
-void grpc_udp_server_destroy(grpc_udp_server *server,
- void (*shutdown_done)(void *shutdown_done_arg),
- void *shutdown_done_arg);
+void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
+ grpc_closure *on_done);
/* Write the contents of buffer to the underlying UDP socket. */
/*
diff --git a/src/core/iomgr/wakeup_fd_eventfd.c b/src/core/iomgr/wakeup_fd_eventfd.c
index 08fdc74f17..48eb1afb3d 100644
--- a/src/core/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/iomgr/wakeup_fd_eventfd.c
@@ -42,7 +42,7 @@
#include "src/core/iomgr/wakeup_fd_posix.h"
#include <grpc/support/log.h>
-static void eventfd_create(grpc_wakeup_fd *fd_info) {
+static void eventfd_create(grpc_wakeup_fd* fd_info) {
int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
/* TODO(klempner): Handle failure more gracefully */
GPR_ASSERT(efd >= 0);
@@ -50,7 +50,7 @@ static void eventfd_create(grpc_wakeup_fd *fd_info) {
fd_info->write_fd = -1;
}
-static void eventfd_consume(grpc_wakeup_fd *fd_info) {
+static void eventfd_consume(grpc_wakeup_fd* fd_info) {
eventfd_t value;
int err;
do {
@@ -58,15 +58,15 @@ static void eventfd_consume(grpc_wakeup_fd *fd_info) {
} while (err < 0 && errno == EINTR);
}
-static void eventfd_wakeup(grpc_wakeup_fd *fd_info) {
+static void eventfd_wakeup(grpc_wakeup_fd* fd_info) {
int err;
do {
err = eventfd_write(fd_info->read_fd, 1);
} while (err < 0 && errno == EINTR);
}
-static void eventfd_destroy(grpc_wakeup_fd *fd_info) {
- close(fd_info->read_fd);
+static void eventfd_destroy(grpc_wakeup_fd* fd_info) {
+ if (fd_info->read_fd != 0) close(fd_info->read_fd);
}
static int eventfd_check_availability(void) {
diff --git a/src/core/iomgr/wakeup_fd_pipe.c b/src/core/iomgr/wakeup_fd_pipe.c
index 902034ee4b..80de181d9d 100644
--- a/src/core/iomgr/wakeup_fd_pipe.c
+++ b/src/core/iomgr/wakeup_fd_pipe.c
@@ -44,7 +44,7 @@
#include "src/core/iomgr/socket_utils_posix.h"
#include <grpc/support/log.h>
-static void pipe_init(grpc_wakeup_fd *fd_info) {
+static void pipe_init(grpc_wakeup_fd* fd_info) {
int pipefd[2];
/* TODO(klempner): Make this nonfatal */
GPR_ASSERT(0 == pipe(pipefd));
@@ -54,7 +54,7 @@ static void pipe_init(grpc_wakeup_fd *fd_info) {
fd_info->write_fd = pipefd[1];
}
-static void pipe_consume(grpc_wakeup_fd *fd_info) {
+static void pipe_consume(grpc_wakeup_fd* fd_info) {
char buf[128];
ssize_t r;
@@ -74,15 +74,15 @@ static void pipe_consume(grpc_wakeup_fd *fd_info) {
}
}
-static void pipe_wakeup(grpc_wakeup_fd *fd_info) {
+static void pipe_wakeup(grpc_wakeup_fd* fd_info) {
char c = 0;
while (write(fd_info->write_fd, &c, 1) != 1 && errno == EINTR)
;
}
-static void pipe_destroy(grpc_wakeup_fd *fd_info) {
- close(fd_info->read_fd);
- close(fd_info->write_fd);
+static void pipe_destroy(grpc_wakeup_fd* fd_info) {
+ if (fd_info->read_fd != 0) close(fd_info->read_fd);
+ if (fd_info->write_fd != 0) close(fd_info->write_fd);
}
static int pipe_check_availability(void) {
diff --git a/src/core/iomgr/wakeup_fd_posix.h b/src/core/iomgr/wakeup_fd_posix.h
index b6c086900d..fe71b5abe9 100644
--- a/src/core/iomgr/wakeup_fd_posix.h
+++ b/src/core/iomgr/wakeup_fd_posix.h
@@ -72,10 +72,10 @@ void grpc_wakeup_fd_global_init_force_fallback(void);
typedef struct grpc_wakeup_fd grpc_wakeup_fd;
typedef struct grpc_wakeup_fd_vtable {
- void (*init)(grpc_wakeup_fd *fd_info);
- void (*consume)(grpc_wakeup_fd *fd_info);
- void (*wakeup)(grpc_wakeup_fd *fd_info);
- void (*destroy)(grpc_wakeup_fd *fd_info);
+ void (*init)(grpc_wakeup_fd* fd_info);
+ void (*consume)(grpc_wakeup_fd* fd_info);
+ void (*wakeup)(grpc_wakeup_fd* fd_info);
+ void (*destroy)(grpc_wakeup_fd* fd_info);
/* Must be called before calling any other functions */
int (*check_availability)(void);
} grpc_wakeup_fd_vtable;
@@ -87,10 +87,10 @@ struct grpc_wakeup_fd {
#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
-void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info);
-void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info);
-void grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info);
-void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info);
+void grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info);
+void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd* fd_info);
+void grpc_wakeup_fd_wakeup(grpc_wakeup_fd* fd_info);
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd* fd_info);
/* Defined in some specialized implementation's .c file, or by
* wakeup_fd_nospecial.c if no such implementation exists. */
diff --git a/src/core/iomgr/workqueue.h b/src/core/iomgr/workqueue.h
new file mode 100644
index 0000000000..714536233c
--- /dev/null
+++ b/src/core/iomgr/workqueue.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_H
+#define GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_H
+
+#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/pollset.h"
+#include "src/core/iomgr/closure.h"
+#include "src/core/iomgr/exec_ctx.h"
+
+#ifdef GPR_POSIX_SOCKET
+#include "src/core/iomgr/workqueue_posix.h"
+#endif
+
+#ifdef GPR_WIN32
+#include "src/core/iomgr/workqueue_windows.h"
+#endif
+
+/** A workqueue represents a list of work to be executed asynchronously. */
+struct grpc_workqueue;
+typedef struct grpc_workqueue grpc_workqueue;
+
+/** Create a work queue */
+grpc_workqueue *grpc_workqueue_create(grpc_exec_ctx *exec_ctx);
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
+
+#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#define GRPC_WORKQUEUE_REF(p, r) \
+ grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_WORKQUEUE_UNREF(cl, p, r) \
+ grpc_workqueue_unref((cl), (p), __FILE__, __LINE__, (r))
+void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
+ const char *reason);
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason);
+#else
+#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
+#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
+void grpc_workqueue_ref(grpc_workqueue *workqueue);
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
+#endif
+
+/** Bind this workqueue to a pollset */
+void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue,
+ grpc_pollset *pollset);
+
+/** Add a work item to a workqueue */
+void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
+ int success);
+
+#endif
diff --git a/src/core/iomgr/workqueue_posix.c b/src/core/iomgr/workqueue_posix.c
new file mode 100644
index 0000000000..0a0f3c364e
--- /dev/null
+++ b/src/core/iomgr/workqueue_posix.c
@@ -0,0 +1,142 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
+#include "src/core/iomgr/workqueue.h"
+
+#include <stdio.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/iomgr/fd_posix.h"
+
+/* Forward declaration: the wakeup fd's read callback (defined below). */
+static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success);
+
+/* Allocate and initialize a workqueue: refcount starts at 1, the closure
+   list starts empty, and the read side of a freshly created wakeup fd is
+   wrapped in a grpc_fd with on_readable armed as its read callback. */
+grpc_workqueue *grpc_workqueue_create(grpc_exec_ctx *exec_ctx) {
+  char name[32];
+  grpc_workqueue *workqueue = gpr_malloc(sizeof(grpc_workqueue));
+  gpr_ref_init(&workqueue->refs, 1);
+  gpr_mu_init(&workqueue->mu);
+  workqueue->closure_list.head = workqueue->closure_list.tail = NULL;
+  grpc_wakeup_fd_init(&workqueue->wakeup_fd);
+  /* NOTE(review): "workqueue:" (10) + "0x" + 16 hex digits + NUL = 29 bytes,
+     so the 32-byte buffer holds on 64-bit %p formats, but snprintf would be
+     safer if the format string ever grows — confirm and consider changing. */
+  sprintf(name, "workqueue:%p", (void *)workqueue);
+  workqueue->wakeup_read_fd =
+      grpc_fd_create(GRPC_WAKEUP_FD_GET_READ_FD(&workqueue->wakeup_fd), name);
+  grpc_closure_init(&workqueue->read_closure, on_readable, workqueue);
+  grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
+                         &workqueue->read_closure);
+  return workqueue;
+}
+
+/* Begin tear-down once the last ref is dropped. Shutting down the wakeup fd
+   is expected to make on_readable fire with success == 0; the actual frees
+   happen there (see on_readable's failure branch below). The queue must be
+   empty at this point — destroying with pending work would drop closures. */
+static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
+                              grpc_workqueue *workqueue) {
+  GPR_ASSERT(grpc_closure_list_empty(workqueue->closure_list));
+  grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
+}
+
+/* Take a reference on the workqueue. The #ifdef switches only the function
+   signature and the logging prologue; both variants fall through to the
+   shared gpr_ref below. */
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
+                        const char *reason) {
+  /* Log the old -> new count before bumping it. */
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p ref %d -> %d %s",
+          workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count + 1,
+          reason);
+#else
+void grpc_workqueue_ref(grpc_workqueue *workqueue) {
+#endif
+  gpr_ref(&workqueue->refs);
+}
+
+/* Drop a reference; when the count reaches zero, start destruction (frees
+   complete asynchronously in on_readable's failure path). As with
+   grpc_workqueue_ref, the #ifdef only selects the signature/logging
+   prologue; the shared body follows. */
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+                          const char *file, int line, const char *reason) {
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p unref %d -> %d %s",
+          workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count - 1,
+          reason);
+#else
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
+#endif
+  if (gpr_unref(&workqueue->refs)) {
+    workqueue_destroy(exec_ctx, workqueue);
+  }
+}
+
+/* Register the workqueue's wakeup read fd with \a pollset, so threads polling
+   that pollset get woken when work is pushed onto an empty queue (see the
+   grpc_wakeup_fd_wakeup call in grpc_workqueue_push). */
+void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
+                                   grpc_workqueue *workqueue,
+                                   grpc_pollset *pollset) {
+  grpc_pollset_add_fd(exec_ctx, pollset, workqueue->wakeup_read_fd);
+}
+
+/* Move the closures currently queued on \a exec_ctx onto \a workqueue.
+
+   Fix: mirror grpc_workqueue_push — if the queue was empty before the move
+   and we are actually adding work, kick the wakeup fd. Without the kick, a
+   poller already blocked on the wakeup fd would never notice the newly
+   queued closures until some later push happened to wake it. */
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
+  gpr_mu_lock(&workqueue->mu);
+  if (grpc_closure_list_empty(workqueue->closure_list) &&
+      !grpc_closure_list_empty(exec_ctx->closure_list)) {
+    grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
+  }
+  grpc_closure_list_move(&exec_ctx->closure_list, &workqueue->closure_list);
+  gpr_mu_unlock(&workqueue->mu);
+}
+
+/* Read callback for the wakeup fd.
+
+   success == 0 means the fd was shut down (workqueue_destroy ran after the
+   last unref): release everything here. Otherwise drain the queued closures
+   into this polling thread's exec_ctx, consume the wakeup, and re-arm the
+   read notification. */
+static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
+  grpc_workqueue *workqueue = arg;
+
+  if (!success) {
+    gpr_mu_destroy(&workqueue->mu);
+    /* HACK: let wakeup_fd code know that we stole the fd */
+    workqueue->wakeup_fd.read_fd = 0;
+    grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
+    grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, "destroy");
+    gpr_free(workqueue);
+  } else {
+    gpr_mu_lock(&workqueue->mu);
+    /* Hand all pending work to the exec_ctx; it runs when the exec_ctx is
+       flushed on this thread. */
+    grpc_closure_list_move(&workqueue->closure_list, &exec_ctx->closure_list);
+    grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
+    gpr_mu_unlock(&workqueue->mu);
+    grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
+                           &workqueue->read_closure);
+  }
+}
+
+/* Enqueue \a closure on \a workqueue. If the list was empty, kick the wakeup
+   fd first so a blocked poller notices the new work.
+   NOTE(review): `success` is written into the closure here and also passed to
+   grpc_closure_list_add — presumably one of the two records is redundant;
+   confirm against closure.c. */
+void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
+                         int success) {
+  closure->success = success;
+  closure->next = NULL;
+  gpr_mu_lock(&workqueue->mu);
+  if (grpc_closure_list_empty(workqueue->closure_list)) {
+    grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
+  }
+  grpc_closure_list_add(&workqueue->closure_list, closure, success);
+  gpr_mu_unlock(&workqueue->mu);
+}
+
+#endif /* GPR_POSIX_SOCKET */
diff --git a/src/core/iomgr/workqueue_posix.h b/src/core/iomgr/workqueue_posix.h
new file mode 100644
index 0000000000..589034fe1b
--- /dev/null
+++ b/src/core/iomgr/workqueue_posix.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H
+#define GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H
+
+struct grpc_fd;
+
+/* POSIX workqueue: a mutex-protected closure list drained by whichever
+   thread polls the wakeup fd (see workqueue_posix.c).
+   NOTE(review): this header uses gpr_refcount, gpr_mu, grpc_closure_list and
+   grpc_wakeup_fd without including their headers — it appears to rely on
+   being included only via workqueue.h after closure.h et al.; confirm. */
+struct grpc_workqueue {
+  /* reference count; the last unref triggers destruction */
+  gpr_refcount refs;
+
+  /* guards closure_list */
+  gpr_mu mu;
+  /* pending work items */
+  grpc_closure_list closure_list;
+
+  /* kicked when work lands on an empty list */
+  grpc_wakeup_fd wakeup_fd;
+  /* read side of wakeup_fd, wrapped so it can join pollsets */
+  struct grpc_fd *wakeup_read_fd;
+
+  /* wraps on_readable as the wakeup fd's read callback */
+  grpc_closure read_closure;
+};
+
+#endif /* GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H */
diff --git a/src/core/iomgr/workqueue_windows.c b/src/core/iomgr/workqueue_windows.c
new file mode 100644
index 0000000000..f9ca57557b
--- /dev/null
+++ b/src/core/iomgr/workqueue_windows.c
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_WIN32
+
+/* Windows workqueue: currently an empty stub that only pulls in the
+   interface header. None of the functions declared in workqueue.h are
+   implemented here yet, so any Windows code path that actually uses the
+   workqueue API will fail to link until they are. */
+#include "src/core/iomgr/workqueue.h"
+
+#endif /* GPR_WIN32 */
diff --git a/src/core/iomgr/workqueue_windows.h b/src/core/iomgr/workqueue_windows.h
new file mode 100644
index 0000000000..941f195f51
--- /dev/null
+++ b/src/core/iomgr/workqueue_windows.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_WINDOWS_H
+#define GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_WINDOWS_H
+
+/* NOTE(review): unlike workqueue_posix.h, no `struct grpc_workqueue`
+   definition is provided for Windows; code that dereferences a
+   grpc_workqueue will not compile on Windows until one is added here. */
+
+#endif /* GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_WINDOWS_H */
diff --git a/src/core/json/json.h b/src/core/json/json.h
index 573584bf6f..c4df2998c3 100644
--- a/src/core/json/json.h
+++ b/src/core/json/json.h
@@ -42,14 +42,14 @@
* are not owned by it.
*/
typedef struct grpc_json {
- struct grpc_json* next;
- struct grpc_json* prev;
- struct grpc_json* child;
- struct grpc_json* parent;
+ struct grpc_json *next;
+ struct grpc_json *prev;
+ struct grpc_json *child;
+ struct grpc_json *parent;
grpc_json_type type;
- const char* key;
- const char* value;
+ const char *key;
+ const char *value;
} grpc_json;
/* The next two functions are going to parse the input string, and
@@ -65,8 +65,8 @@ typedef struct grpc_json {
*
* Delete the allocated tree afterward using grpc_json_destroy().
*/
-grpc_json* grpc_json_parse_string_with_len(char* input, size_t size);
-grpc_json* grpc_json_parse_string(char* input);
+grpc_json *grpc_json_parse_string_with_len(char *input, size_t size);
+grpc_json *grpc_json_parse_string(char *input);
/* This function will create a new string using gpr_realloc, and will
* deserialize the grpc_json tree into it. It'll be zero-terminated,
@@ -76,13 +76,13 @@ grpc_json* grpc_json_parse_string(char* input);
* If indent is 0, then newlines will be suppressed as well, and the
* output will be condensed at its maximum.
*/
-char* grpc_json_dump_to_string(grpc_json* json, int indent);
+char *grpc_json_dump_to_string(grpc_json *json, int indent);
/* Use these to create or delete a grpc_json object.
* Deletion is recursive. We will not attempt to free any of the strings
* in any of the objects of that tree.
*/
-grpc_json* grpc_json_create(grpc_json_type type);
-void grpc_json_destroy(grpc_json* json);
+grpc_json *grpc_json_create(grpc_json_type type);
+void grpc_json_destroy(grpc_json *json);
#endif /* GRPC_INTERNAL_CORE_JSON_JSON_H */
diff --git a/src/core/json/json_reader.c b/src/core/json/json_reader.c
index c22d4edd47..8abad01252 100644
--- a/src/core/json/json_reader.c
+++ b/src/core/json/json_reader.c
@@ -37,61 +37,61 @@
#include "src/core/json/json_reader.h"
-static void json_reader_string_clear(grpc_json_reader* reader) {
+static void json_reader_string_clear(grpc_json_reader *reader) {
reader->vtable->string_clear(reader->userdata);
}
-static void json_reader_string_add_char(grpc_json_reader* reader,
+static void json_reader_string_add_char(grpc_json_reader *reader,
gpr_uint32 c) {
reader->vtable->string_add_char(reader->userdata, c);
}
-static void json_reader_string_add_utf32(grpc_json_reader* reader,
+static void json_reader_string_add_utf32(grpc_json_reader *reader,
gpr_uint32 utf32) {
reader->vtable->string_add_utf32(reader->userdata, utf32);
}
-static gpr_uint32 grpc_json_reader_read_char(grpc_json_reader* reader) {
+static gpr_uint32 grpc_json_reader_read_char(grpc_json_reader *reader) {
return reader->vtable->read_char(reader->userdata);
}
-static void json_reader_container_begins(grpc_json_reader* reader,
+static void json_reader_container_begins(grpc_json_reader *reader,
grpc_json_type type) {
reader->vtable->container_begins(reader->userdata, type);
}
static grpc_json_type grpc_json_reader_container_ends(
- grpc_json_reader* reader) {
+ grpc_json_reader *reader) {
return reader->vtable->container_ends(reader->userdata);
}
-static void json_reader_set_key(grpc_json_reader* reader) {
+static void json_reader_set_key(grpc_json_reader *reader) {
reader->vtable->set_key(reader->userdata);
}
-static void json_reader_set_string(grpc_json_reader* reader) {
+static void json_reader_set_string(grpc_json_reader *reader) {
reader->vtable->set_string(reader->userdata);
}
-static int json_reader_set_number(grpc_json_reader* reader) {
+static int json_reader_set_number(grpc_json_reader *reader) {
return reader->vtable->set_number(reader->userdata);
}
-static void json_reader_set_true(grpc_json_reader* reader) {
+static void json_reader_set_true(grpc_json_reader *reader) {
reader->vtable->set_true(reader->userdata);
}
-static void json_reader_set_false(grpc_json_reader* reader) {
+static void json_reader_set_false(grpc_json_reader *reader) {
reader->vtable->set_false(reader->userdata);
}
-static void json_reader_set_null(grpc_json_reader* reader) {
+static void json_reader_set_null(grpc_json_reader *reader) {
reader->vtable->set_null(reader->userdata);
}
/* Call this function to initialize the reader structure. */
-void grpc_json_reader_init(grpc_json_reader* reader,
- grpc_json_reader_vtable* vtable, void* userdata) {
+void grpc_json_reader_init(grpc_json_reader *reader,
+ grpc_json_reader_vtable *vtable, void *userdata) {
memset(reader, 0, sizeof(*reader));
reader->vtable = vtable;
reader->userdata = userdata;
@@ -99,13 +99,13 @@ void grpc_json_reader_init(grpc_json_reader* reader,
reader->state = GRPC_JSON_STATE_VALUE_BEGIN;
}
-int grpc_json_reader_is_complete(grpc_json_reader* reader) {
+int grpc_json_reader_is_complete(grpc_json_reader *reader) {
return ((reader->depth == 0) &&
((reader->state == GRPC_JSON_STATE_END) ||
(reader->state == GRPC_JSON_STATE_VALUE_END)));
}
-grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
gpr_uint32 c, success;
/* This state-machine is a strict implementation of ECMA-404 */
diff --git a/src/core/json/json_reader.h b/src/core/json/json_reader.h
index 4d5487f790..417db110cf 100644
--- a/src/core/json/json_reader.h
+++ b/src/core/json/json_reader.h
@@ -82,27 +82,27 @@ struct grpc_json_reader;
typedef struct grpc_json_reader_vtable {
/* Clears your internal string scratchpad. */
- void (*string_clear)(void* userdata);
+ void (*string_clear)(void *userdata);
/* Adds a char to the string scratchpad. */
- void (*string_add_char)(void* userdata, gpr_uint32 c);
+ void (*string_add_char)(void *userdata, gpr_uint32 c);
/* Adds a utf32 char to the string scratchpad. */
- void (*string_add_utf32)(void* userdata, gpr_uint32 c);
+ void (*string_add_utf32)(void *userdata, gpr_uint32 c);
/* Reads a character from your input. May be utf-8, 16 or 32. */
- gpr_uint32 (*read_char)(void* userdata);
+ gpr_uint32 (*read_char)(void *userdata);
/* Starts a container of type GRPC_JSON_ARRAY or GRPC_JSON_OBJECT. */
- void (*container_begins)(void* userdata, grpc_json_type type);
+ void (*container_begins)(void *userdata, grpc_json_type type);
/* Ends the current container. Must return the type of its parent. */
- grpc_json_type (*container_ends)(void* userdata);
+ grpc_json_type (*container_ends)(void *userdata);
/* Your internal string scratchpad is an object's key. */
- void (*set_key)(void* userdata);
+ void (*set_key)(void *userdata);
/* Your internal string scratchpad is a string value. */
- void (*set_string)(void* userdata);
+ void (*set_string)(void *userdata);
/* Your internal string scratchpad is a numerical value. Return 1 if valid. */
- int (*set_number)(void* userdata);
+ int (*set_number)(void *userdata);
/* Sets the values true, false or null. */
- void (*set_true)(void* userdata);
- void (*set_false)(void* userdata);
- void (*set_null)(void* userdata);
+ void (*set_true)(void *userdata);
+ void (*set_false)(void *userdata);
+ void (*set_null)(void *userdata);
} grpc_json_reader_vtable;
typedef struct grpc_json_reader {
@@ -110,8 +110,8 @@ typedef struct grpc_json_reader {
* The definition is public so you can put it on your stack.
*/
- void* userdata;
- grpc_json_reader_vtable* vtable;
+ void *userdata;
+ grpc_json_reader_vtable *vtable;
int depth;
int in_object;
int in_array;
@@ -144,17 +144,17 @@ typedef enum {
* . GRPC_JSON_INTERNAL_ERROR if the parser somehow ended into an invalid
* internal state.
*/
-grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader);
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader);
/* Call this function to initialize the reader structure. */
-void grpc_json_reader_init(grpc_json_reader* reader,
- grpc_json_reader_vtable* vtable, void* userdata);
+void grpc_json_reader_init(grpc_json_reader *reader,
+ grpc_json_reader_vtable *vtable, void *userdata);
/* You may call this from the read_char callback if you don't know where is the
* end of your input stream, and you'd like the json reader to hint you that it
* has completed reading its input, so you can return an EOF to it. Note that
* there might still be trailing whitespaces after that point.
*/
-int grpc_json_reader_is_complete(grpc_json_reader* reader);
+int grpc_json_reader_is_complete(grpc_json_reader *reader);
#endif /* GRPC_INTERNAL_CORE_JSON_JSON_READER_H */
diff --git a/src/core/json/json_string.c b/src/core/json/json_string.c
index e6622ec461..0461c2703f 100644
--- a/src/core/json/json_string.c
+++ b/src/core/json/json_string.c
@@ -53,13 +53,13 @@
* input size, and never expands it.
*/
typedef struct {
- grpc_json* top;
- grpc_json* current_container;
- grpc_json* current_value;
- gpr_uint8* input;
- gpr_uint8* key;
- gpr_uint8* string;
- gpr_uint8* string_ptr;
+ grpc_json *top;
+ grpc_json *current_container;
+ grpc_json *current_value;
+ gpr_uint8 *input;
+ gpr_uint8 *key;
+ gpr_uint8 *string;
+ gpr_uint8 *string_ptr;
size_t remaining_input;
} json_reader_userdata;
@@ -67,7 +67,7 @@ typedef struct {
* The point is that we allocate that string in chunks of 256 bytes.
*/
typedef struct {
- char* output;
+ char *output;
size_t free_space;
size_t string_len;
size_t allocated;
@@ -77,8 +77,8 @@ typedef struct {
* and will enlarge it if necessary. We're only allocating chunks of 256
* bytes at a time (or multiples thereof).
*/
-static void json_writer_output_check(void* userdata, size_t needed) {
- json_writer_userdata* state = userdata;
+static void json_writer_output_check(void *userdata, size_t needed) {
+ json_writer_userdata *state = userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
@@ -89,23 +89,23 @@ static void json_writer_output_check(void* userdata, size_t needed) {
}
/* These are needed by the writer's implementation. */
-static void json_writer_output_char(void* userdata, char c) {
- json_writer_userdata* state = userdata;
+static void json_writer_output_char(void *userdata, char c) {
+ json_writer_userdata *state = userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
}
-static void json_writer_output_string_with_len(void* userdata, const char* str,
+static void json_writer_output_string_with_len(void *userdata, const char *str,
size_t len) {
- json_writer_userdata* state = userdata;
+ json_writer_userdata *state = userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
state->free_space -= len;
}
-static void json_writer_output_string(void* userdata, const char* str) {
+static void json_writer_output_string(void *userdata, const char *str) {
size_t len = strlen(str);
json_writer_output_string_with_len(userdata, str, len);
}
@@ -113,8 +113,8 @@ static void json_writer_output_string(void* userdata, const char* str) {
/* The reader asks us to clear our scratchpad. In our case, we'll simply mark
* the end of the current string, and advance our output pointer.
*/
-static void json_reader_string_clear(void* userdata) {
- json_reader_userdata* state = userdata;
+static void json_reader_string_clear(void *userdata) {
+ json_reader_userdata *state = userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@@ -122,8 +122,8 @@ static void json_reader_string_clear(void* userdata) {
state->string = state->string_ptr;
}
-static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
- json_reader_userdata* state = userdata;
+static void json_reader_string_add_char(void *userdata, gpr_uint32 c) {
+ json_reader_userdata *state = userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (gpr_uint8)c;
@@ -132,7 +132,7 @@ static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
/* We are converting a UTF-32 character into UTF-8 here,
* as described by RFC3629.
*/
-static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
+static void json_reader_string_add_utf32(void *userdata, gpr_uint32 c) {
if (c <= 0x7f) {
json_reader_string_add_char(userdata, c);
} else if (c <= 0x7ff) {
@@ -162,9 +162,9 @@ static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
/* We consider that the input may be a zero-terminated string. So we
* can end up hitting eof before the end of the alleged string length.
*/
-static gpr_uint32 json_reader_read_char(void* userdata) {
+static gpr_uint32 json_reader_read_char(void *userdata) {
gpr_uint32 r;
- json_reader_userdata* state = userdata;
+ json_reader_userdata *state = userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@@ -182,9 +182,9 @@ static gpr_uint32 json_reader_read_char(void* userdata) {
/* Helper function to create a new grpc_json object and link it into
* our tree-in-progress inside our opaque structure.
*/
-static grpc_json* json_create_and_link(void* userdata, grpc_json_type type) {
- json_reader_userdata* state = userdata;
- grpc_json* json = grpc_json_create(type);
+static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
+ json_reader_userdata *state = userdata;
+ grpc_json *json = grpc_json_create(type);
json->parent = state->current_container;
json->prev = state->current_value;
@@ -198,7 +198,7 @@ static grpc_json* json_create_and_link(void* userdata, grpc_json_type type) {
json->parent->child = json;
}
if (json->parent->type == GRPC_JSON_OBJECT) {
- json->key = (char*)state->key;
+ json->key = (char *)state->key;
}
}
if (!state->top) {
@@ -208,9 +208,9 @@ static grpc_json* json_create_and_link(void* userdata, grpc_json_type type) {
return json;
}
-static void json_reader_container_begins(void* userdata, grpc_json_type type) {
- json_reader_userdata* state = userdata;
- grpc_json* container;
+static void json_reader_container_begins(void *userdata, grpc_json_type type) {
+ json_reader_userdata *state = userdata;
+ grpc_json *container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -228,9 +228,9 @@ static void json_reader_container_begins(void* userdata, grpc_json_type type) {
* Also note that if we're at the top of the tree, and the last container
* ends, we have to return GRPC_JSON_TOP_LEVEL.
*/
-static grpc_json_type json_reader_container_ends(void* userdata) {
+static grpc_json_type json_reader_container_ends(void *userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
- json_reader_userdata* state = userdata;
+ json_reader_userdata *state = userdata;
GPR_ASSERT(state->current_container);
@@ -250,36 +250,36 @@ static grpc_json_type json_reader_container_ends(void* userdata) {
* Note that in the set_number case, we're not going to try interpreting it.
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
-static void json_reader_set_key(void* userdata) {
- json_reader_userdata* state = userdata;
+static void json_reader_set_key(void *userdata) {
+ json_reader_userdata *state = userdata;
state->key = state->string;
}
-static void json_reader_set_string(void* userdata) {
- json_reader_userdata* state = userdata;
- grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING);
- json->value = (char*)state->string;
+static void json_reader_set_string(void *userdata) {
+ json_reader_userdata *state = userdata;
+ grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
+ json->value = (char *)state->string;
}
-static int json_reader_set_number(void* userdata) {
- json_reader_userdata* state = userdata;
- grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
- json->value = (char*)state->string;
+static int json_reader_set_number(void *userdata) {
+ json_reader_userdata *state = userdata;
+ grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
+ json->value = (char *)state->string;
return 1;
}
/* The object types true, false and null are self-sufficient, and don't need
* any more information beside their type.
*/
-static void json_reader_set_true(void* userdata) {
+static void json_reader_set_true(void *userdata) {
json_create_and_link(userdata, GRPC_JSON_TRUE);
}
-static void json_reader_set_false(void* userdata) {
+static void json_reader_set_false(void *userdata) {
json_create_and_link(userdata, GRPC_JSON_FALSE);
}
-static void json_reader_set_null(void* userdata) {
+static void json_reader_set_null(void *userdata) {
json_create_and_link(userdata, GRPC_JSON_NULL);
}
@@ -292,17 +292,17 @@ static grpc_json_reader_vtable reader_vtable = {
json_reader_set_false, json_reader_set_null};
/* And finally, let's define our public API. */
-grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
+grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) {
grpc_json_reader reader;
json_reader_userdata state;
- grpc_json* json = NULL;
+ grpc_json *json = NULL;
grpc_json_reader_status status;
if (!input) return NULL;
state.top = state.current_container = state.current_value = NULL;
state.string = state.key = NULL;
- state.string_ptr = state.input = (gpr_uint8*)input;
+ state.string_ptr = state.input = (gpr_uint8 *)input;
state.remaining_input = size;
grpc_json_reader_init(&reader, &reader_vtable, &state);
@@ -319,11 +319,11 @@ grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
#define UNBOUND_JSON_STRING_LENGTH 0x7fffffff
-grpc_json* grpc_json_parse_string(char* input) {
+grpc_json *grpc_json_parse_string(char *input) {
return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH);
}
-static void json_dump_recursive(grpc_json_writer* writer, grpc_json* json,
+static void json_dump_recursive(grpc_json_writer *writer, grpc_json *json,
int in_object) {
while (json) {
if (in_object) grpc_json_writer_object_key(writer, json->key);
@@ -363,7 +363,7 @@ static grpc_json_writer_vtable writer_vtable = {
json_writer_output_char, json_writer_output_string,
json_writer_output_string_with_len};
-char* grpc_json_dump_to_string(grpc_json* json, int indent) {
+char *grpc_json_dump_to_string(grpc_json *json, int indent) {
grpc_json_writer writer;
json_writer_userdata state;
diff --git a/src/core/json/json_writer.c b/src/core/json/json_writer.c
index ca9c835825..e0d02f411e 100644
--- a/src/core/json/json_writer.c
+++ b/src/core/json/json_writer.c
@@ -37,22 +37,22 @@
#include "src/core/json/json_writer.h"
-static void json_writer_output_char(grpc_json_writer* writer, char c) {
+static void json_writer_output_char(grpc_json_writer *writer, char c) {
writer->vtable->output_char(writer->userdata, c);
}
-static void json_writer_output_string(grpc_json_writer* writer,
- const char* str) {
+static void json_writer_output_string(grpc_json_writer *writer,
+ const char *str) {
writer->vtable->output_string(writer->userdata, str);
}
-static void json_writer_output_string_with_len(grpc_json_writer* writer,
- const char* str, size_t len) {
+static void json_writer_output_string_with_len(grpc_json_writer *writer,
+ const char *str, size_t len) {
writer->vtable->output_string_with_len(writer->userdata, str, len);
}
-void grpc_json_writer_init(grpc_json_writer* writer, int indent,
- grpc_json_writer_vtable* vtable, void* userdata) {
+void grpc_json_writer_init(grpc_json_writer *writer, int indent,
+ grpc_json_writer_vtable *vtable, void *userdata) {
memset(writer, 0, sizeof(*writer));
writer->container_empty = 1;
writer->indent = indent;
@@ -60,7 +60,7 @@ void grpc_json_writer_init(grpc_json_writer* writer, int indent,
writer->userdata = userdata;
}
-static void json_writer_output_indent(grpc_json_writer* writer) {
+static void json_writer_output_indent(grpc_json_writer *writer) {
static const char spacesstr[] =
" "
" "
@@ -88,7 +88,7 @@ static void json_writer_output_indent(grpc_json_writer* writer) {
writer, spacesstr + sizeof(spacesstr) - 1 - spaces, spaces);
}
-static void json_writer_value_end(grpc_json_writer* writer) {
+static void json_writer_value_end(grpc_json_writer *writer) {
if (writer->container_empty) {
writer->container_empty = 0;
if ((writer->indent == 0) || (writer->depth == 0)) return;
@@ -100,7 +100,7 @@ static void json_writer_value_end(grpc_json_writer* writer) {
}
}
-static void json_writer_escape_utf16(grpc_json_writer* writer,
+static void json_writer_escape_utf16(grpc_json_writer *writer,
gpr_uint16 utf16) {
static const char hex[] = "0123456789abcdef";
@@ -111,8 +111,8 @@ static void json_writer_escape_utf16(grpc_json_writer* writer,
json_writer_output_char(writer, hex[(utf16)&0x0f]);
}
-static void json_writer_escape_string(grpc_json_writer* writer,
- const char* string) {
+static void json_writer_escape_string(grpc_json_writer *writer,
+ const char *string) {
json_writer_output_char(writer, '"');
for (;;) {
@@ -207,7 +207,7 @@ static void json_writer_escape_string(grpc_json_writer* writer,
json_writer_output_char(writer, '"');
}
-void grpc_json_writer_container_begins(grpc_json_writer* writer,
+void grpc_json_writer_container_begins(grpc_json_writer *writer,
grpc_json_type type) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
@@ -217,7 +217,7 @@ void grpc_json_writer_container_begins(grpc_json_writer* writer,
writer->depth++;
}
-void grpc_json_writer_container_ends(grpc_json_writer* writer,
+void grpc_json_writer_container_ends(grpc_json_writer *writer,
grpc_json_type type) {
if (writer->indent && !writer->container_empty)
json_writer_output_char(writer, '\n');
@@ -228,7 +228,7 @@ void grpc_json_writer_container_ends(grpc_json_writer* writer,
writer->got_key = 0;
}
-void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) {
+void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string) {
json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_escape_string(writer, string);
@@ -236,23 +236,23 @@ void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) {
writer->got_key = 1;
}
-void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string) {
+void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_string(writer, string);
writer->got_key = 0;
}
-void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer,
- const char* string, size_t len) {
+void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
+ const char *string, size_t len) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_string_with_len(writer, string, len);
writer->got_key = 0;
}
-void grpc_json_writer_value_string(grpc_json_writer* writer,
- const char* string) {
+void grpc_json_writer_value_string(grpc_json_writer *writer,
+ const char *string) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_escape_string(writer, string);
diff --git a/src/core/json/json_writer.h b/src/core/json/json_writer.h
index a299dfabf8..9ef04aab01 100644
--- a/src/core/json/json_writer.h
+++ b/src/core/json/json_writer.h
@@ -52,17 +52,17 @@
typedef struct grpc_json_writer_vtable {
/* Adds a character to the output stream. */
- void (*output_char)(void* userdata, char);
+ void (*output_char)(void *userdata, char);
/* Adds a zero-terminated string to the output stream. */
- void (*output_string)(void* userdata, const char* str);
+ void (*output_string)(void *userdata, const char *str);
/* Adds a fixed-length string to the output stream. */
- void (*output_string_with_len)(void* userdata, const char* str, size_t len);
+ void (*output_string_with_len)(void *userdata, const char *str, size_t len);
} grpc_json_writer_vtable;
typedef struct grpc_json_writer {
- void* userdata;
- grpc_json_writer_vtable* vtable;
+ void *userdata;
+ grpc_json_writer_vtable *vtable;
int indent;
int depth;
int container_empty;
@@ -74,24 +74,24 @@ typedef struct grpc_json_writer {
* use indent=0, then the output will not have any newlines either, thus
* emitting a condensed json output.
*/
-void grpc_json_writer_init(grpc_json_writer* writer, int indent,
- grpc_json_writer_vtable* vtable, void* userdata);
+void grpc_json_writer_init(grpc_json_writer *writer, int indent,
+ grpc_json_writer_vtable *vtable, void *userdata);
/* Signals the beginning of a container. */
-void grpc_json_writer_container_begins(grpc_json_writer* writer,
+void grpc_json_writer_container_begins(grpc_json_writer *writer,
grpc_json_type type);
/* Signals the end of a container. */
-void grpc_json_writer_container_ends(grpc_json_writer* writer,
+void grpc_json_writer_container_ends(grpc_json_writer *writer,
grpc_json_type type);
/* Writes down an object key for the next value. */
-void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string);
+void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string);
/* Sets a raw value. Useful for numbers. */
-void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string);
+void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string);
/* Sets a raw value with its length. Useful for values like true or false. */
-void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer,
- const char* string, size_t len);
+void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
+ const char *string, size_t len);
/* Sets a string value. It'll be escaped, and utf-8 validated. */
-void grpc_json_writer_value_string(grpc_json_writer* writer,
- const char* string);
+void grpc_json_writer_value_string(grpc_json_writer *writer,
+ const char *string);
#endif /* GRPC_INTERNAL_CORE_JSON_JSON_WRITER_H */
diff --git a/src/core/profiling/basic_timers.c b/src/core/profiling/basic_timers.c
index 4b6a0d2f56..2f6c88daac 100644
--- a/src/core/profiling/basic_timers.c
+++ b/src/core/profiling/basic_timers.c
@@ -54,10 +54,10 @@ typedef enum {
typedef struct grpc_timer_entry {
gpr_timespec tm;
int tag;
- const char* tagstr;
+ const char *tagstr;
marker_type type;
- void* id;
- const char* file;
+ void *id;
+ const char *file;
int line;
} grpc_timer_entry;
@@ -69,20 +69,20 @@ static __thread int count;
static void log_report() {
int i;
for (i = 0; i < count; i++) {
- grpc_timer_entry* entry = &(log[i]);
- printf("GRPC_LAT_PROF %ld.%09d %p %c %d(%s) %p %s %d\n",
- entry->tm.tv_sec, entry->tm.tv_nsec,
- (void*)(gpr_intptr)gpr_thd_currentid(), entry->type, entry->tag,
- entry->tagstr, entry->id, entry->file, entry->line);
+ grpc_timer_entry *entry = &(log[i]);
+ printf("GRPC_LAT_PROF %ld.%09d %p %c %d(%s) %p %s %d\n", entry->tm.tv_sec,
+ entry->tm.tv_nsec, (void *)(gpr_intptr)gpr_thd_currentid(),
+ entry->type, entry->tag, entry->tagstr, entry->id, entry->file,
+ entry->line);
}
/* Now clear out the log */
count = 0;
}
-static void grpc_timers_log_add(int tag, const char* tagstr, marker_type type,
- void* id, const char* file, int line) {
- grpc_timer_entry* entry;
+static void grpc_timers_log_add(int tag, const char *tagstr, marker_type type,
+ void *id, const char *file, int line) {
+ grpc_timer_entry *entry;
/* TODO (vpai) : Improve concurrency */
if (count == MAX_COUNT) {
@@ -101,28 +101,28 @@ static void grpc_timers_log_add(int tag, const char* tagstr, marker_type type,
}
/* Latency profiler API implementation. */
-void grpc_timer_add_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
+void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
+ const char *file, int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, MARK, id, file, line);
}
}
-void grpc_timer_add_important_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
+void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
+ const char *file, int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, IMPORTANT, id, file, line);
}
}
-void grpc_timer_begin(int tag, const char* tagstr, void* id, const char* file,
+void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, BEGIN, id, file, line);
}
}
-void grpc_timer_end(int tag, const char* tagstr, void* id, const char* file,
+void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, END, id, file, line);
@@ -136,5 +136,6 @@ void grpc_timers_global_destroy(void) {}
#else /* !GRPC_BASIC_PROFILER */
void grpc_timers_global_init(void) {}
+
void grpc_timers_global_destroy(void) {}
#endif /* GRPC_BASIC_PROFILER */
diff --git a/src/core/profiling/stap_timers.c b/src/core/profiling/stap_timers.c
index 99975163f9..6868a674a9 100644
--- a/src/core/profiling/stap_timers.c
+++ b/src/core/profiling/stap_timers.c
@@ -42,22 +42,22 @@
#include "src/core/profiling/stap_probes.h"
/* Latency profiler API implementation. */
-void grpc_timer_add_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
+void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
+ const char *file, int line) {
_STAP_ADD_MARK(tag);
}
-void grpc_timer_add_important_mark(int tag, const char* tagstr, void* id,
- const char* file, int line) {
+void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
+ const char *file, int line) {
_STAP_ADD_IMPORTANT_MARK(tag);
}
-void grpc_timer_begin(int tag, const char* tagstr, void* id, const char* file,
+void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
int line) {
_STAP_TIMING_NS_BEGIN(tag);
}
-void grpc_timer_end(int tag, const char* tagstr, void* id, const char* file,
+void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
int line) {
_STAP_TIMING_NS_END(tag);
}
diff --git a/src/core/profiling/timers.h b/src/core/profiling/timers.h
index 92dbab9042..a70520408c 100644
--- a/src/core/profiling/timers.h
+++ b/src/core/profiling/timers.h
@@ -76,8 +76,8 @@ enum grpc_profiling_tags {
GRPC_PTAG_HTTP2_UNLOCK_CLEANUP = 402 + GRPC_PTAG_IGNORE_THRESHOLD,
/* > 1024 Unassigned reserved. For any miscellaneous use.
- * Use addition to generate tags from this base or take advantage of the 10
- * zero'd bits for OR-ing. */
+ * Use addition to generate tags from this base or take advantage of the 10
+ * zero'd bits for OR-ing. */
GRPC_PTAG_OTHER_BASE = 1024
};
diff --git a/src/core/security/client_auth_filter.c b/src/core/security/client_auth_filter.c
index 16b3fed08f..c152a06498 100644
--- a/src/core/security/client_auth_filter.c
+++ b/src/core/security/client_auth_filter.c
@@ -83,15 +83,15 @@ static void reset_service_url(call_data *calld) {
}
}
-static void bubble_up_error(grpc_call_element *elem, grpc_status_code status,
- const char *error_msg) {
+static void bubble_up_error(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_status_code status, const char *error_msg) {
call_data *calld = elem->call_data;
gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg);
grpc_transport_stream_op_add_cancellation(&calld->op, status);
- grpc_call_next_op(elem, &calld->op);
+ grpc_call_next_op(exec_ctx, elem, &calld->op);
}
-static void on_credentials_metadata(void *user_data,
+static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_credentials_md *md_elems,
size_t num_md,
grpc_credentials_status status) {
@@ -103,7 +103,7 @@ static void on_credentials_metadata(void *user_data,
size_t i;
reset_service_url(calld);
if (status != GRPC_CREDENTIALS_OK) {
- bubble_up_error(elem, GRPC_STATUS_UNAUTHENTICATED,
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED,
"Credentials failed to get metadata.");
return;
}
@@ -117,7 +117,7 @@ static void on_credentials_metadata(void *user_data,
grpc_mdelem_from_slices(chand->md_ctx, gpr_slice_ref(md_elems[i].key),
gpr_slice_ref(md_elems[i].value)));
}
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
void build_service_url(const char *url_scheme, call_data *calld) {
@@ -139,7 +139,8 @@ void build_service_url(const char *url_scheme, call_data *calld) {
gpr_free(service);
}
-static void send_security_metadata(grpc_call_element *elem,
+static void send_security_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
@@ -155,7 +156,7 @@ static void send_security_metadata(grpc_call_element *elem,
if (!channel_creds_has_md && !call_creds_has_md) {
/* Skip sending metadata altogether. */
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
return;
}
@@ -163,7 +164,7 @@ static void send_security_metadata(grpc_call_element *elem,
calld->creds =
grpc_composite_credentials_create(channel_creds, ctx->creds, NULL);
if (calld->creds == NULL) {
- bubble_up_error(elem, GRPC_STATUS_INVALID_ARGUMENT,
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
"Incompatible credentials set on channel and call.");
return;
}
@@ -175,22 +176,23 @@ static void send_security_metadata(grpc_call_element *elem,
build_service_url(chand->security_connector->base.url_scheme, calld);
calld->op = *op; /* Copy op (originates from the caller's stack). */
GPR_ASSERT(calld->pollset);
- grpc_credentials_get_request_metadata(calld->creds, calld->pollset,
+ grpc_credentials_get_request_metadata(exec_ctx, calld->creds, calld->pollset,
calld->service_url,
on_credentials_metadata, elem);
}
-static void on_host_checked(void *user_data, grpc_security_status status) {
+static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_security_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
if (status == GRPC_SECURITY_OK) {
- send_security_metadata(elem, &calld->op);
+ send_security_metadata(exec_ctx, elem, &calld->op);
} else {
char *error_msg;
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
grpc_mdstr_as_c_string(calld->host));
- bubble_up_error(elem, GRPC_STATUS_INVALID_ARGUMENT, error_msg);
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT, error_msg);
gpr_free(error_msg);
}
}
@@ -200,7 +202,8 @@ static void on_host_checked(void *user_data, grpc_security_status status) {
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
-static void auth_start_transport_op(grpc_call_element *elem,
+static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
@@ -254,30 +257,32 @@ static void auth_start_transport_op(grpc_call_element *elem,
const char *call_host = grpc_mdstr_as_c_string(calld->host);
calld->op = *op; /* Copy op (originates from the caller's stack). */
status = grpc_channel_security_connector_check_call_host(
- chand->security_connector, call_host, on_host_checked, elem);
+ exec_ctx, chand->security_connector, call_host, on_host_checked,
+ elem);
if (status != GRPC_SECURITY_OK) {
if (status == GRPC_SECURITY_ERROR) {
char *error_msg;
gpr_asprintf(&error_msg,
"Invalid host %s set in :authority metadata.",
call_host);
- bubble_up_error(elem, GRPC_STATUS_INVALID_ARGUMENT, error_msg);
+ bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
+ error_msg);
gpr_free(error_msg);
}
return; /* early exit */
}
}
- send_security_metadata(elem, op);
+ send_security_metadata(exec_ctx, elem, op);
return; /* early exit */
}
}
- /* pass control up or down the stack */
- grpc_call_next_op(elem, op);
+ /* pass control down the stack */
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
call_data *calld = elem->call_data;
@@ -286,7 +291,8 @@ static void init_call_elem(grpc_call_element *elem,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_credentials_unref(calld->creds);
if (calld->host != NULL) {
@@ -299,7 +305,8 @@ static void destroy_call_elem(grpc_call_element *elem) {
}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
@@ -328,7 +335,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
grpc_channel_security_connector *ctx = chand->security_connector;
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 5d3c7c90b0..bdd9ab8e9c 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -47,6 +47,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
#include <grpc/support/time.h>
/* -- Common. -- */
@@ -54,7 +55,6 @@
struct grpc_credentials_metadata_request {
grpc_credentials *creds;
grpc_credentials_metadata_cb cb;
- grpc_iomgr_closure *on_simulated_token_fetch_done_closure;
void *user_data;
};
@@ -66,8 +66,6 @@ grpc_credentials_metadata_request_create(grpc_credentials *creds,
gpr_malloc(sizeof(grpc_credentials_metadata_request));
r->creds = grpc_credentials_ref(creds);
r->cb = cb;
- r->on_simulated_token_fetch_done_closure =
- gpr_malloc(sizeof(grpc_iomgr_closure));
r->user_data = user_data;
return r;
}
@@ -75,7 +73,6 @@ grpc_credentials_metadata_request_create(grpc_credentials *creds,
static void grpc_credentials_metadata_request_destroy(
grpc_credentials_metadata_request *r) {
grpc_credentials_unref(r->creds);
- gpr_free(r->on_simulated_token_fetch_done_closure);
gpr_free(r);
}
@@ -107,19 +104,17 @@ int grpc_credentials_has_request_metadata_only(grpc_credentials *creds) {
return creds->vtable->has_request_metadata_only(creds);
}
-void grpc_credentials_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+void grpc_credentials_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
if (creds == NULL || !grpc_credentials_has_request_metadata(creds) ||
creds->vtable->get_request_metadata == NULL) {
if (cb != NULL) {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
}
return;
}
- creds->vtable->get_request_metadata(creds, pollset, service_url, cb,
+ creds->vtable->get_request_metadata(exec_ctx, creds, pollset, service_url, cb,
user_data);
}
@@ -223,7 +218,7 @@ static grpc_security_status ssl_create_security_connector(
grpc_security_status status = GRPC_SECURITY_OK;
size_t i = 0;
const char *overridden_target_name = NULL;
- grpc_arg arg;
+ grpc_arg new_arg;
for (i = 0; args && i < args->num_args; i++) {
grpc_arg *arg = &args->args[i];
@@ -238,10 +233,10 @@ static grpc_security_status ssl_create_security_connector(
if (status != GRPC_SECURITY_OK) {
return status;
}
- arg.type = GRPC_ARG_STRING;
- arg.key = GRPC_ARG_HTTP2_SCHEME;
- arg.value.string = "https";
- *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ new_arg.type = GRPC_ARG_STRING;
+ new_arg.key = GRPC_ARG_HTTP2_SCHEME;
+ new_arg.value.string = "https";
+ *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
return status;
}
@@ -374,11 +369,9 @@ static int jwt_has_request_metadata_only(const grpc_credentials *creds) {
return 1;
}
-static void jwt_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+static void jwt_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_service_account_jwt_access_credentials *c =
(grpc_service_account_jwt_access_credentials *)creds;
gpr_timespec refresh_threshold = gpr_time_from_seconds(
@@ -422,10 +415,11 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
}
if (jwt_md != NULL) {
- cb(user_data, jwt_md->entries, jwt_md->num_entries, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, jwt_md->entries, jwt_md->num_entries,
+ GRPC_CREDENTIALS_OK);
grpc_credentials_md_store_unref(jwt_md);
} else {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
}
}
@@ -571,7 +565,8 @@ end:
}
static void on_oauth2_token_fetcher_http_response(
- void *user_data, const grpc_httpcli_response *response) {
+ grpc_exec_ctx *exec_ctx, void *user_data,
+ const grpc_httpcli_response *response) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_oauth2_token_fetcher_credentials *c =
@@ -585,19 +580,19 @@ static void on_oauth2_token_fetcher_http_response(
if (status == GRPC_CREDENTIALS_OK) {
c->token_expiration =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime);
- r->cb(r->user_data, c->access_token_md->entries,
+ r->cb(exec_ctx, r->user_data, c->access_token_md->entries,
c->access_token_md->num_entries, status);
} else {
c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
- r->cb(r->user_data, NULL, 0, status);
+ r->cb(exec_ctx, r->user_data, NULL, 0, status);
}
gpr_mu_unlock(&c->mu);
grpc_credentials_metadata_request_destroy(r);
}
static void oauth2_token_fetcher_get_request_metadata(
- grpc_credentials *creds, grpc_pollset *pollset, const char *service_url,
- grpc_credentials_metadata_cb cb, void *user_data) {
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
gpr_timespec refresh_threshold = gpr_time_from_seconds(
@@ -615,11 +610,12 @@ static void oauth2_token_fetcher_get_request_metadata(
gpr_mu_unlock(&c->mu);
}
if (cached_access_token_md != NULL) {
- cb(user_data, cached_access_token_md->entries,
+ cb(exec_ctx, user_data, cached_access_token_md->entries,
cached_access_token_md->num_entries, GRPC_CREDENTIALS_OK);
grpc_credentials_md_store_unref(cached_access_token_md);
} else {
c->fetch_func(
+ exec_ctx,
grpc_credentials_metadata_request_create(creds, cb, user_data),
&c->httpcli_context, pollset, on_oauth2_token_fetcher_http_response,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), refresh_threshold));
@@ -645,7 +641,7 @@ static grpc_credentials_vtable compute_engine_vtable = {
oauth2_token_fetcher_get_request_metadata, NULL};
static void compute_engine_fetch_oauth2(
- grpc_credentials_metadata_request *metadata_req,
+ grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
grpc_httpcli_header header = {"Metadata-Flavor", "Google"};
@@ -655,8 +651,8 @@ static void compute_engine_fetch_oauth2(
request.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.hdr_count = 1;
request.hdrs = &header;
- grpc_httpcli_get(httpcli_context, pollset, &request, deadline, response_cb,
- metadata_req);
+ grpc_httpcli_get(exec_ctx, httpcli_context, pollset, &request, deadline,
+ response_cb, metadata_req);
}
grpc_credentials *grpc_google_compute_engine_credentials_create(
@@ -684,7 +680,7 @@ static grpc_credentials_vtable refresh_token_vtable = {
oauth2_token_fetcher_get_request_metadata, NULL};
static void refresh_token_fetch_oauth2(
- grpc_credentials_metadata_request *metadata_req,
+ grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb, gpr_timespec deadline) {
grpc_google_refresh_token_credentials *c =
@@ -702,13 +698,12 @@ static void refresh_token_fetch_oauth2(
request.hdr_count = 1;
request.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
- grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body),
- deadline, response_cb, metadata_req);
+ grpc_httpcli_post(exec_ctx, httpcli_context, pollset, &request, body,
+ strlen(body), deadline, response_cb, metadata_req);
gpr_free(body);
}
-grpc_credentials *
-grpc_refresh_token_credentials_create_from_auth_refresh_token(
+grpc_credentials *grpc_refresh_token_credentials_create_from_auth_refresh_token(
grpc_auth_refresh_token refresh_token) {
grpc_google_refresh_token_credentials *c;
if (!grpc_auth_refresh_token_is_valid(&refresh_token)) {
@@ -746,31 +741,29 @@ static int md_only_test_has_request_metadata_only(
return 1;
}
-void on_simulated_token_fetch_done(void *user_data, int success) {
+static void on_simulated_token_fetch_done(void *user_data) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
- GPR_ASSERT(success);
- r->cb(r->user_data, c->md_store->entries, c->md_store->num_entries,
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ r->cb(&exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void md_only_test_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+static void md_only_test_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
if (c->is_async) {
+ gpr_thd_id thd_id;
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
- grpc_iomgr_closure_init(cb_arg->on_simulated_token_fetch_done_closure,
- on_simulated_token_fetch_done, cb_arg);
- grpc_iomgr_add_callback(cb_arg->on_simulated_token_fetch_done_closure);
+ gpr_thd_new(&thd_id, on_simulated_token_fetch_done, cb_arg, NULL);
} else {
- cb(user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
}
}
@@ -810,13 +803,11 @@ static int access_token_has_request_metadata_only(
return 1;
}
-static void access_token_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+static void access_token_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
- cb(user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
}
static grpc_credentials_vtable access_token_vtable = {
@@ -961,13 +952,13 @@ static void composite_md_context_destroy(
gpr_free(ctx);
}
-static void composite_metadata_cb(void *user_data,
+static void composite_metadata_cb(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_credentials_md *md_elems, size_t num_md,
grpc_credentials_status status) {
grpc_composite_credentials_metadata_context *ctx =
(grpc_composite_credentials_metadata_context *)user_data;
if (status != GRPC_CREDENTIALS_OK) {
- ctx->cb(ctx->user_data, NULL, 0, status);
+ ctx->cb(exec_ctx, ctx->user_data, NULL, 0, status);
return;
}
@@ -985,7 +976,7 @@ static void composite_metadata_cb(void *user_data,
grpc_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
if (grpc_credentials_has_request_metadata(inner_creds)) {
- grpc_credentials_get_request_metadata(inner_creds, ctx->pollset,
+ grpc_credentials_get_request_metadata(exec_ctx, inner_creds, ctx->pollset,
ctx->service_url,
composite_metadata_cb, ctx);
return;
@@ -993,20 +984,18 @@ static void composite_metadata_cb(void *user_data,
}
/* We're done!. */
- ctx->cb(ctx->user_data, ctx->md_elems->entries, ctx->md_elems->num_entries,
- GRPC_CREDENTIALS_OK);
+ ctx->cb(exec_ctx, ctx->user_data, ctx->md_elems->entries,
+ ctx->md_elems->num_entries, GRPC_CREDENTIALS_OK);
composite_md_context_destroy(ctx);
}
-static void composite_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+static void composite_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_composite_credentials *c = (grpc_composite_credentials *)creds;
grpc_composite_credentials_metadata_context *ctx;
if (!grpc_credentials_has_request_metadata(creds)) {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
return;
}
ctx = gpr_malloc(sizeof(grpc_composite_credentials_metadata_context));
@@ -1020,8 +1009,9 @@ static void composite_get_request_metadata(grpc_credentials *creds,
while (ctx->creds_index < c->inner.num_creds) {
grpc_credentials *inner_creds = c->inner.creds_array[ctx->creds_index++];
if (grpc_credentials_has_request_metadata(inner_creds)) {
- grpc_credentials_get_request_metadata(inner_creds, pollset, service_url,
- composite_metadata_cb, ctx);
+ grpc_credentials_get_request_metadata(exec_ctx, inner_creds, pollset,
+ service_url, composite_metadata_cb,
+ ctx);
return;
}
}
@@ -1153,13 +1143,11 @@ static int iam_has_request_metadata_only(const grpc_credentials *creds) {
return 1;
}
-static void iam_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+static void iam_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds;
- cb(user_data, c->iam_md->entries, c->iam_md->num_entries,
+ cb(exec_ctx, user_data, c->iam_md->entries, c->iam_md->num_entries,
GRPC_CREDENTIALS_OK);
}
@@ -1213,13 +1201,15 @@ static void plugin_md_request_metadata_ready(void *request,
size_t num_md,
grpc_status_code status,
const char *error_details) {
+ /* called from application code */
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_metadata_plugin_request *r = (grpc_metadata_plugin_request *)request;
if (status != GRPC_STATUS_OK) {
if (error_details != NULL) {
gpr_log(GPR_ERROR, "Getting metadata from plugin failed with error: %s",
error_details);
}
- r->cb(r->user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
+ r->cb(&exec_ctx, r->user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
} else {
size_t i;
grpc_credentials_md *md_array = NULL;
@@ -1231,7 +1221,7 @@ static void plugin_md_request_metadata_ready(void *request,
gpr_slice_from_copied_buffer(md[i].value, md[i].value_length);
}
}
- r->cb(r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK);
+ r->cb(&exec_ctx, r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK);
if (md_array != NULL) {
for (i = 0; i < num_md; i++) {
gpr_slice_unref(md_array[i].key);
@@ -1241,13 +1231,12 @@ static void plugin_md_request_metadata_ready(void *request,
}
}
gpr_free(r);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void plugin_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data) {
+static void plugin_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
if (c->plugin.get_metadata != NULL) {
grpc_metadata_plugin_request *request = gpr_malloc(sizeof(*request));
@@ -1257,7 +1246,7 @@ static void plugin_get_request_metadata(grpc_credentials *creds,
c->plugin.get_metadata(c->plugin.state, service_url,
plugin_md_request_metadata_ready, request);
} else {
- cb(user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+ cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
}
}
@@ -1276,4 +1265,3 @@ grpc_credentials *grpc_metadata_credentials_create_from_plugin(
c->plugin = plugin;
return &c->base;
}
-
diff --git a/src/core/security/credentials.h b/src/core/security/credentials.h
index 38ce0f8ba6..b213e052d3 100644
--- a/src/core/security/credentials.h
+++ b/src/core/security/credentials.h
@@ -124,7 +124,8 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
/* It is the caller's responsibility to gpr_free the result if not NULL. */
char *grpc_get_well_known_google_credentials_file_path(void);
-typedef void (*grpc_credentials_metadata_cb)(void *user_data,
+typedef void (*grpc_credentials_metadata_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
grpc_credentials_md *md_elems,
size_t num_md,
grpc_credentials_status status);
@@ -133,8 +134,8 @@ typedef struct {
void (*destruct)(grpc_credentials *c);
int (*has_request_metadata)(const grpc_credentials *c);
int (*has_request_metadata_only)(const grpc_credentials *c);
- void (*get_request_metadata)(grpc_credentials *c, grpc_pollset *pollset,
- const char *service_url,
+ void (*get_request_metadata)(grpc_exec_ctx *exec_ctx, grpc_credentials *c,
+ grpc_pollset *pollset, const char *service_url,
grpc_credentials_metadata_cb cb,
void *user_data);
grpc_security_status (*create_security_connector)(
@@ -153,11 +154,9 @@ grpc_credentials *grpc_credentials_ref(grpc_credentials *creds);
void grpc_credentials_unref(grpc_credentials *creds);
int grpc_credentials_has_request_metadata(grpc_credentials *creds);
int grpc_credentials_has_request_metadata_only(grpc_credentials *creds);
-void grpc_credentials_get_request_metadata(grpc_credentials *creds,
- grpc_pollset *pollset,
- const char *service_url,
- grpc_credentials_metadata_cb cb,
- void *user_data);
+void grpc_credentials_get_request_metadata(
+ grpc_exec_ctx *exec_ctx, grpc_credentials *creds, grpc_pollset *pollset,
+ const char *service_url, grpc_credentials_metadata_cb cb, void *user_data);
/* Creates a security connector for the channel. May also create new channel
args for the channel to be used in place of the passed in const args if
@@ -216,7 +215,6 @@ typedef struct {
grpc_server_credentials *c, grpc_security_connector **sc);
} grpc_server_credentials_vtable;
-
/* TODO(jboeuf): Add a refcount. */
struct grpc_server_credentials {
const grpc_server_credentials_vtable *vtable;
@@ -271,7 +269,8 @@ typedef struct {
typedef struct grpc_credentials_metadata_request
grpc_credentials_metadata_request;
-typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request *req,
+typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx,
+ grpc_credentials_metadata_request *req,
grpc_httpcli_context *http_context,
grpc_pollset *pollset,
grpc_httpcli_response_cb response_cb,
diff --git a/src/core/security/google_default_credentials.c b/src/core/security/google_default_credentials.c
index 874dd59e84..7b85842808 100644
--- a/src/core/security/google_default_credentials.c
+++ b/src/core/security/google_default_credentials.c
@@ -63,7 +63,8 @@ typedef struct {
} compute_engine_detector;
static void on_compute_engine_detection_http_response(
- void *user_data, const grpc_httpcli_response *response) {
+ grpc_exec_ctx *exec_ctx, void *user_data,
+ const grpc_httpcli_response *response) {
compute_engine_detector *detector = (compute_engine_detector *)user_data;
if (response != NULL && response->status == 200 && response->hdr_count > 0) {
/* Internet providers can return a generic response to all requests, so
@@ -84,12 +85,16 @@ static void on_compute_engine_detection_http_response(
gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
}
-static void destroy_pollset(void *p) { grpc_pollset_destroy(p); }
+static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, int s) {
+ grpc_pollset_destroy(p);
+}
static int is_stack_running_on_compute_engine(void) {
compute_engine_detector detector;
grpc_httpcli_request request;
grpc_httpcli_context context;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure destroy_closure;
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
@@ -106,22 +111,27 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
grpc_httpcli_get(
- &context, &detector.pollset, &request,
+ &exec_ctx, &context, &detector.pollset, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
on_compute_engine_detection_http_response, &detector);
+ grpc_exec_ctx_finish(&exec_ctx);
+
/* Block until we get the response. This is not ideal but this should only be
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
while (!detector.is_done) {
grpc_pollset_worker worker;
- grpc_pollset_work(&detector.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+ grpc_pollset_work(&exec_ctx, &detector.pollset, &worker,
+ gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));
grpc_httpcli_context_destroy(&context);
- grpc_pollset_shutdown(&detector.pollset, destroy_pollset, &detector.pollset);
+ grpc_closure_init(&destroy_closure, destroy_pollset, &detector.pollset);
+ grpc_pollset_shutdown(&exec_ctx, &detector.pollset, &destroy_closure);
+ grpc_exec_ctx_finish(&exec_ctx);
return detector.success;
}
diff --git a/src/core/security/handshake.c b/src/core/security/handshake.c
index 3b49271373..adbdd0b40e 100644
--- a/src/core/security/handshake.c
+++ b/src/core/security/handshake.c
@@ -54,28 +54,31 @@ typedef struct {
gpr_slice_buffer outgoing;
grpc_security_handshake_done_cb cb;
void *user_data;
- grpc_iomgr_closure on_handshake_data_sent_to_peer;
- grpc_iomgr_closure on_handshake_data_received_from_peer;
+ grpc_closure on_handshake_data_sent_to_peer;
+ grpc_closure on_handshake_data_received_from_peer;
} grpc_security_handshake;
+static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
+ void *setup, int success);
-static void on_handshake_data_received_from_peer(void *setup, int success);
+static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *setup,
+ int success);
-static void on_handshake_data_sent_to_peer(void *setup, int success);
-
-static void security_handshake_done(grpc_security_handshake *h,
+static void security_handshake_done(grpc_exec_ctx *exec_ctx,
+ grpc_security_handshake *h,
int is_success) {
if (is_success) {
- h->cb(h->user_data, GRPC_SECURITY_OK, h->wrapped_endpoint,
+ h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->wrapped_endpoint,
h->secure_endpoint);
} else {
if (h->secure_endpoint != NULL) {
- grpc_endpoint_shutdown(h->secure_endpoint);
- grpc_endpoint_destroy(h->secure_endpoint);
+ grpc_endpoint_shutdown(exec_ctx, h->secure_endpoint);
+ grpc_endpoint_destroy(exec_ctx, h->secure_endpoint);
} else {
- grpc_endpoint_destroy(h->wrapped_endpoint);
+ grpc_endpoint_destroy(exec_ctx, h->wrapped_endpoint);
}
- h->cb(h->user_data, GRPC_SECURITY_ERROR, h->wrapped_endpoint, NULL);
+ h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, h->wrapped_endpoint,
+ NULL);
}
if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
@@ -86,13 +89,14 @@ static void security_handshake_done(grpc_security_handshake *h,
gpr_free(h);
}
-static void on_peer_checked(void *user_data, grpc_security_status status) {
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_security_status status) {
grpc_security_handshake *h = user_data;
tsi_frame_protector *protector;
tsi_result result;
if (status != GRPC_SECURITY_OK) {
gpr_log(GPR_ERROR, "Error checking peer.");
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
}
result =
@@ -100,7 +104,7 @@ static void on_peer_checked(void *user_data, grpc_security_status status) {
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Frame protector creation failed with error %s.",
tsi_result_to_string(result));
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
}
h->secure_endpoint =
@@ -108,11 +112,11 @@ static void on_peer_checked(void *user_data, grpc_security_status status) {
h->left_overs.slices, h->left_overs.count);
h->left_overs.count = 0;
h->left_overs.length = 0;
- security_handshake_done(h, 1);
+ security_handshake_done(exec_ctx, h, 1);
return;
}
-static void check_peer(grpc_security_handshake *h) {
+static void check_peer(grpc_exec_ctx *exec_ctx, grpc_security_handshake *h) {
grpc_security_status peer_status;
tsi_peer peer;
tsi_result result = tsi_handshaker_extract_peer(h->handshaker, &peer);
@@ -120,21 +124,22 @@ static void check_peer(grpc_security_handshake *h) {
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Peer extraction failed with error %s",
tsi_result_to_string(result));
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
}
peer_status = grpc_security_connector_check_peer(h->connector, peer,
on_peer_checked, h);
if (peer_status == GRPC_SECURITY_ERROR) {
gpr_log(GPR_ERROR, "Peer check failed.");
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
} else if (peer_status == GRPC_SECURITY_OK) {
- on_peer_checked(h, peer_status);
+ on_peer_checked(exec_ctx, h, peer_status);
}
}
-static void send_handshake_bytes_to_peer(grpc_security_handshake *h) {
+static void send_handshake_bytes_to_peer(grpc_exec_ctx *exec_ctx,
+ grpc_security_handshake *h) {
size_t offset = 0;
tsi_result result = TSI_OK;
gpr_slice to_send;
@@ -154,7 +159,7 @@ static void send_handshake_bytes_to_peer(grpc_security_handshake *h) {
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshake failed with error %s",
tsi_result_to_string(result));
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
}
@@ -163,22 +168,13 @@ static void send_handshake_bytes_to_peer(grpc_security_handshake *h) {
gpr_slice_buffer_reset_and_unref(&h->outgoing);
gpr_slice_buffer_add(&h->outgoing, to_send);
/* TODO(klempner,jboeuf): This should probably use the client setup
- deadline */
- switch (grpc_endpoint_write(h->wrapped_endpoint, &h->outgoing,
- &h->on_handshake_data_sent_to_peer)) {
- case GRPC_ENDPOINT_ERROR:
- gpr_log(GPR_ERROR, "Could not send handshake data to peer.");
- security_handshake_done(h, 0);
- break;
- case GRPC_ENDPOINT_DONE:
- on_handshake_data_sent_to_peer(h, 1);
- break;
- case GRPC_ENDPOINT_PENDING:
- break;
- }
+ deadline */
+ grpc_endpoint_write(exec_ctx, h->wrapped_endpoint, &h->outgoing,
+ &h->on_handshake_data_sent_to_peer);
}
-static void on_handshake_data_received_from_peer(void *handshake, int success) {
+static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
+ void *handshake, int success) {
grpc_security_handshake *h = handshake;
size_t consumed_slice_size = 0;
tsi_result result = TSI_OK;
@@ -188,7 +184,7 @@ static void on_handshake_data_received_from_peer(void *handshake, int success) {
if (!success) {
gpr_log(GPR_ERROR, "Read failed.");
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
}
@@ -203,20 +199,11 @@ static void on_handshake_data_received_from_peer(void *handshake, int success) {
if (tsi_handshaker_is_in_progress(h->handshaker)) {
/* We may need more data. */
if (result == TSI_INCOMPLETE_DATA) {
- switch (grpc_endpoint_read(h->wrapped_endpoint, &h->incoming,
- &h->on_handshake_data_received_from_peer)) {
- case GRPC_ENDPOINT_DONE:
- on_handshake_data_received_from_peer(h, 1);
- break;
- case GRPC_ENDPOINT_ERROR:
- on_handshake_data_received_from_peer(h, 0);
- break;
- case GRPC_ENDPOINT_PENDING:
- break;
- }
+ grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
+ &h->on_handshake_data_received_from_peer);
return;
} else {
- send_handshake_bytes_to_peer(h);
+ send_handshake_bytes_to_peer(exec_ctx, h);
return;
}
}
@@ -224,7 +211,7 @@ static void on_handshake_data_received_from_peer(void *handshake, int success) {
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshake failed with error %s",
tsi_result_to_string(result));
- security_handshake_done(h, 0);
+ security_handshake_done(exec_ctx, h, 0);
return;
}
@@ -234,7 +221,7 @@ static void on_handshake_data_received_from_peer(void *handshake, int success) {
num_left_overs =
(has_left_overs_in_current_slice ? 1 : 0) + h->incoming.count - i - 1;
if (num_left_overs == 0) {
- check_peer(h);
+ check_peer(exec_ctx, h);
return;
}
@@ -249,17 +236,18 @@ static void on_handshake_data_received_from_peer(void *handshake, int success) {
gpr_slice_buffer_addn(
&h->left_overs, &h->incoming.slices[i + 1],
num_left_overs - (size_t)has_left_overs_in_current_slice);
- check_peer(h);
+ check_peer(exec_ctx, h);
}
/* If handshake is NULL, the handshake is done. */
-static void on_handshake_data_sent_to_peer(void *handshake, int success) {
+static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx,
+ void *handshake, int success) {
grpc_security_handshake *h = handshake;
/* Make sure that write is OK. */
if (!success) {
gpr_log(GPR_ERROR, "Write failed.");
- if (handshake != NULL) security_handshake_done(h, 0);
+ if (handshake != NULL) security_handshake_done(exec_ctx, h, 0);
return;
}
@@ -267,23 +255,15 @@ static void on_handshake_data_sent_to_peer(void *handshake, int success) {
if (tsi_handshaker_is_in_progress(h->handshaker)) {
/* TODO(klempner,jboeuf): This should probably use the client setup
deadline */
- switch (grpc_endpoint_read(h->wrapped_endpoint, &h->incoming,
- &h->on_handshake_data_received_from_peer)) {
- case GRPC_ENDPOINT_ERROR:
- on_handshake_data_received_from_peer(h, 0);
- break;
- case GRPC_ENDPOINT_PENDING:
- break;
- case GRPC_ENDPOINT_DONE:
- on_handshake_data_received_from_peer(h, 1);
- break;
- }
+ grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
+ &h->on_handshake_data_received_from_peer);
} else {
- check_peer(h);
+ check_peer(exec_ctx, h);
}
}
-void grpc_do_security_handshake(tsi_handshaker *handshaker,
+void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
+ tsi_handshaker *handshaker,
grpc_security_connector *connector,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
@@ -297,12 +277,12 @@ void grpc_do_security_handshake(tsi_handshaker *handshaker,
h->wrapped_endpoint = nonsecure_endpoint;
h->user_data = user_data;
h->cb = cb;
- grpc_iomgr_closure_init(&h->on_handshake_data_sent_to_peer,
- on_handshake_data_sent_to_peer, h);
- grpc_iomgr_closure_init(&h->on_handshake_data_received_from_peer,
- on_handshake_data_received_from_peer, h);
+ grpc_closure_init(&h->on_handshake_data_sent_to_peer,
+ on_handshake_data_sent_to_peer, h);
+ grpc_closure_init(&h->on_handshake_data_received_from_peer,
+ on_handshake_data_received_from_peer, h);
gpr_slice_buffer_init(&h->left_overs);
gpr_slice_buffer_init(&h->outgoing);
gpr_slice_buffer_init(&h->incoming);
- send_handshake_bytes_to_peer(h);
+ send_handshake_bytes_to_peer(exec_ctx, h);
}
diff --git a/src/core/security/handshake.h b/src/core/security/handshake.h
index d7e4a30580..28eaa79dc3 100644
--- a/src/core/security/handshake.h
+++ b/src/core/security/handshake.h
@@ -37,9 +37,9 @@
#include "src/core/iomgr/endpoint.h"
#include "src/core/security/security_connector.h"
-
/* Calls the callback upon completion. Takes owership of handshaker. */
-void grpc_do_security_handshake(tsi_handshaker *handshaker,
+void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
+ tsi_handshaker *handshaker,
grpc_security_connector *connector,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
diff --git a/src/core/security/jwt_verifier.c b/src/core/security/jwt_verifier.c
index 790f2178db..9de8482025 100644
--- a/src/core/security/jwt_verifier.c
+++ b/src/core/security/jwt_verifier.c
@@ -145,7 +145,7 @@ static jose_header *jose_header_from_json(grpc_json *json, gpr_slice buffer) {
/* We only support RSA-1.5 signatures for now.
Beware of this if we add HMAC support:
https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/
- */
+ */
if (cur->type != GRPC_JSON_STRING || strncmp(cur->value, "RS", 2) ||
evp_md_from_alg(cur->value) == NULL) {
gpr_log(GPR_ERROR, "Invalid alg field [%s]", cur->value);
@@ -494,7 +494,7 @@ static EVP_PKEY *find_verification_key(const grpc_json *json,
jwk_keys = find_property_by_name(json, "keys");
if (jwk_keys == NULL) {
/* Use the google proprietary format which is:
- { <kid1>: <x5091>, <kid2>: <x5092>, ... } */
+ { <kid1>: <x5091>, <kid2>: <x5092>, ... } */
const grpc_json *cur = find_property_by_name(json, header_kid);
if (cur == NULL) return NULL;
return extract_pkey_from_x509(cur->value);
@@ -569,7 +569,7 @@ end:
return result;
}
-static void on_keys_retrieved(void *user_data,
+static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
const grpc_httpcli_response *response) {
grpc_json *json = json_from_http(response);
verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
@@ -610,7 +610,7 @@ end:
verifier_cb_ctx_destroy(ctx);
}
-static void on_openid_config_retrieved(void *user_data,
+static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
const grpc_httpcli_response *response) {
const grpc_json *cur;
grpc_json *json = json_from_http(response);
@@ -618,7 +618,7 @@ static void on_openid_config_retrieved(void *user_data,
grpc_httpcli_request req;
const char *jwks_uri;
- /* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time.*/
+ /* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time. */
if (json == NULL) goto error;
cur = find_property_by_name(json, "jwks_uri");
if (cur == NULL) {
@@ -641,7 +641,7 @@ static void on_openid_config_retrieved(void *user_data,
*(req.host + (req.path - jwks_uri)) = '\0';
}
grpc_httpcli_get(
- &ctx->verifier->http_ctx, ctx->pollset, &req,
+ exec_ctx, &ctx->verifier->http_ctx, ctx->pollset, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
on_keys_retrieved, ctx);
grpc_json_destroy(json);
@@ -682,7 +682,8 @@ static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain,
}
/* Takes ownership of ctx. */
-static void retrieve_key_and_verify(verifier_cb_ctx *ctx) {
+static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
+ verifier_cb_ctx *ctx) {
const char *at_sign;
grpc_httpcli_response_cb http_cb;
char *path_prefix = NULL;
@@ -743,7 +744,7 @@ static void retrieve_key_and_verify(verifier_cb_ctx *ctx) {
}
grpc_httpcli_get(
- &ctx->verifier->http_ctx, ctx->pollset, &req,
+ exec_ctx, &ctx->verifier->http_ctx, ctx->pollset, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
http_cb, ctx);
gpr_free(req.host);
@@ -755,7 +756,8 @@ error:
verifier_cb_ctx_destroy(ctx);
}
-void grpc_jwt_verifier_verify(grpc_jwt_verifier *verifier,
+void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
+ grpc_jwt_verifier *verifier,
grpc_pollset *pollset, const char *jwt,
const char *audience,
grpc_jwt_verification_done_cb cb,
@@ -791,6 +793,7 @@ void grpc_jwt_verifier_verify(grpc_jwt_verifier *verifier,
signature = grpc_base64_decode(cur, 1);
if (GPR_SLICE_IS_EMPTY(signature)) goto error;
retrieve_key_and_verify(
+ exec_ctx,
verifier_cb_ctx_create(verifier, pollset, header, claims, audience,
signature, jwt, signed_jwt_len, user_data, cb));
return;
diff --git a/src/core/security/jwt_verifier.h b/src/core/security/jwt_verifier.h
index 7a32debfcb..51ea036e4a 100644
--- a/src/core/security/jwt_verifier.h
+++ b/src/core/security/jwt_verifier.h
@@ -120,7 +120,8 @@ typedef void (*grpc_jwt_verification_done_cb)(void *user_data,
grpc_jwt_claims *claims);
/* Verifies for the JWT for the given expected audience. */
-void grpc_jwt_verifier_verify(grpc_jwt_verifier *verifier,
+void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
+ grpc_jwt_verifier *verifier,
grpc_pollset *pollset, const char *jwt,
const char *audience,
grpc_jwt_verification_done_cb cb,
diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c
index b696e384fc..0288f7128a 100644
--- a/src/core/security/secure_endpoint.c
+++ b/src/core/security/secure_endpoint.c
@@ -49,9 +49,9 @@ typedef struct {
struct tsi_frame_protector *protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
- grpc_iomgr_closure *read_cb;
- grpc_iomgr_closure *write_cb;
- grpc_iomgr_closure on_read;
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
+ grpc_closure on_read;
gpr_slice_buffer *read_buffer;
gpr_slice_buffer source_buffer;
/* saved handshaker leftover data to unprotect. */
@@ -67,9 +67,9 @@ typedef struct {
int grpc_trace_secure_endpoint = 0;
-static void destroy(secure_endpoint *secure_ep) {
+static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
secure_endpoint *ep = secure_ep;
- grpc_endpoint_destroy(ep->wrapped_ep);
+ grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
tsi_frame_protector_destroy(ep->protector);
gpr_slice_buffer_destroy(&ep->leftover_bytes);
gpr_slice_unref(ep->read_staging_buffer);
@@ -82,16 +82,18 @@ static void destroy(secure_endpoint *secure_ep) {
/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/
#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG
-#define SECURE_ENDPOINT_UNREF(ep, reason) \
- secure_endpoint_unref((ep), (reason), __FILE__, __LINE__)
+#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
+ secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
#define SECURE_ENDPOINT_REF(ep, reason) \
secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
-static void secure_endpoint_unref(secure_endpoint *ep, const char *reason,
- const char *file, int line) {
+static void secure_endpoint_unref(secure_endpoint *ep,
+ grpc_closure_list *closure_list,
+ const char *reason, const char *file,
+ int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d",
ep, reason, ep->ref.count, ep->ref.count - 1);
if (gpr_unref(&ep->ref)) {
- destroy(ep);
+ destroy(exec_ctx, ep);
}
}
@@ -102,11 +104,13 @@ static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
gpr_ref(&ep->ref);
}
#else
-#define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep))
+#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
+ secure_endpoint_unref((exec_ctx), (ep))
#define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep))
-static void secure_endpoint_unref(secure_endpoint *ep) {
+static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx,
+ secure_endpoint *ep) {
if (gpr_unref(&ep->ref)) {
- destroy(ep);
+ destroy(exec_ctx, ep);
}
}
@@ -121,7 +125,8 @@ static void flush_read_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
*end = GPR_SLICE_END_PTR(ep->read_staging_buffer);
}
-static void call_read_cb(secure_endpoint *ep, int success) {
+static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
+ int success) {
if (grpc_trace_secure_endpoint) {
size_t i;
for (i = 0; i < ep->read_buffer->count; i++) {
@@ -132,11 +137,11 @@ static void call_read_cb(secure_endpoint *ep, int success) {
}
}
ep->read_buffer = NULL;
- ep->read_cb->cb(ep->read_cb->cb_arg, success);
- SECURE_ENDPOINT_UNREF(ep, "read");
+ grpc_exec_ctx_enqueue(exec_ctx, ep->read_cb, success);
+ SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
-static int on_read(void *user_data, int success) {
+static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
unsigned i;
gpr_uint8 keep_looping = 0;
tsi_result result = TSI_OK;
@@ -146,7 +151,8 @@ static int on_read(void *user_data, int success) {
if (!success) {
gpr_slice_buffer_reset_and_unref(ep->read_buffer);
- return 0;
+ call_read_cb(exec_ctx, ep, 0);
+ return;
}
/* TODO(yangg) check error, maybe bail out early */
@@ -202,49 +208,30 @@ static int on_read(void *user_data, int success) {
if (result != TSI_OK) {
gpr_slice_buffer_reset_and_unref(ep->read_buffer);
- return 0;
+ call_read_cb(exec_ctx, ep, 0);
+ return;
}
- return 1;
+ call_read_cb(exec_ctx, ep, 1);
}
-static void on_read_cb(void *user_data, int success) {
- call_read_cb(user_data, on_read(user_data, success));
-}
-
-static grpc_endpoint_op_status endpoint_read(grpc_endpoint *secure_ep,
- gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) {
+static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
+ gpr_slice_buffer *slices, grpc_closure *cb) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- int immediate_read_success = -1;
ep->read_cb = cb;
ep->read_buffer = slices;
gpr_slice_buffer_reset_and_unref(ep->read_buffer);
+ SECURE_ENDPOINT_REF(ep, "read");
if (ep->leftover_bytes.count) {
gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
GPR_ASSERT(ep->leftover_bytes.count == 0);
- return on_read(ep, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
- }
-
- SECURE_ENDPOINT_REF(ep, "read");
-
- switch (
- grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read)) {
- case GRPC_ENDPOINT_DONE:
- immediate_read_success = on_read(ep, 1);
- break;
- case GRPC_ENDPOINT_PENDING:
- return GRPC_ENDPOINT_PENDING;
- case GRPC_ENDPOINT_ERROR:
- immediate_read_success = on_read(ep, 0);
- break;
+ on_read(exec_ctx, ep, 1);
+ return;
}
- GPR_ASSERT(immediate_read_success != -1);
- SECURE_ENDPOINT_UNREF(ep, "read");
-
- return immediate_read_success ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
+ grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer,
+ &ep->on_read);
}
static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
@@ -255,9 +242,8 @@ static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur,
*end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
}
-static grpc_endpoint_op_status endpoint_write(grpc_endpoint *secure_ep,
- gpr_slice_buffer *slices,
- grpc_iomgr_closure *cb) {
+static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
+ gpr_slice_buffer *slices, grpc_closure *cb) {
unsigned i;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)secure_ep;
@@ -329,32 +315,37 @@ static grpc_endpoint_op_status endpoint_write(grpc_endpoint *secure_ep,
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
- return GRPC_ENDPOINT_ERROR;
+ grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
+ return;
}
- return grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb);
+ grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
}
-static void endpoint_shutdown(grpc_endpoint *secure_ep) {
+static void endpoint_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- grpc_endpoint_shutdown(ep->wrapped_ep);
+ grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep);
}
-static void endpoint_destroy(grpc_endpoint *secure_ep) {
+static void endpoint_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- SECURE_ENDPOINT_UNREF(ep, "destroy");
+ SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy");
}
-static void endpoint_add_to_pollset(grpc_endpoint *secure_ep,
+static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep,
grpc_pollset *pollset) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset);
+ grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset);
}
-static void endpoint_add_to_pollset_set(grpc_endpoint *secure_ep,
+static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep,
grpc_pollset_set *pollset_set) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
- grpc_endpoint_add_to_pollset_set(ep->wrapped_ep, pollset_set);
+ grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
}
static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
@@ -386,7 +377,7 @@ grpc_endpoint *grpc_secure_endpoint_create(
gpr_slice_buffer_init(&ep->output_buffer);
gpr_slice_buffer_init(&ep->source_buffer);
ep->read_buffer = NULL;
- grpc_iomgr_closure_init(&ep->on_read, on_read_cb, ep);
+ grpc_closure_init(&ep->on_read, on_read, ep);
gpr_mu_init(&ep->protector_mu);
gpr_ref_init(&ep->ref, 1);
return &ep->base;
diff --git a/src/core/security/security_connector.c b/src/core/security/security_connector.c
index f6460a323e..7c4cf6f04d 100644
--- a/src/core/security/security_connector.c
+++ b/src/core/security/security_connector.c
@@ -102,14 +102,15 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
return NULL;
}
-void grpc_security_connector_do_handshake(grpc_security_connector *sc,
+void grpc_security_connector_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
if (sc == NULL || nonsecure_endpoint == NULL) {
- cb(user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
} else {
- sc->vtable->do_handshake(sc, nonsecure_endpoint, cb, user_data);
+ sc->vtable->do_handshake(exec_ctx, sc, nonsecure_endpoint, cb, user_data);
}
}
@@ -124,10 +125,10 @@ grpc_security_status grpc_security_connector_check_peer(
}
grpc_security_status grpc_channel_security_connector_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data) {
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ const char *host, grpc_security_check_cb cb, void *user_data) {
if (sc == NULL || sc->check_call_host == NULL) return GRPC_SECURITY_ERROR;
- return sc->check_call_host(sc, host, cb, user_data);
+ return sc->check_call_host(exec_ctx, sc, host, cb, user_data);
}
#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
@@ -268,31 +269,33 @@ end:
}
static grpc_security_status fake_channel_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data) {
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ const char *host, grpc_security_check_cb cb, void *user_data) {
grpc_fake_channel_security_connector *c =
(grpc_fake_channel_security_connector *)sc;
if (c->call_host_check_is_async) {
- cb(user_data, GRPC_SECURITY_OK);
+ cb(exec_ctx, user_data, GRPC_SECURITY_OK);
return GRPC_SECURITY_PENDING;
} else {
return GRPC_SECURITY_OK;
}
}
-static void fake_channel_do_handshake(grpc_security_connector *sc,
+static void fake_channel_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
- grpc_do_security_handshake(tsi_create_fake_handshaker(1), sc,
+ grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(1), sc,
nonsecure_endpoint, cb, user_data);
}
-static void fake_server_do_handshake(grpc_security_connector *sc,
+static void fake_server_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
- grpc_do_security_handshake(tsi_create_fake_handshaker(0), sc,
+ grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(0), sc,
nonsecure_endpoint, cb, user_data);
}
@@ -382,7 +385,8 @@ static grpc_security_status ssl_create_handshaker(
return GRPC_SECURITY_OK;
}
-static void ssl_channel_do_handshake(grpc_security_connector *sc,
+static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
@@ -395,14 +399,15 @@ static void ssl_channel_do_handshake(grpc_security_connector *sc,
: c->target_name,
&handshaker);
if (status != GRPC_SECURITY_OK) {
- cb(user_data, status, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, status, nonsecure_endpoint, NULL);
} else {
- grpc_do_security_handshake(handshaker, sc, nonsecure_endpoint, cb,
+ grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
}
}
-static void ssl_server_do_handshake(grpc_security_connector *sc,
+static void ssl_server_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
@@ -412,9 +417,9 @@ static void ssl_server_do_handshake(grpc_security_connector *sc,
grpc_security_status status =
ssl_create_handshaker(c->handshaker_factory, 0, NULL, &handshaker);
if (status != GRPC_SECURITY_OK) {
- cb(user_data, status, nonsecure_endpoint, NULL);
+ cb(exec_ctx, user_data, status, nonsecure_endpoint, NULL);
} else {
- grpc_do_security_handshake(handshaker, sc, nonsecure_endpoint, cb,
+ grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
}
}
@@ -523,8 +528,8 @@ static grpc_security_status ssl_server_check_peer(grpc_security_connector *sc,
}
static grpc_security_status ssl_channel_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data) {
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ const char *host, grpc_security_check_cb cb, void *user_data) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
@@ -643,12 +648,12 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
goto error;
}
*sc = &c->base;
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_OK;
error:
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_ERROR;
}
@@ -698,12 +703,12 @@ grpc_security_status grpc_ssl_server_security_connector_create(
goto error;
}
*sc = &c->base;
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_OK;
error:
- gpr_free(alpn_protocol_strings);
+ gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);
return GRPC_SECURITY_ERROR;
}
diff --git a/src/core/security/security_connector.h b/src/core/security/security_connector.h
index 5fc1db382e..9218a3caab 100644
--- a/src/core/security/security_connector.h
+++ b/src/core/security/security_connector.h
@@ -60,18 +60,19 @@ typedef struct grpc_security_connector grpc_security_connector;
#define GRPC_SECURITY_CONNECTOR_ARG "grpc.security_connector"
-typedef void (*grpc_security_check_cb)(void *user_data,
+typedef void (*grpc_security_check_cb)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_security_status status);
-
/* Ownership of the secure_endpoint is transfered. */
-typedef void (*grpc_security_handshake_done_cb)(
- void *user_data, grpc_security_status status,
- grpc_endpoint *wrapped_endpoint, grpc_endpoint *secure_endpoint);
+typedef void (*grpc_security_handshake_done_cb)(grpc_exec_ctx *exec_ctx,
+ void *user_data,
+ grpc_security_status status,
+ grpc_endpoint *wrapped_endpoint,
+ grpc_endpoint *secure_endpoint);
typedef struct {
void (*destroy)(grpc_security_connector *sc);
- void (*do_handshake)(grpc_security_connector *sc,
+ void (*do_handshake)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb, void *user_data);
grpc_security_status (*check_peer)(grpc_security_connector *sc, tsi_peer peer,
@@ -108,7 +109,8 @@ void grpc_security_connector_unref(grpc_security_connector *policy);
#endif
/* Handshake. */
-void grpc_security_connector_do_handshake(grpc_security_connector *connector,
+void grpc_security_connector_do_handshake(grpc_exec_ctx *exec_ctx,
+ grpc_security_connector *connector,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data);
@@ -144,7 +146,8 @@ typedef struct grpc_channel_security_connector grpc_channel_security_connector;
struct grpc_channel_security_connector {
grpc_security_connector base; /* requires is_client_side to be non 0. */
grpc_credentials *request_metadata_creds;
- grpc_security_status (*check_call_host)(grpc_channel_security_connector *sc,
+ grpc_security_status (*check_call_host)(grpc_exec_ctx *exec_ctx,
+ grpc_channel_security_connector *sc,
const char *host,
grpc_security_check_cb cb,
void *user_data);
@@ -156,8 +159,8 @@ struct grpc_channel_security_connector {
GRPC_SECURITY_OK. In the asynchronous case, the call will return
GRPC_SECURITY_PENDING unless an error is detected early on. */
grpc_security_status grpc_channel_security_connector_check_call_host(
- grpc_channel_security_connector *sc, const char *host,
- grpc_security_check_cb cb, void *user_data);
+ grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
+ const char *host, grpc_security_check_cb cb, void *user_data);
/* --- Creation security connectors. --- */
diff --git a/src/core/security/server_auth_filter.c b/src/core/security/server_auth_filter.c
index d134201e87..9638b18e88 100644
--- a/src/core/security/server_auth_filter.c
+++ b/src/core/security/server_auth_filter.c
@@ -44,11 +44,11 @@ typedef struct call_data {
gpr_uint8 got_client_metadata;
grpc_stream_op_buffer *recv_ops;
/* Closure to call when finished with the auth_on_recv hook. */
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
/* Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member after
handling it. */
- grpc_iomgr_closure auth_on_recv;
+ grpc_closure auth_on_recv;
grpc_transport_stream_op transport_op;
grpc_metadata_array md;
const grpc_metadata *consumed_md;
@@ -109,12 +109,14 @@ static grpc_mdelem *remove_consumed_md(void *user_data, grpc_mdelem *md) {
return md;
}
+/* called from application code */
static void on_md_processing_done(
void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
const grpc_metadata *response_md, size_t num_response_md,
grpc_status_code status, const char *error_details) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
@@ -129,21 +131,24 @@ static void on_md_processing_done(
grpc_metadata_batch_filter(&calld->md_op->data.metadata, remove_consumed_md,
elem);
grpc_metadata_array_destroy(&calld->md);
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, 1);
+ calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 1);
} else {
gpr_slice message;
grpc_metadata_array_destroy(&calld->md);
error_details = error_details != NULL
- ? error_details
- : "Authentication metadata processing failed.";
+ ? error_details
+ : "Authentication metadata processing failed.";
message = gpr_slice_from_copied_string(error_details);
grpc_sopb_reset(calld->recv_ops);
grpc_transport_stream_op_add_close(&calld->transport_op, status, &message);
- grpc_call_next_op(elem, &calld->transport_op);
+ grpc_call_next_op(&exec_ctx, elem, &calld->transport_op);
}
+
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void auth_on_recv(void *user_data, int success) {
+static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
+ int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
@@ -164,7 +169,7 @@ static void auth_on_recv(void *user_data, int success) {
return;
}
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@@ -185,14 +190,15 @@ static void set_recv_ops_md_callbacks(grpc_call_element *elem,
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
-static void auth_start_transport_op(grpc_call_element *elem,
+static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
set_recv_ops_md_callbacks(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
/* grab pointers to our data from the call element */
@@ -202,7 +208,7 @@ static void init_call_elem(grpc_call_element *elem,
/* initialize members */
memset(calld, 0, sizeof(*calld));
- grpc_iomgr_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
+ grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
GPR_ASSERT(initial_op && initial_op->context != NULL &&
initial_op->context[GRPC_CONTEXT_SECURITY].value == NULL);
@@ -227,10 +233,12 @@ static void init_call_elem(grpc_call_element *elem,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_call_element *elem) {}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
/* Constructor for channel_data */
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
@@ -256,7 +264,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
GRPC_SECURITY_CONNECTOR_UNREF(chand->security_connector,
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index f7318b2079..a6c515dc34 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -65,6 +65,8 @@ typedef struct grpc_server_secure_state {
int is_shutdown;
gpr_mu mu;
gpr_refcount refcount;
+ grpc_closure destroy_closure;
+ grpc_closure *destroy_callback;
} grpc_server_secure_state;
static void state_ref(grpc_server_secure_state *state) {
@@ -83,8 +85,8 @@ static void state_unref(grpc_server_secure_state *state) {
}
}
-static void setup_transport(void *statep, grpc_transport *transport,
- grpc_mdctx *mdctx) {
+static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
+ grpc_transport *transport, grpc_mdctx *mdctx) {
static grpc_channel_filter const *extra_filters[] = {
&grpc_server_auth_filter, &grpc_http_server_filter};
grpc_server_secure_state *state = statep;
@@ -96,7 +98,7 @@ static void setup_transport(void *statep, grpc_transport *transport,
args_copy = grpc_channel_args_copy_and_add(
grpc_server_get_channel_args(state->server), args_to_add,
GPR_ARRAY_SIZE(args_to_add));
- grpc_server_setup_transport(state->server, transport, extra_filters,
+ grpc_server_setup_transport(exec_ctx, state->server, transport, extra_filters,
GPR_ARRAY_SIZE(extra_filters), mdctx, args_copy);
grpc_channel_args_destroy(args_copy);
}
@@ -122,7 +124,8 @@ static int remove_tcp_from_list_locked(grpc_server_secure_state *state,
return -1;
}
-static void on_secure_handshake_done(void *statep, grpc_security_status status,
+static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
+ grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
grpc_server_secure_state *state = statep;
@@ -134,14 +137,14 @@ static void on_secure_handshake_done(void *statep, grpc_security_status status,
if (!state->is_shutdown) {
mdctx = grpc_mdctx_create();
transport = grpc_create_chttp2_transport(
- grpc_server_get_channel_args(state->server), secure_endpoint, mdctx,
- 0);
- setup_transport(state, transport, mdctx);
- grpc_chttp2_transport_start_reading(transport, NULL, 0);
+ exec_ctx, grpc_server_get_channel_args(state->server),
+ secure_endpoint, mdctx, 0);
+ setup_transport(exec_ctx, state, transport, mdctx);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
} else {
/* We need to consume this here, because the server may already have gone
* away. */
- grpc_endpoint_destroy(secure_endpoint);
+ grpc_endpoint_destroy(exec_ctx, secure_endpoint);
}
gpr_mu_unlock(&state->mu);
} else {
@@ -153,7 +156,8 @@ static void on_secure_handshake_done(void *statep, grpc_security_status status,
state_unref(state);
}
-static void on_accept(void *statep, grpc_endpoint *tcp) {
+static void on_accept(grpc_exec_ctx *exec_ctx, void *statep,
+ grpc_endpoint *tcp) {
grpc_server_secure_state *state = statep;
tcp_endpoint_list *node;
state_ref(state);
@@ -163,23 +167,26 @@ static void on_accept(void *statep, grpc_endpoint *tcp) {
node->next = state->handshaking_tcp_endpoints;
state->handshaking_tcp_endpoints = node;
gpr_mu_unlock(&state->mu);
- grpc_security_connector_do_handshake(state->sc, tcp, on_secure_handshake_done,
- state);
+ grpc_security_connector_do_handshake(exec_ctx, state->sc, tcp,
+ on_secure_handshake_done, state);
}
/* Server callback: start listening on our ports */
-static void start(grpc_server *server, void *statep, grpc_pollset **pollsets,
- size_t pollset_count) {
+static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
+ grpc_pollset **pollsets, size_t pollset_count) {
grpc_server_secure_state *state = statep;
- grpc_tcp_server_start(state->tcp, pollsets, pollset_count, on_accept, state);
+ grpc_tcp_server_start(exec_ctx, state->tcp, pollsets, pollset_count,
+ on_accept, state);
}
-static void destroy_done(void *statep) {
+static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep, int success) {
grpc_server_secure_state *state = statep;
- grpc_server_listener_destroy_done(state->server);
+ state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
+ success);
gpr_mu_lock(&state->mu);
while (state->handshaking_tcp_endpoints != NULL) {
- grpc_endpoint_shutdown(state->handshaking_tcp_endpoints->tcp_endpoint);
+ grpc_endpoint_shutdown(exec_ctx,
+ state->handshaking_tcp_endpoints->tcp_endpoint);
remove_tcp_from_list_locked(state,
state->handshaking_tcp_endpoints->tcp_endpoint);
}
@@ -189,14 +196,17 @@ static void destroy_done(void *statep) {
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
-static void destroy(grpc_server *server, void *statep) {
+static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
+ grpc_closure *callback) {
grpc_server_secure_state *state = statep;
grpc_tcp_server *tcp;
gpr_mu_lock(&state->mu);
state->is_shutdown = 1;
+ state->destroy_callback = callback;
tcp = state->tcp;
gpr_mu_unlock(&state->mu);
- grpc_tcp_server_destroy(tcp, destroy_done, state);
+ grpc_closure_init(&state->destroy_closure, destroy_done, state);
+ grpc_tcp_server_destroy(exec_ctx, tcp, &state->destroy_closure);
}
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
@@ -210,6 +220,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
int port_temp;
grpc_security_status status = GRPC_SECURITY_ERROR;
grpc_security_connector *sc = NULL;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* create security context */
if (creds == NULL) goto error;
@@ -270,8 +281,9 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
gpr_ref_init(&state->refcount, 1);
/* Register with the server only upon success */
- grpc_server_add_listener(server, state, start, destroy);
+ grpc_server_add_listener(&exec_ctx, server, state, start, destroy);
+ grpc_exec_ctx_finish(&exec_ctx);
return port_num;
/* Error path: cleanup and return */
@@ -283,10 +295,11 @@ error:
grpc_resolved_addresses_destroy(resolved);
}
if (tcp) {
- grpc_tcp_server_destroy(tcp, NULL, NULL);
+ grpc_tcp_server_destroy(&exec_ctx, tcp, NULL);
}
if (state) {
gpr_free(state);
}
+ grpc_exec_ctx_finish(&exec_ctx);
return 0;
}
diff --git a/src/core/statistics/census_interface.h b/src/core/statistics/census_interface.h
index ac1ff24866..e870357276 100644
--- a/src/core/statistics/census_interface.h
+++ b/src/core/statistics/census_interface.h
@@ -61,11 +61,11 @@ void census_shutdown(void);
TODO(hongyu): Figure out valid characters set for service name and command
name and document requirements here.*/
-int census_add_method_tag(census_op_id op_id, const char* method_name);
+int census_add_method_tag(census_op_id op_id, const char *method_name);
/* Annotates tracing information to a specific op_id.
Up to CENSUS_MAX_ANNOTATION_LENGTH bytes are recorded. */
-void census_tracing_print(census_op_id op_id, const char* annotation);
+void census_tracing_print(census_op_id op_id, const char *annotation);
/* Starts tracing for an RPC. Returns a locally unique census_op_id */
census_op_id census_tracing_start_op(void);
diff --git a/src/core/statistics/census_log.c b/src/core/statistics/census_log.c
index ec56ce38df..88e338038d 100644
--- a/src/core/statistics/census_log.c
+++ b/src/core/statistics/census_log.c
@@ -102,14 +102,14 @@
/* End of platform specific code */
typedef struct census_log_block_list_struct {
- struct census_log_block_list_struct* next;
- struct census_log_block_list_struct* prev;
- struct census_log_block* block;
+ struct census_log_block_list_struct *next;
+ struct census_log_block_list_struct *prev;
+ struct census_log_block *block;
} cl_block_list_struct;
typedef struct census_log_block {
/* Pointer to underlying buffer */
- char* buffer;
+ char *buffer;
gpr_atm writer_lock;
gpr_atm reader_lock;
/* Keeps completely written bytes. Declared atomic because accessed
@@ -176,8 +176,8 @@ struct census_log {
unsigned num_cores;
/* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
gpr_int32 num_blocks;
- cl_block* blocks; /* Block metadata. */
- cl_core_local_block* core_local_blocks; /* Keeps core to block mappings. */
+ cl_block *blocks; /* Block metadata. */
+ cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
gpr_mu lock;
int initialized; /* has log been initialized? */
/* Keeps the state of the reader iterator. A value of 0 indicates that
@@ -186,10 +186,10 @@ struct census_log {
gpr_uint32 read_iterator_state;
/* Points to the block being read. If non-NULL, the block is locked for
reading (block_being_read_->reader_lock is held). */
- cl_block* block_being_read;
+ cl_block *block_being_read;
/* A non-zero value indicates that log is full. */
gpr_atm is_full;
- char* buffer;
+ char *buffer;
cl_block_list free_block_list;
cl_block_list dirty_block_list;
gpr_atm out_of_space_count;
@@ -201,44 +201,44 @@ static struct census_log g_log;
/* Functions that operate on an atomic memory location used as a lock */
/* Returns non-zero if lock is acquired */
-static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
+static int cl_try_lock(gpr_atm *lock) { return gpr_atm_acq_cas(lock, 0, 1); }
-static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
+static void cl_unlock(gpr_atm *lock) { gpr_atm_rel_store(lock, 0); }
/* Functions that operate on cl_core_local_block's */
-static void cl_core_local_block_set_block(cl_core_local_block* clb,
- cl_block* block) {
+static void cl_core_local_block_set_block(cl_core_local_block *clb,
+ cl_block *block) {
gpr_atm_rel_store(&clb->block, (gpr_atm)block);
}
-static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
- return (cl_block*)gpr_atm_acq_load(&clb->block);
+static cl_block *cl_core_local_block_get_block(cl_core_local_block *clb) {
+ return (cl_block *)gpr_atm_acq_load(&clb->block);
}
/* Functions that operate on cl_block_list_struct's */
-static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
- cl_block* block) {
+static void cl_block_list_struct_initialize(cl_block_list_struct *bls,
+ cl_block *block) {
bls->next = bls->prev = bls;
bls->block = block;
}
/* Functions that operate on cl_block_list's */
-static void cl_block_list_initialize(cl_block_list* list) {
+static void cl_block_list_initialize(cl_block_list *list) {
list->count = 0;
cl_block_list_struct_initialize(&list->ht, NULL);
}
/* Returns head of *this, or NULL if empty. */
-static cl_block* cl_block_list_head(cl_block_list* list) {
+static cl_block *cl_block_list_head(cl_block_list *list) {
return list->ht.next->block;
}
/* Insert element *e after *pos. */
-static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
- cl_block_list_struct* e) {
+static void cl_block_list_insert(cl_block_list *list, cl_block_list_struct *pos,
+ cl_block_list_struct *e) {
list->count++;
e->next = pos->next;
e->prev = pos;
@@ -247,17 +247,17 @@ static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
}
/* Insert block at the head of the list */
-static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
+static void cl_block_list_insert_at_head(cl_block_list *list, cl_block *block) {
cl_block_list_insert(list, &list->ht, &block->link);
}
/* Insert block at the tail of the list */
-static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
+static void cl_block_list_insert_at_tail(cl_block_list *list, cl_block *block) {
cl_block_list_insert(list, list->ht.prev, &block->link);
}
/* Removes block *b. Requires *b be in the list. */
-static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
+static void cl_block_list_remove(cl_block_list *list, cl_block *b) {
list->count--;
b->link.next->prev = b->link.prev;
b->link.prev->next = b->link.next;
@@ -265,7 +265,7 @@ static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
/* Functions that operate on cl_block's */
-static void cl_block_initialize(cl_block* block, char* buffer) {
+static void cl_block_initialize(cl_block *block, char *buffer) {
block->buffer = buffer;
gpr_atm_rel_store(&block->writer_lock, 0);
gpr_atm_rel_store(&block->reader_lock, 0);
@@ -275,12 +275,12 @@ static void cl_block_initialize(cl_block* block, char* buffer) {
}
/* Guards against exposing partially written buffer to the reader. */
-static void cl_block_set_bytes_committed(cl_block* block,
+static void cl_block_set_bytes_committed(cl_block *block,
gpr_int32 bytes_committed) {
gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
}
-static gpr_int32 cl_block_get_bytes_committed(cl_block* block) {
+static gpr_int32 cl_block_get_bytes_committed(cl_block *block) {
return gpr_atm_acq_load(&block->bytes_committed);
}
@@ -291,7 +291,7 @@ static gpr_int32 cl_block_get_bytes_committed(cl_block* block) {
On success, clears the block state and returns with writer_lock_ and
reader_lock_ held. These locks are released by a subsequent
cl_block_access_enable() call. */
-static int cl_block_try_disable_access(cl_block* block, int discard_data) {
+static int cl_block_try_disable_access(cl_block *block, int discard_data) {
if (!cl_try_lock(&block->writer_lock)) {
return 0;
}
@@ -310,13 +310,13 @@ static int cl_block_try_disable_access(cl_block* block, int discard_data) {
return 1;
}
-static void cl_block_enable_access(cl_block* block) {
+static void cl_block_enable_access(cl_block *block) {
cl_unlock(&block->reader_lock);
cl_unlock(&block->writer_lock);
}
/* Returns with writer_lock held. */
-static void* cl_block_start_write(cl_block* block, size_t size) {
+static void *cl_block_start_write(cl_block *block, size_t size) {
gpr_int32 bytes_committed;
if (!cl_try_lock(&block->writer_lock)) {
return NULL;
@@ -332,7 +332,7 @@ static void* cl_block_start_write(cl_block* block, size_t size) {
/* Releases writer_lock and increments committed bytes by 'bytes_written'.
'bytes_written' must be <= 'size' specified in the corresponding
StartWrite() call. This function is thread-safe. */
-static void cl_block_end_write(cl_block* block, size_t bytes_written) {
+static void cl_block_end_write(cl_block *block, size_t bytes_written) {
cl_block_set_bytes_committed(
block, cl_block_get_bytes_committed(block) + bytes_written);
cl_unlock(&block->writer_lock);
@@ -343,8 +343,8 @@ static void cl_block_end_write(cl_block* block, size_t bytes_written) {
released by a subsequent cl_block_end_read() call. Returns NULL if:
- read in progress
- no data available */
-static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
- void* record;
+static void *cl_block_start_read(cl_block *block, size_t *bytes_available) {
+ void *record;
if (!cl_try_lock(&block->reader_lock)) {
return NULL;
}
@@ -360,7 +360,7 @@ static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
return record;
}
-static void cl_block_end_read(cl_block* block) {
+static void cl_block_end_read(cl_block *block) {
cl_unlock(&block->reader_lock);
}
@@ -368,8 +368,8 @@ static void cl_block_end_read(cl_block* block) {
/* Allocates a new free block (or recycles an available dirty block if log is
configured to discard old records). Returns NULL if out-of-space. */
-static cl_block* cl_allocate_block(void) {
- cl_block* block = cl_block_list_head(&g_log.free_block_list);
+static cl_block *cl_allocate_block(void) {
+ cl_block *block = cl_block_list_head(&g_log.free_block_list);
if (block != NULL) {
cl_block_list_remove(&g_log.free_block_list, block);
return block;
@@ -396,10 +396,10 @@ static cl_block* cl_allocate_block(void) {
- 'core_id' => 'old_block' mapping changed (another thread allocated a
block before lock was acquired). */
static int cl_allocate_core_local_block(gpr_int32 core_id,
- cl_block* old_block) {
+ cl_block *old_block) {
/* Now that we have the lock, check if core-local mapping has changed. */
- cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
- cl_block* block = cl_core_local_block_get_block(core_local_block);
+ cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
+ cl_block *block = cl_core_local_block_get_block(core_local_block);
if ((block != NULL) && (block != old_block)) {
return 1;
}
@@ -417,16 +417,16 @@ static int cl_allocate_core_local_block(gpr_int32 core_id,
return 1;
}
-static cl_block* cl_get_block(void* record) {
- gpr_uintptr p = (gpr_uintptr)((char*)record - g_log.buffer);
+static cl_block *cl_get_block(void *record) {
+ gpr_uintptr p = (gpr_uintptr)((char *)record - g_log.buffer);
gpr_uintptr index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
return &g_log.blocks[index];
}
/* Gets the next block to read and tries to free 'prev' block (if not NULL).
Returns NULL if reached the end. */
-static cl_block* cl_next_block_to_read(cl_block* prev) {
- cl_block* block = NULL;
+static cl_block *cl_next_block_to_read(cl_block *prev) {
+ cl_block *block = NULL;
if (g_log.read_iterator_state == g_log.num_cores) {
/* We are traversing dirty list; find the next dirty block. */
if (prev != NULL) {
@@ -474,11 +474,11 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
g_log.read_iterator_state = 0;
g_log.block_being_read = NULL;
gpr_atm_rel_store(&g_log.is_full, 0);
- g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
+ g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.core_local_blocks, 0,
g_log.num_cores * sizeof(cl_core_local_block));
- g_log.blocks = (cl_block*)gpr_malloc_aligned(
+ g_log.blocks = (cl_block *)gpr_malloc_aligned(
g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
@@ -486,7 +486,7 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
cl_block_list_initialize(&g_log.free_block_list);
cl_block_list_initialize(&g_log.dirty_block_list);
for (ix = 0; ix < g_log.num_blocks; ++ix) {
- cl_block* block = g_log.blocks + ix;
+ cl_block *block = g_log.blocks + ix;
cl_block_initialize(block,
g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
cl_block_try_disable_access(block, 1 /* discard data */);
@@ -508,7 +508,7 @@ void census_log_shutdown(void) {
g_log.initialized = 0;
}
-void* census_log_start_write(size_t size) {
+void *census_log_start_write(size_t size) {
/* Used to bound number of times block allocation is attempted. */
gpr_int32 attempts_remaining = g_log.num_blocks;
/* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
@@ -519,8 +519,8 @@ void* census_log_start_write(size_t size) {
}
do {
int allocated;
- void* record = NULL;
- cl_block* block =
+ void *record = NULL;
+ cl_block *block =
cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
if (block && (record = cl_block_start_write(block, size))) {
return record;
@@ -546,7 +546,7 @@ void* census_log_start_write(size_t size) {
return NULL;
}
-void census_log_end_write(void* record, size_t bytes_written) {
+void census_log_end_write(void *record, size_t bytes_written) {
GPR_ASSERT(g_log.initialized);
cl_block_end_write(cl_get_block(record), bytes_written);
}
@@ -563,7 +563,7 @@ void census_log_init_reader(void) {
gpr_mu_unlock(&g_log.lock);
}
-const void* census_log_read_next(size_t* bytes_available) {
+const void *census_log_read_next(size_t *bytes_available) {
GPR_ASSERT(g_log.initialized);
gpr_mu_lock(&g_log.lock);
if (g_log.block_being_read != NULL) {
@@ -572,7 +572,7 @@ const void* census_log_read_next(size_t* bytes_available) {
do {
g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
if (g_log.block_being_read != NULL) {
- void* record =
+ void *record =
cl_block_start_read(g_log.block_being_read, bytes_available);
if (record != NULL) {
gpr_mu_unlock(&g_log.lock);
diff --git a/src/core/statistics/census_log.h b/src/core/statistics/census_log.h
index 60b6d597df..356437c346 100644
--- a/src/core/statistics/census_log.h
+++ b/src/core/statistics/census_log.h
@@ -62,9 +62,9 @@ void census_log_shutdown(void);
- log is configured to keep old records OR
- all blocks are pinned by incomplete records.
*/
-void* census_log_start_write(size_t size);
+void *census_log_start_write(size_t size);
-void census_log_end_write(void* record, size_t bytes_written);
+void census_log_end_write(void *record, size_t bytes_written);
/* census_log_read_next() iterates over blocks with data and for each block
returns a pointer to the first unread byte. The number of bytes that can be
@@ -75,7 +75,7 @@ void census_log_end_write(void* record, size_t bytes_written);
current iteration.
*/
void census_log_init_reader(void);
-const void* census_log_read_next(size_t* bytes_available);
+const void *census_log_read_next(size_t *bytes_available);
/* Returns estimated remaining space across all blocks, in bytes. If log is
configured to discard old records, returns total log space. Otherwise,
diff --git a/src/core/statistics/census_rpc_stats.c b/src/core/statistics/census_rpc_stats.c
index b836987cf0..ba2c81d6a3 100644
--- a/src/core/statistics/census_rpc_stats.c
+++ b/src/core/statistics/census_rpc_stats.c
@@ -56,8 +56,8 @@ typedef census_per_method_rpc_stats per_method_stats;
static gpr_once g_stats_store_mu_init = GPR_ONCE_INIT;
/* Guards two stats stores. */
static gpr_mu g_mu;
-static census_ht* g_client_stats_store = NULL;
-static census_ht* g_server_stats_store = NULL;
+static census_ht *g_client_stats_store = NULL;
+static census_ht *g_server_stats_store = NULL;
static void init_mutex(void) { gpr_mu_init(&g_mu); }
@@ -65,23 +65,23 @@ static void init_mutex_once(void) {
gpr_once_init(&g_stats_store_mu_init, init_mutex);
}
-static int cmp_str_keys(const void* k1, const void* k2) {
- return strcmp((const char*)k1, (const char*)k2);
+static int cmp_str_keys(const void *k1, const void *k2) {
+ return strcmp((const char *)k1, (const char *)k2);
}
/* TODO(hongyu): replace it with cityhash64 */
-static gpr_uint64 simple_hash(const void* k) {
+static gpr_uint64 simple_hash(const void *k) {
size_t len = strlen(k);
- gpr_uint64 higher = gpr_murmur_hash3((const char*)k, len / 2, 0);
+ gpr_uint64 higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
return higher << 32 |
- gpr_murmur_hash3((const char*)k + len / 2, len - len / 2, 0);
+ gpr_murmur_hash3((const char *)k + len / 2, len - len / 2, 0);
}
-static void delete_stats(void* stats) {
- census_window_stats_destroy((struct census_window_stats*)stats);
+static void delete_stats(void *stats) {
+ census_window_stats_destroy((struct census_window_stats *)stats);
}
-static void delete_key(void* key) { gpr_free(key); }
+static void delete_key(void *key) { gpr_free(key); }
static const census_ht_option ht_opt = {
CENSUS_HT_POINTER /* key type */, 1999 /* n_of_buckets */,
@@ -89,13 +89,13 @@ static const census_ht_option ht_opt = {
delete_stats /* data deleter */, delete_key /* key deleter */
};
-static void init_rpc_stats(void* stats) {
+static void init_rpc_stats(void *stats) {
memset(stats, 0, sizeof(census_rpc_stats));
}
-static void stat_add_proportion(double p, void* base, const void* addme) {
- census_rpc_stats* b = (census_rpc_stats*)base;
- census_rpc_stats* a = (census_rpc_stats*)addme;
+static void stat_add_proportion(double p, void *base, const void *addme) {
+ census_rpc_stats *b = (census_rpc_stats *)base;
+ census_rpc_stats *a = (census_rpc_stats *)addme;
b->cnt += p * a->cnt;
b->rpc_error_cnt += p * a->rpc_error_cnt;
b->app_error_cnt += p * a->app_error_cnt;
@@ -106,7 +106,7 @@ static void stat_add_proportion(double p, void* base, const void* addme) {
b->wire_response_bytes += p * a->wire_response_bytes;
}
-static void stat_add(void* base, const void* addme) {
+static void stat_add(void *base, const void *addme) {
stat_add_proportion(1.0, base, addme);
}
@@ -116,18 +116,18 @@ static gpr_timespec min_hour_total_intervals[3] = {
static const census_window_stats_stat_info window_stats_settings = {
sizeof(census_rpc_stats), init_rpc_stats, stat_add, stat_add_proportion};
-census_rpc_stats* census_rpc_stats_create_empty(void) {
- census_rpc_stats* ret =
- (census_rpc_stats*)gpr_malloc(sizeof(census_rpc_stats));
+census_rpc_stats *census_rpc_stats_create_empty(void) {
+ census_rpc_stats *ret =
+ (census_rpc_stats *)gpr_malloc(sizeof(census_rpc_stats));
memset(ret, 0, sizeof(census_rpc_stats));
return ret;
}
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats* data) {
+void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data) {
int i = 0;
for (i = 0; i < data->num_entries; i++) {
if (data->stats[i].method != NULL) {
- gpr_free((void*)data->stats[i].method);
+ gpr_free((void *)data->stats[i].method);
}
}
if (data->stats != NULL) {
@@ -137,25 +137,25 @@ void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats* data) {
data->stats = NULL;
}
-static void record_stats(census_ht* store, census_op_id op_id,
- const census_rpc_stats* stats) {
+static void record_stats(census_ht *store, census_op_id op_id,
+ const census_rpc_stats *stats) {
gpr_mu_lock(&g_mu);
if (store != NULL) {
- census_trace_obj* trace = NULL;
+ census_trace_obj *trace = NULL;
census_internal_lock_trace_store();
trace = census_get_trace_obj_locked(op_id);
if (trace != NULL) {
- const char* method_name = census_get_trace_method_name(trace);
- struct census_window_stats* window_stats = NULL;
+ const char *method_name = census_get_trace_method_name(trace);
+ struct census_window_stats *window_stats = NULL;
census_ht_key key;
- key.ptr = (void*)method_name;
+ key.ptr = (void *)method_name;
window_stats = census_ht_find(store, key);
census_internal_unlock_trace_store();
if (window_stats == NULL) {
window_stats = census_window_stats_create(3, min_hour_total_intervals,
30, &window_stats_settings);
key.ptr = gpr_strdup(key.ptr);
- census_ht_insert(store, key, (void*)window_stats);
+ census_ht_insert(store, key, (void *)window_stats);
}
census_window_stats_add(window_stats, gpr_now(GPR_CLOCK_REALTIME), stats);
} else {
@@ -166,17 +166,17 @@ static void record_stats(census_ht* store, census_op_id op_id,
}
void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats* stats) {
+ const census_rpc_stats *stats) {
record_stats(g_client_stats_store, op_id, stats);
}
void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats* stats) {
+ const census_rpc_stats *stats) {
record_stats(g_server_stats_store, op_id, stats);
}
/* Get stats from input stats store */
-static void get_stats(census_ht* store, census_aggregated_rpc_stats* data) {
+static void get_stats(census_ht *store, census_aggregated_rpc_stats *data) {
GPR_ASSERT(data != NULL);
if (data->num_entries != 0) {
census_aggregated_rpc_stats_set_empty(data);
@@ -186,23 +186,24 @@ static void get_stats(census_ht* store, census_aggregated_rpc_stats* data) {
size_t n;
unsigned i, j;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
- census_ht_kv* kv = census_ht_get_all_elements(store, &n);
+ census_ht_kv *kv = census_ht_get_all_elements(store, &n);
if (kv != NULL) {
data->num_entries = n;
- data->stats = (per_method_stats*)gpr_malloc(sizeof(per_method_stats) * n);
+ data->stats =
+ (per_method_stats *)gpr_malloc(sizeof(per_method_stats) * n);
for (i = 0; i < n; i++) {
census_window_stats_sums sums[NUM_INTERVALS];
for (j = 0; j < NUM_INTERVALS; j++) {
- sums[j].statistic = (void*)census_rpc_stats_create_empty();
+ sums[j].statistic = (void *)census_rpc_stats_create_empty();
}
data->stats[i].method = gpr_strdup(kv[i].k.ptr);
census_window_stats_get_sums(kv[i].v, now, sums);
data->stats[i].minute_stats =
- *(census_rpc_stats*)sums[MINUTE_INTERVAL].statistic;
+ *(census_rpc_stats *)sums[MINUTE_INTERVAL].statistic;
data->stats[i].hour_stats =
- *(census_rpc_stats*)sums[HOUR_INTERVAL].statistic;
+ *(census_rpc_stats *)sums[HOUR_INTERVAL].statistic;
data->stats[i].total_stats =
- *(census_rpc_stats*)sums[TOTAL_INTERVAL].statistic;
+ *(census_rpc_stats *)sums[TOTAL_INTERVAL].statistic;
for (j = 0; j < NUM_INTERVALS; j++) {
gpr_free(sums[j].statistic);
}
@@ -213,11 +214,11 @@ static void get_stats(census_ht* store, census_aggregated_rpc_stats* data) {
gpr_mu_unlock(&g_mu);
}
-void census_get_client_stats(census_aggregated_rpc_stats* data) {
+void census_get_client_stats(census_aggregated_rpc_stats *data) {
get_stats(g_client_stats_store, data);
}
-void census_get_server_stats(census_aggregated_rpc_stats* data) {
+void census_get_server_stats(census_aggregated_rpc_stats *data) {
get_stats(g_server_stats_store, data);
}
diff --git a/src/core/statistics/census_rpc_stats.h b/src/core/statistics/census_rpc_stats.h
index aec31c1971..5edbe9f478 100644
--- a/src/core/statistics/census_rpc_stats.h
+++ b/src/core/statistics/census_rpc_stats.h
@@ -53,10 +53,10 @@ struct census_rpc_stats {
};
/* Creates an empty rpc stats object on heap. */
-census_rpc_stats* census_rpc_stats_create_empty(void);
+census_rpc_stats *census_rpc_stats_create_empty(void);
typedef struct census_per_method_rpc_stats {
- const char* method;
+ const char *method;
census_rpc_stats minute_stats; /* cumulative stats in the past minute */
census_rpc_stats hour_stats; /* cumulative stats in the past hour */
census_rpc_stats total_stats; /* cumulative stats from last gc */
@@ -64,19 +64,19 @@ typedef struct census_per_method_rpc_stats {
typedef struct census_aggregated_rpc_stats {
int num_entries;
- census_per_method_rpc_stats* stats;
+ census_per_method_rpc_stats *stats;
} census_aggregated_rpc_stats;
/* Initializes an aggregated rpc stats object to an empty state. */
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats* data);
+void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data);
/* Records client side stats of a rpc. */
void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats* stats);
+ const census_rpc_stats *stats);
/* Records server side stats of a rpc. */
void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats* stats);
+ const census_rpc_stats *stats);
/* The following two functions are intended for inprocess query of
per-service per-method stats from grpc implementations. */
@@ -84,12 +84,12 @@ void census_record_rpc_server_stats(census_op_id op_id,
/* Populates *data_map with server side aggregated per-service per-method
stats.
DO NOT CALL from outside of grpc code. */
-void census_get_server_stats(census_aggregated_rpc_stats* data_map);
+void census_get_server_stats(census_aggregated_rpc_stats *data_map);
/* Populates *data_map with client side aggregated per-service per-method
stats.
DO NOT CALL from outside of grpc code. */
-void census_get_client_stats(census_aggregated_rpc_stats* data_map);
+void census_get_client_stats(census_aggregated_rpc_stats *data_map);
void census_stats_store_init(void);
void census_stats_store_shutdown(void);
diff --git a/src/core/statistics/census_tracing.c b/src/core/statistics/census_tracing.c
index f2a09dc06e..0eeecfe6c8 100644
--- a/src/core/statistics/census_tracing.c
+++ b/src/core/statistics/census_tracing.c
@@ -44,10 +44,10 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
-void census_trace_obj_destroy(census_trace_obj* obj) {
- census_trace_annotation* p = obj->annotations;
+void census_trace_obj_destroy(census_trace_obj *obj) {
+ census_trace_annotation *p = obj->annotations;
while (p != NULL) {
- census_trace_annotation* next = p->next;
+ census_trace_annotation *next = p->next;
gpr_free(p);
p = next;
}
@@ -55,12 +55,12 @@ void census_trace_obj_destroy(census_trace_obj* obj) {
gpr_free(obj);
}
-static void delete_trace_obj(void* obj) {
- census_trace_obj_destroy((census_trace_obj*)obj);
+static void delete_trace_obj(void *obj) {
+ census_trace_obj_destroy((census_trace_obj *)obj);
}
static const census_ht_option ht_opt = {
- CENSUS_HT_UINT64 /* key type*/,
+ CENSUS_HT_UINT64 /* key type */,
571 /* n_of_buckets */,
NULL /* hash */,
NULL /* compare_keys */,
@@ -70,14 +70,14 @@ static const census_ht_option ht_opt = {
static gpr_once g_init_mutex_once = GPR_ONCE_INIT;
static gpr_mu g_mu; /* Guards following two static variables. */
-static census_ht* g_trace_store = NULL;
+static census_ht *g_trace_store = NULL;
static gpr_uint64 g_id = 0;
-static census_ht_key op_id_as_key(census_op_id* id) {
- return *(census_ht_key*)id;
+static census_ht_key op_id_as_key(census_op_id *id) {
+ return *(census_ht_key *)id;
}
-static gpr_uint64 op_id_2_uint64(census_op_id* id) {
+static gpr_uint64 op_id_2_uint64(census_op_id *id) {
gpr_uint64 ret;
memcpy(&ret, id, sizeof(census_op_id));
return ret;
@@ -92,22 +92,22 @@ static void init_mutex_once(void) {
census_op_id census_tracing_start_op(void) {
gpr_mu_lock(&g_mu);
{
- census_trace_obj* ret = gpr_malloc(sizeof(census_trace_obj));
+ census_trace_obj *ret = gpr_malloc(sizeof(census_trace_obj));
memset(ret, 0, sizeof(census_trace_obj));
g_id++;
memcpy(&ret->id, &g_id, sizeof(census_op_id));
ret->rpc_stats.cnt = 1;
ret->ts = gpr_now(GPR_CLOCK_REALTIME);
- census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void*)ret);
+ census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void *)ret);
gpr_log(GPR_DEBUG, "Start tracing for id %lu", g_id);
gpr_mu_unlock(&g_mu);
return ret->id;
}
}
-int census_add_method_tag(census_op_id op_id, const char* method) {
+int census_add_method_tag(census_op_id op_id, const char *method) {
int ret = 0;
- census_trace_obj* trace = NULL;
+ census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace == NULL) {
@@ -119,16 +119,16 @@ int census_add_method_tag(census_op_id op_id, const char* method) {
return ret;
}
-void census_tracing_print(census_op_id op_id, const char* anno_txt) {
- census_trace_obj* trace = NULL;
+void census_tracing_print(census_op_id op_id, const char *anno_txt) {
+ census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace != NULL) {
- census_trace_annotation* anno = gpr_malloc(sizeof(census_trace_annotation));
+ census_trace_annotation *anno = gpr_malloc(sizeof(census_trace_annotation));
anno->ts = gpr_now(GPR_CLOCK_REALTIME);
{
- char* d = anno->txt;
- const char* s = anno_txt;
+ char *d = anno->txt;
+ const char *s = anno_txt;
int n = 0;
for (; n < CENSUS_MAX_ANNOTATION_LENGTH && *s != '\0'; ++n) {
*d++ = *s++;
@@ -142,7 +142,7 @@ void census_tracing_print(census_op_id op_id, const char* anno_txt) {
}
void census_tracing_end_op(census_op_id op_id) {
- census_trace_obj* trace = NULL;
+ census_trace_obj *trace = NULL;
gpr_mu_lock(&g_mu);
trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
if (trace != NULL) {
@@ -183,22 +183,23 @@ void census_internal_lock_trace_store(void) { gpr_mu_lock(&g_mu); }
void census_internal_unlock_trace_store(void) { gpr_mu_unlock(&g_mu); }
-census_trace_obj* census_get_trace_obj_locked(census_op_id op_id) {
+census_trace_obj *census_get_trace_obj_locked(census_op_id op_id) {
if (g_trace_store == NULL) {
gpr_log(GPR_ERROR, "Census trace store is not initialized.");
return NULL;
}
- return (census_trace_obj*)census_ht_find(g_trace_store, op_id_as_key(&op_id));
+ return (census_trace_obj *)census_ht_find(g_trace_store,
+ op_id_as_key(&op_id));
}
-const char* census_get_trace_method_name(const census_trace_obj* trace) {
+const char *census_get_trace_method_name(const census_trace_obj *trace) {
return trace->method;
}
-static census_trace_annotation* dup_annotation_chain(
- census_trace_annotation* from) {
- census_trace_annotation* ret = NULL;
- census_trace_annotation** to = &ret;
+static census_trace_annotation *dup_annotation_chain(
+ census_trace_annotation *from) {
+ census_trace_annotation *ret = NULL;
+ census_trace_annotation **to = &ret;
for (; from != NULL; from = from->next) {
*to = gpr_malloc(sizeof(census_trace_annotation));
memcpy(*to, from, sizeof(census_trace_annotation));
@@ -207,8 +208,8 @@ static census_trace_annotation* dup_annotation_chain(
return ret;
}
-static census_trace_obj* trace_obj_dup(census_trace_obj* from) {
- census_trace_obj* to = NULL;
+static census_trace_obj *trace_obj_dup(census_trace_obj *from) {
+ census_trace_obj *to = NULL;
GPR_ASSERT(from != NULL);
to = gpr_malloc(sizeof(census_trace_obj));
to->id = from->id;
@@ -219,18 +220,18 @@ static census_trace_obj* trace_obj_dup(census_trace_obj* from) {
return to;
}
-census_trace_obj** census_get_active_ops(int* num_active_ops) {
- census_trace_obj** ret = NULL;
+census_trace_obj **census_get_active_ops(int *num_active_ops) {
+ census_trace_obj **ret = NULL;
gpr_mu_lock(&g_mu);
if (g_trace_store != NULL) {
size_t n = 0;
- census_ht_kv* all_kvs = census_ht_get_all_elements(g_trace_store, &n);
+ census_ht_kv *all_kvs = census_ht_get_all_elements(g_trace_store, &n);
*num_active_ops = (int)n;
if (n != 0) {
size_t i = 0;
- ret = gpr_malloc(sizeof(census_trace_obj*) * n);
+ ret = gpr_malloc(sizeof(census_trace_obj *) * n);
for (i = 0; i < n; i++) {
- ret[i] = trace_obj_dup((census_trace_obj*)all_kvs[i].v);
+ ret[i] = trace_obj_dup((census_trace_obj *)all_kvs[i].v);
}
}
gpr_free(all_kvs);
diff --git a/src/core/statistics/census_tracing.h b/src/core/statistics/census_tracing.h
index 08305c2469..bb3f2556d2 100644
--- a/src/core/statistics/census_tracing.h
+++ b/src/core/statistics/census_tracing.h
@@ -50,19 +50,19 @@ extern "C" {
typedef struct census_trace_annotation {
gpr_timespec ts; /* timestamp of the annotation */
char txt[CENSUS_MAX_ANNOTATION_LENGTH + 1]; /* actual txt annotation */
- struct census_trace_annotation* next;
+ struct census_trace_annotation *next;
} census_trace_annotation;
typedef struct census_trace_obj {
census_op_id id;
gpr_timespec ts;
census_rpc_stats rpc_stats;
- char* method;
- census_trace_annotation* annotations;
+ char *method;
+ census_trace_annotation *annotations;
} census_trace_obj;
/* Deletes trace object. */
-void census_trace_obj_destroy(census_trace_obj* obj);
+void census_trace_obj_destroy(census_trace_obj *obj);
/* Initializes trace store. This function is thread safe. */
void census_tracing_init(void);
@@ -73,7 +73,7 @@ void census_tracing_shutdown(void);
/* Gets trace obj corresponding to the input op_id. Returns NULL if trace store
is not initialized or trace obj is not found. Requires trace store being
locked before calling this function. */
-census_trace_obj* census_get_trace_obj_locked(census_op_id op_id);
+census_trace_obj *census_get_trace_obj_locked(census_op_id op_id);
/* The following two functions acquire and release the trace store global lock.
They are for census internal use only. */
@@ -81,13 +81,13 @@ void census_internal_lock_trace_store(void);
void census_internal_unlock_trace_store(void);
/* Gets method name associated with the input trace object. */
-const char* census_get_trace_method_name(const census_trace_obj* trace);
+const char *census_get_trace_method_name(const census_trace_obj *trace);
/* Returns an array of pointers to trace objects of currently active operations
and fills in number of active operations. Returns NULL if there are no active
operations.
Caller owns the returned objects. */
-census_trace_obj** census_get_active_ops(int* num_active_ops);
+census_trace_obj **census_get_active_ops(int *num_active_ops);
#ifdef __cplusplus
}
diff --git a/src/core/statistics/hash_table.c b/src/core/statistics/hash_table.c
index 56bdcc2fff..39b760f0e0 100644
--- a/src/core/statistics/hash_table.c
+++ b/src/core/statistics/hash_table.c
@@ -45,14 +45,14 @@
/* A single hash table data entry */
typedef struct ht_entry {
census_ht_key key;
- void* data;
- struct ht_entry* next;
+ void *data;
+ struct ht_entry *next;
} ht_entry;
/* hash table bucket */
typedef struct bucket {
/* NULL if bucket is empty */
- ht_entry* next;
+ ht_entry *next;
/* -1 if all buckets are empty. */
gpr_int32 prev_non_empty_bucket;
/* -1 if all buckets are empty. */
@@ -66,7 +66,7 @@ struct unresizable_hash_table {
gpr_uint32 num_buckets;
/* Array of buckets initialized at creation time. Memory consumption is
16 bytes per bucket on a 64-bit platform. */
- bucket* buckets;
+ bucket *buckets;
/* Index of the first non-empty bucket. -1 iff size == 0. */
gpr_int32 first_non_empty_bucket;
/* Index of the last non_empty bucket. -1 iff size == 0. */
@@ -79,11 +79,11 @@ typedef struct entry_locator {
gpr_int32 bucket_idx;
int is_first_in_chain;
int found;
- ht_entry* prev_entry;
+ ht_entry *prev_entry;
} entry_locator;
/* Asserts if option is not valid. */
-void check_options(const census_ht_option* option) {
+void check_options(const census_ht_option *option) {
GPR_ASSERT(option != NULL);
GPR_ASSERT(option->num_buckets > 0);
GPR_ASSERT(option->key_type == CENSUS_HT_UINT64 ||
@@ -98,12 +98,12 @@ void check_options(const census_ht_option* option) {
#define REMOVE_NEXT(options, ptr) \
do { \
- ht_entry* tmp = (ptr)->next; \
+ ht_entry *tmp = (ptr)->next; \
(ptr)->next = tmp->next; \
delete_entry(options, tmp); \
} while (0)
-static void delete_entry(const census_ht_option* opt, ht_entry* p) {
+static void delete_entry(const census_ht_option *opt, ht_entry *p) {
if (opt->delete_data != NULL) {
opt->delete_data(p->data);
}
@@ -113,18 +113,18 @@ static void delete_entry(const census_ht_option* opt, ht_entry* p) {
gpr_free(p);
}
-static gpr_uint64 hash(const census_ht_option* opt, census_ht_key key) {
+static gpr_uint64 hash(const census_ht_option *opt, census_ht_key key) {
return opt->key_type == CENSUS_HT_UINT64 ? key.val : opt->hash(key.ptr);
}
-census_ht* census_ht_create(const census_ht_option* option) {
+census_ht *census_ht_create(const census_ht_option *option) {
int i;
- census_ht* ret = NULL;
+ census_ht *ret = NULL;
check_options(option);
- ret = (census_ht*)gpr_malloc(sizeof(census_ht));
+ ret = (census_ht *)gpr_malloc(sizeof(census_ht));
ret->size = 0;
ret->num_buckets = option->num_buckets;
- ret->buckets = (bucket*)gpr_malloc(sizeof(bucket) * ret->num_buckets);
+ ret->buckets = (bucket *)gpr_malloc(sizeof(bucket) * ret->num_buckets);
ret->options = *option;
/* initialize each bucket */
for (i = 0; i < ret->options.num_buckets; i++) {
@@ -135,11 +135,11 @@ census_ht* census_ht_create(const census_ht_option* option) {
return ret;
}
-static gpr_int32 find_bucket_idx(const census_ht* ht, census_ht_key key) {
+static gpr_int32 find_bucket_idx(const census_ht *ht, census_ht_key key) {
return hash(&ht->options, key) % ht->num_buckets;
}
-static int keys_match(const census_ht_option* opt, const ht_entry* p,
+static int keys_match(const census_ht_option *opt, const ht_entry *p,
const census_ht_key key) {
GPR_ASSERT(opt->key_type == CENSUS_HT_UINT64 ||
opt->key_type == CENSUS_HT_POINTER);
@@ -147,10 +147,10 @@ static int keys_match(const census_ht_option* opt, const ht_entry* p,
return !opt->compare_keys((p->key).ptr, key.ptr);
}
-static entry_locator ht_find(const census_ht* ht, census_ht_key key) {
+static entry_locator ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = {0, 0, 0, NULL};
gpr_int32 idx = 0;
- ht_entry* ptr = NULL;
+ ht_entry *ptr = NULL;
GPR_ASSERT(ht != NULL);
idx = find_bucket_idx(ht, key);
ptr = ht->buckets[idx].next;
@@ -178,7 +178,7 @@ static entry_locator ht_find(const census_ht* ht, census_ht_key key) {
return loc;
}
-void* census_ht_find(const census_ht* ht, census_ht_key key) {
+void *census_ht_find(const census_ht *ht, census_ht_key key) {
entry_locator loc = ht_find(ht, key);
if (loc.found == 0) {
return NULL;
@@ -187,9 +187,9 @@ void* census_ht_find(const census_ht* ht, census_ht_key key) {
: loc.prev_entry->next->data;
}
-void census_ht_insert(census_ht* ht, census_ht_key key, void* data) {
+void census_ht_insert(census_ht *ht, census_ht_key key, void *data) {
gpr_int32 idx = find_bucket_idx(ht, key);
- ht_entry* ptr = NULL;
+ ht_entry *ptr = NULL;
entry_locator loc = ht_find(ht, key);
if (loc.found) {
/* Replace old value with new value. */
@@ -215,7 +215,7 @@ void census_ht_insert(census_ht* ht, census_ht_key key, void* data) {
ht->buckets[idx].next_non_empty_bucket = -1;
ht->last_non_empty_bucket = idx;
}
- ptr = (ht_entry*)gpr_malloc(sizeof(ht_entry));
+ ptr = (ht_entry *)gpr_malloc(sizeof(ht_entry));
ptr->key = key;
ptr->data = data;
ptr->next = ht->buckets[idx].next;
@@ -223,7 +223,7 @@ void census_ht_insert(census_ht* ht, census_ht_key key, void* data) {
ht->size++;
}
-void census_ht_erase(census_ht* ht, census_ht_key key) {
+void census_ht_erase(census_ht *ht, census_ht_key key) {
entry_locator loc = ht_find(ht, key);
if (loc.found == 0) {
/* noop if not found */
@@ -231,7 +231,7 @@ void census_ht_erase(census_ht* ht, census_ht_key key) {
}
ht->size--;
if (loc.is_first_in_chain) {
- bucket* b = &ht->buckets[loc.bucket_idx];
+ bucket *b = &ht->buckets[loc.bucket_idx];
GPR_ASSERT(b->next != NULL);
/* The only entry in the bucket */
if (b->next->next == NULL) {
@@ -256,8 +256,8 @@ void census_ht_erase(census_ht* ht, census_ht_key key) {
}
/* Returns NULL if input table is empty. */
-census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num) {
- census_ht_kv* ret = NULL;
+census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num) {
+ census_ht_kv *ret = NULL;
int i = 0;
gpr_int32 idx = -1;
GPR_ASSERT(ht != NULL && num != NULL);
@@ -266,10 +266,10 @@ census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num) {
return NULL;
}
- ret = (census_ht_kv*)gpr_malloc(sizeof(census_ht_kv) * ht->size);
+ ret = (census_ht_kv *)gpr_malloc(sizeof(census_ht_kv) * ht->size);
idx = ht->first_non_empty_bucket;
while (idx >= 0) {
- ht_entry* ptr = ht->buckets[idx].next;
+ ht_entry *ptr = ht->buckets[idx].next;
for (; ptr != NULL; ptr = ptr->next) {
ret[i].k = ptr->key;
ret[i].v = ptr->data;
@@ -280,8 +280,8 @@ census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num) {
return ret;
}
-static void ht_delete_entry_chain(const census_ht_option* options,
- ht_entry* first) {
+static void ht_delete_entry_chain(const census_ht_option *options,
+ ht_entry *first) {
if (first == NULL) {
return;
}
@@ -291,7 +291,7 @@ static void ht_delete_entry_chain(const census_ht_option* options,
delete_entry(options, first);
}
-void census_ht_destroy(census_ht* ht) {
+void census_ht_destroy(census_ht *ht) {
unsigned i;
for (i = 0; i < ht->num_buckets; ++i) {
ht_delete_entry_chain(&ht->options, ht->buckets[i].next);
@@ -300,4 +300,4 @@ void census_ht_destroy(census_ht* ht) {
gpr_free(ht);
}
-size_t census_ht_get_size(const census_ht* ht) { return ht->size; }
+size_t census_ht_get_size(const census_ht *ht) { return ht->size; }
diff --git a/src/core/statistics/hash_table.h b/src/core/statistics/hash_table.h
index b7f8e11af4..8b39f536fd 100644
--- a/src/core/statistics/hash_table.h
+++ b/src/core/statistics/hash_table.h
@@ -61,7 +61,7 @@ typedef struct unresizable_hash_table census_ht;
store and const char* for stats store). */
typedef union {
gpr_uint64 val;
- void* ptr;
+ void *ptr;
} census_ht_key;
typedef enum census_ht_key_type {
@@ -76,56 +76,56 @@ typedef struct census_ht_option {
gpr_int32 num_buckets;
/* Fucntion to calculate uint64 hash value of the key. Only takes effect if
key_type is POINTER. */
- gpr_uint64 (*hash)(const void*);
+ gpr_uint64 (*hash)(const void *);
/* Function to compare two keys, returns 0 iff equal. Only takes effect if
key_type is POINTER */
- int (*compare_keys)(const void* k1, const void* k2);
+ int (*compare_keys)(const void *k1, const void *k2);
/* Value deleter. NULL if no specialized delete function is needed. */
- void (*delete_data)(void*);
+ void (*delete_data)(void *);
/* Key deleter. NULL if table does not own the key. (e.g. key is part of the
value or key is not owned by the table.) */
- void (*delete_key)(void*);
+ void (*delete_key)(void *);
} census_ht_option;
/* Creates a hashtable with fixed number of buckets according to the settings
specified in 'options' arg. Function pointers "hash" and "compare_keys" must
be provided if key_type is POINTER. Asserts if fail to create. */
-census_ht* census_ht_create(const census_ht_option* options);
+census_ht *census_ht_create(const census_ht_option *options);
/* Deletes hash table instance. Frees all dynamic memory owned by ht.*/
-void census_ht_destroy(census_ht* ht);
+void census_ht_destroy(census_ht *ht);
/* Inserts the input key-val pair into hash_table. If an entry with the same key
exists in the table, the corresponding value will be overwritten by the input
val. */
-void census_ht_insert(census_ht* ht, census_ht_key key, void* val);
+void census_ht_insert(census_ht *ht, census_ht_key key, void *val);
/* Returns pointer to data, returns NULL if not found. */
-void* census_ht_find(const census_ht* ht, census_ht_key key);
+void *census_ht_find(const census_ht *ht, census_ht_key key);
/* Erase hash table entry with input key. Noop if key is not found. */
-void census_ht_erase(census_ht* ht, census_ht_key key);
+void census_ht_erase(census_ht *ht, census_ht_key key);
typedef struct census_ht_kv {
census_ht_key k;
- void* v;
+ void *v;
} census_ht_kv;
/* Returns an array of pointers to all values in the hash table. Order of the
elements can be arbitrary. Sets 'num' to the size of returned array. Caller
owns returned array. */
-census_ht_kv* census_ht_get_all_elements(const census_ht* ht, size_t* num);
+census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num);
/* Returns number of elements kept. */
-size_t census_ht_get_size(const census_ht* ht);
+size_t census_ht_get_size(const census_ht *ht);
/* Functor applied on each key-value pair while iterating through entries in the
table. The functor should not mutate data. */
-typedef void (*census_ht_itr_cb)(census_ht_key key, const void* val_ptr,
- void* state);
+typedef void (*census_ht_itr_cb)(census_ht_key key, const void *val_ptr,
+ void *state);
/* Iterates through all key-value pairs in the hash_table. The callback function
should not invalidate data entries. */
-gpr_uint64 census_ht_for_all(const census_ht* ht, census_ht_itr_cb);
+gpr_uint64 census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
#endif /* GRPC_INTERNAL_CORE_STATISTICS_HASH_TABLE_H */
diff --git a/src/core/statistics/window_stats.c b/src/core/statistics/window_stats.c
index a64e080565..4d0d3cca4a 100644
--- a/src/core/statistics/window_stats.c
+++ b/src/core/statistics/window_stats.c
@@ -48,14 +48,14 @@ typedef struct census_window_stats_sum cws_sum;
entries and a single statistic */
typedef struct census_window_stats_bucket {
gpr_int64 count;
- void* statistic;
+ void *statistic;
} cws_bucket;
/* Each interval has a set of buckets, and the variables needed to keep
track of their current state */
typedef struct census_window_stats_interval_stats {
/* The buckets. There will be 'granularity' + 1 of these. */
- cws_bucket* buckets;
+ cws_bucket *buckets;
/* Index of the bucket containing the smallest time interval. */
int bottom_bucket;
/* The smallest time storable in the current window. */
@@ -74,7 +74,7 @@ typedef struct census_window_stats {
/* Record of stat_info. */
cws_stat_info stat_info;
/* Stats for each interval. */
- cws_interval_stats* interval_stats;
+ cws_interval_stats *interval_stats;
/* The time the newset stat was recorded. */
gpr_int64 newest_time;
} window_stats;
@@ -97,8 +97,8 @@ static gpr_int64 timespec_to_ns(const gpr_timespec ts) {
return (gpr_int64)ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
}
-static void cws_initialize_statistic(void* statistic,
- const cws_stat_info* stat_info) {
+static void cws_initialize_statistic(void *statistic,
+ const cws_stat_info *stat_info) {
if (stat_info->stat_initialize == NULL) {
memset(statistic, 0, stat_info->stat_size);
} else {
@@ -107,17 +107,17 @@ static void cws_initialize_statistic(void* statistic,
}
/* Create and initialize a statistic */
-static void* cws_create_statistic(const cws_stat_info* stat_info) {
- void* stat = gpr_malloc(stat_info->stat_size);
+static void *cws_create_statistic(const cws_stat_info *stat_info) {
+ void *stat = gpr_malloc(stat_info->stat_size);
cws_initialize_statistic(stat, stat_info);
return stat;
}
-window_stats* census_window_stats_create(int nintervals,
+window_stats *census_window_stats_create(int nintervals,
const gpr_timespec intervals[],
int granularity,
- const cws_stat_info* stat_info) {
- window_stats* ret;
+ const cws_stat_info *stat_info) {
+ window_stats *ret;
int i;
/* validate inputs */
GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
@@ -129,17 +129,17 @@ window_stats* census_window_stats_create(int nintervals,
granularity * 10 <= ns);
}
/* Allocate and initialize relevant data structures */
- ret = (window_stats*)gpr_malloc(sizeof(window_stats));
+ ret = (window_stats *)gpr_malloc(sizeof(window_stats));
ret->nintervals = nintervals;
ret->nbuckets = granularity + 1;
ret->stat_info = *stat_info;
ret->interval_stats =
- (cws_interval_stats*)gpr_malloc(nintervals * sizeof(cws_interval_stats));
+ (cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
for (i = 0; i < nintervals; i++) {
gpr_int64 size_ns = timespec_to_ns(intervals[i]);
- cws_interval_stats* is = ret->interval_stats + i;
- cws_bucket* buckets = is->buckets =
- (cws_bucket*)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
+ cws_interval_stats *is = ret->interval_stats + i;
+ cws_bucket *buckets = is->buckets =
+ (cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
int b;
for (b = 0; b < ret->nbuckets; b++) {
buckets[b].statistic = cws_create_statistic(stat_info);
@@ -168,8 +168,8 @@ window_stats* census_window_stats_create(int nintervals,
/* When we try adding a measurement above the current interval range, we
need to "shift" the buckets sufficiently to cover the new range. */
-static void cws_shift_buckets(const window_stats* wstats,
- cws_interval_stats* is, gpr_int64 when_ns) {
+static void cws_shift_buckets(const window_stats *wstats,
+ cws_interval_stats *is, gpr_int64 when_ns) {
int i;
/* number of bucket time widths to "shift" */
int shift;
@@ -191,14 +191,14 @@ static void cws_shift_buckets(const window_stats* wstats,
is->bottom += shift * is->width;
}
-void census_window_stats_add(window_stats* wstats, const gpr_timespec when,
- const void* stat_value) {
+void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
+ const void *stat_value) {
int i;
gpr_int64 when_ns = timespec_to_ns(when);
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
- cws_interval_stats* is = wstats->interval_stats + i;
- cws_bucket* bucket;
+ cws_interval_stats *is = wstats->interval_stats + i;
+ cws_bucket *bucket;
if (when_ns < is->bottom) { /* Below smallest time in interval: drop */
continue;
}
@@ -218,21 +218,21 @@ void census_window_stats_add(window_stats* wstats, const gpr_timespec when,
}
/* Add a specific bucket contents to an accumulating total. */
-static void cws_add_bucket_to_sum(cws_sum* sum, const cws_bucket* bucket,
- const cws_stat_info* stat_info) {
+static void cws_add_bucket_to_sum(cws_sum *sum, const cws_bucket *bucket,
+ const cws_stat_info *stat_info) {
sum->count += bucket->count;
stat_info->stat_add(sum->statistic, bucket->statistic);
}
/* Add a proportion to an accumulating sum. */
-static void cws_add_proportion_to_sum(double p, cws_sum* sum,
- const cws_bucket* bucket,
- const cws_stat_info* stat_info) {
+static void cws_add_proportion_to_sum(double p, cws_sum *sum,
+ const cws_bucket *bucket,
+ const cws_stat_info *stat_info) {
sum->count += p * bucket->count;
stat_info->stat_add_proportion(p, sum->statistic, bucket->statistic);
}
-void census_window_stats_get_sums(const window_stats* wstats,
+void census_window_stats_get_sums(const window_stats *wstats,
const gpr_timespec when, cws_sum sums[]) {
int i;
gpr_int64 when_ns = timespec_to_ns(when);
@@ -242,8 +242,8 @@ void census_window_stats_get_sums(const window_stats* wstats,
int new_bucket;
double last_proportion = 1.0;
double bottom_proportion;
- cws_interval_stats* is = wstats->interval_stats + i;
- cws_sum* sum = sums + i;
+ cws_interval_stats *is = wstats->interval_stats + i;
+ cws_sum *sum = sums + i;
sum->count = 0;
cws_initialize_statistic(sum->statistic, &wstats->stat_info);
if (when_ns < is->bottom) {
@@ -255,12 +255,12 @@ void census_window_stats_get_sums(const window_stats* wstats,
/* Calculating the appropriate amount of which buckets to use can get
complicated. Essentially there are two cases:
1) if the "top" bucket (new_bucket, where the newest additions to the
- stats recorded are entered) corresponds to 'when', then we need
- to take a proportion of it - (if when < newest_time) or the full
- thing. We also (possibly) need to take a corresponding
- proportion of the bottom bucket.
+ stats recorded are entered) corresponds to 'when', then we need
+ to take a proportion of it - (if when < newest_time) or the full
+ thing. We also (possibly) need to take a corresponding
+ proportion of the bottom bucket.
2) Other cases, we just take a straight proportion.
- */
+ */
when_bucket = (when_ns - is->bottom) / is->width;
new_bucket = (wstats->newest_time - is->bottom) / is->width;
if (new_bucket == when_bucket) {
@@ -300,7 +300,7 @@ void census_window_stats_get_sums(const window_stats* wstats,
}
}
-void census_window_stats_destroy(window_stats* wstats) {
+void census_window_stats_destroy(window_stats *wstats) {
int i;
GPR_ASSERT(wstats->interval_stats != NULL);
for (i = 0; i < wstats->nintervals; i++) {
diff --git a/src/core/statistics/window_stats.h b/src/core/statistics/window_stats.h
index 0020f6b44c..f4732e96a0 100644
--- a/src/core/statistics/window_stats.h
+++ b/src/core/statistics/window_stats.h
@@ -120,13 +120,13 @@ typedef struct census_window_stats_stat_info {
size_t stat_size;
/* Function to initialize a user-defined statistics object. If this is set
* to NULL, then the object will be zero-initialized. */
- void (*stat_initialize)(void* stat);
+ void (*stat_initialize)(void *stat);
/* Function to add one user-defined statistics object ('addme') to 'base' */
- void (*stat_add)(void* base, const void* addme);
+ void (*stat_add)(void *base, const void *addme);
/* As for previous function, but only add a proportion 'p'. This API will
currently only use 'p' values in the range [0,1], but other values are
possible in the future, and should be supported. */
- void (*stat_add_proportion)(double p, void* base, const void* addme);
+ void (*stat_add_proportion)(double p, void *base, const void *addme);
} census_window_stats_stat_info;
/* Create a new window_stats object. 'nintervals' is the number of
@@ -138,29 +138,29 @@ typedef struct census_window_stats_stat_info {
years will be treated as essentially infinite in size. This function will
GPR_ASSERT() if the object cannot be created or any of the parameters have
invalid values. This function is thread-safe. */
-struct census_window_stats* census_window_stats_create(
+struct census_window_stats *census_window_stats_create(
int nintervals, const gpr_timespec intervals[], int granularity,
- const census_window_stats_stat_info* stat_info);
+ const census_window_stats_stat_info *stat_info);
/* Add a new measurement (in 'stat_value'), as of a given time ('when').
This function is thread-compatible. */
-void census_window_stats_add(struct census_window_stats* wstats,
- const gpr_timespec when, const void* stat_value);
+void census_window_stats_add(struct census_window_stats *wstats,
+ const gpr_timespec when, const void *stat_value);
/* Structure used to record a single intervals sum for a given statistic */
typedef struct census_window_stats_sum {
/* Total count of samples. Note that because some internal interpolation
- is performed, the count of samples returned for each interval may not be an
- integral value. */
+ is performed, the count of samples returned for each interval may not be an
+ integral value. */
double count;
/* Sum for statistic */
- void* statistic;
+ void *statistic;
} census_window_stats_sums;
/* Retrieve a set of all values stored in a window_stats object 'wstats'. The
number of 'sums' MUST be the same as the number 'nintervals' used in
census_window_stats_create(). This function is thread-compatible. */
-void census_window_stats_get_sums(const struct census_window_stats* wstats,
+void census_window_stats_get_sums(const struct census_window_stats *wstats,
const gpr_timespec when,
struct census_window_stats_sum sums[]);
@@ -168,6 +168,6 @@ void census_window_stats_get_sums(const struct census_window_stats* wstats,
object will no longer be usable from any of the above functions (and
calling them will most likely result in a NULL-pointer dereference or
assertion failure). This function is thread-compatible. */
-void census_window_stats_destroy(struct census_window_stats* wstats);
+void census_window_stats_destroy(struct census_window_stats *wstats);
#endif /* GRPC_INTERNAL_CORE_STATISTICS_WINDOW_STATS_H */
diff --git a/src/core/support/cmdline.c b/src/core/support/cmdline.c
index 45a3182f73..87f60bca2e 100644
--- a/src/core/support/cmdline.c
+++ b/src/core/support/cmdline.c
@@ -192,9 +192,9 @@ static void print_usage_and_die(gpr_cmdline *cl) {
exit(1);
}
-static void extra_state(gpr_cmdline *cl, char *arg) {
+static void extra_state(gpr_cmdline *cl, char *str) {
if (!cl->extra_arg) print_usage_and_die(cl);
- cl->extra_arg(cl->extra_arg_user_data, arg);
+ cl->extra_arg(cl->extra_arg_user_data, str);
}
static arg *find_arg(gpr_cmdline *cl, char *name) {
@@ -214,7 +214,7 @@ static arg *find_arg(gpr_cmdline *cl, char *name) {
return a;
}
-static void value_state(gpr_cmdline *cl, char *arg) {
+static void value_state(gpr_cmdline *cl, char *str) {
long intval;
char *end;
@@ -222,80 +222,80 @@ static void value_state(gpr_cmdline *cl, char *arg) {
switch (cl->cur_arg->type) {
case ARGTYPE_INT:
- intval = strtol(arg, &end, 0);
+ intval = strtol(str, &end, 0);
if (*end || intval < INT_MIN || intval > INT_MAX) {
- fprintf(stderr, "expected integer, got '%s' for %s\n", arg,
+ fprintf(stderr, "expected integer, got '%s' for %s\n", str,
cl->cur_arg->name);
print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = (int)intval;
break;
case ARGTYPE_BOOL:
- if (0 == strcmp(arg, "1") || 0 == strcmp(arg, "true")) {
+ if (0 == strcmp(str, "1") || 0 == strcmp(str, "true")) {
*(int *)cl->cur_arg->value = 1;
- } else if (0 == strcmp(arg, "0") || 0 == strcmp(arg, "false")) {
+ } else if (0 == strcmp(str, "0") || 0 == strcmp(str, "false")) {
*(int *)cl->cur_arg->value = 0;
} else {
- fprintf(stderr, "expected boolean, got '%s' for %s\n", arg,
+ fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
cl->cur_arg->name);
print_usage_and_die(cl);
}
break;
case ARGTYPE_STRING:
- *(char **)cl->cur_arg->value = arg;
+ *(char **)cl->cur_arg->value = str;
break;
}
cl->state = normal_state;
}
-static void normal_state(gpr_cmdline *cl, char *arg) {
+static void normal_state(gpr_cmdline *cl, char *str) {
char *eq = NULL;
char *tmp = NULL;
char *arg_name = NULL;
- if (0 == strcmp(arg, "-help") || 0 == strcmp(arg, "--help") ||
- 0 == strcmp(arg, "-h")) {
+ if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
+ 0 == strcmp(str, "-h")) {
print_usage_and_die(cl);
}
cl->cur_arg = NULL;
- if (arg[0] == '-') {
- if (arg[1] == '-') {
- if (arg[2] == 0) {
+ if (str[0] == '-') {
+ if (str[1] == '-') {
+ if (str[2] == 0) {
/* handle '--' to move to just extra args */
cl->state = extra_state;
return;
}
- arg += 2;
+ str += 2;
} else {
- arg += 1;
+ str += 1;
}
- /* first byte of arg is now past the leading '-' or '--' */
- if (arg[0] == 'n' && arg[1] == 'o' && arg[2] == '-') {
- /* arg is of the form '--no-foo' - it's a flag disable */
- arg += 3;
- cl->cur_arg = find_arg(cl, arg);
+ /* first byte of str is now past the leading '-' or '--' */
+ if (str[0] == 'n' && str[1] == 'o' && str[2] == '-') {
+ /* str is of the form '--no-foo' - it's a flag disable */
+ str += 3;
+ cl->cur_arg = find_arg(cl, str);
if (cl->cur_arg->type != ARGTYPE_BOOL) {
- fprintf(stderr, "%s is not a flag argument\n", arg);
+ fprintf(stderr, "%s is not a flag argument\n", str);
print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = 0;
return; /* early out */
}
- eq = strchr(arg, '=');
+ eq = strchr(str, '=');
if (eq != NULL) {
/* copy the string into a temp buffer and extract the name */
- tmp = arg_name = gpr_malloc((size_t)(eq - arg + 1));
- memcpy(arg_name, arg, (size_t)(eq - arg));
- arg_name[eq - arg] = 0;
+ tmp = arg_name = gpr_malloc((size_t)(eq - str + 1));
+ memcpy(arg_name, str, (size_t)(eq - str));
+ arg_name[eq - str] = 0;
} else {
- arg_name = arg;
+ arg_name = str;
}
cl->cur_arg = find_arg(cl, arg_name);
if (eq != NULL) {
- /* arg was of the type --foo=value, parse the value */
+ /* str was of the type --foo=value, parse the value */
value_state(cl, eq + 1);
} else if (cl->cur_arg->type != ARGTYPE_BOOL) {
/* flag types don't have a '--foo value' variant, other types do */
@@ -305,7 +305,7 @@ static void normal_state(gpr_cmdline *cl, char *arg) {
*(int *)cl->cur_arg->value = 1;
}
} else {
- extra_state(cl, arg);
+ extra_state(cl, str);
}
gpr_free(tmp);
diff --git a/src/core/support/host_port.c b/src/core/support/host_port.c
index a28f04df9c..23f65b1581 100644
--- a/src/core/support/host_port.c
+++ b/src/core/support/host_port.c
@@ -79,7 +79,7 @@ int gpr_split_host_port(const char *name, char **host, char **port) {
host_len = (size_t)(rbracket - host_start);
if (memchr(host_start, ':', host_len) == NULL) {
/* Require all bracketed hosts to contain a colon, because a hostname or
- IPv4 address should never use brackets. */
+ IPv4 address should never use brackets. */
return 0;
}
} else {
diff --git a/src/core/support/stack_lockfree.c b/src/core/support/stack_lockfree.c
index 180ba19c68..df9a09894c 100644
--- a/src/core/support/stack_lockfree.c
+++ b/src/core/support/stack_lockfree.c
@@ -67,7 +67,7 @@ typedef union lockfree_node {
#define ENTRY_ALIGNMENT_BITS 3 /* make sure that entries aligned to 8-bytes */
#define INVALID_ENTRY_INDEX \
((1 << 16) - 1) /* reserve this entry as invalid \
- */
+ */
struct gpr_stack_lockfree {
lockfree_node *entries;
diff --git a/src/core/support/stack_lockfree.h b/src/core/support/stack_lockfree.h
index 2bbbe3bd95..ca58dd007a 100644
--- a/src/core/support/stack_lockfree.h
+++ b/src/core/support/stack_lockfree.h
@@ -40,14 +40,14 @@ typedef struct gpr_stack_lockfree gpr_stack_lockfree;
/* This stack must specify the maximum number of entries to track.
The current implementation only allows up to 65534 entries */
-gpr_stack_lockfree* gpr_stack_lockfree_create(size_t entries);
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack);
+gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries);
+void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack);
/* Pass in a valid entry number for the next stack entry */
/* Returns 1 if this is the first element on the stack, 0 otherwise */
-int gpr_stack_lockfree_push(gpr_stack_lockfree*, int entry);
+int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry);
/* Returns -1 on empty or the actual entry number */
-int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack);
+int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack);
#endif /* GRPC_INTERNAL_CORE_SUPPORT_STACK_LOCKFREE_H */
diff --git a/src/core/support/string_win32.c b/src/core/support/string_win32.c
index 8ffb0a225e..914ba8771c 100644
--- a/src/core/support/string_win32.c
+++ b/src/core/support/string_win32.c
@@ -81,7 +81,8 @@ int gpr_asprintf(char **strp, const char *format, ...) {
}
#if defined UNICODE || defined _UNICODE
-LPTSTR gpr_char_to_tchar(LPCSTR input) {
+LPTSTR
+gpr_char_to_tchar(LPCSTR input) {
LPTSTR ret;
int needed = MultiByteToWideChar(CP_UTF8, 0, input, -1, NULL, 0);
if (needed == 0) return NULL;
@@ -90,7 +91,8 @@ LPTSTR gpr_char_to_tchar(LPCSTR input) {
return ret;
}
-LPSTR gpr_tchar_to_char(LPCTSTR input) {
+LPSTR
+gpr_tchar_to_char(LPCTSTR input) {
LPSTR ret;
int needed = WideCharToMultiByte(CP_UTF8, 0, input, -1, NULL, 0, NULL, NULL);
if (needed == 0) return NULL;
diff --git a/src/core/support/sync_posix.c b/src/core/support/sync_posix.c
index 6f078cd4bb..91c30989ce 100644
--- a/src/core/support/sync_posix.c
+++ b/src/core/support/sync_posix.c
@@ -41,15 +41,15 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
-void gpr_mu_init(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
+void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
-void gpr_mu_destroy(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
+void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
-void gpr_mu_lock(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_lock(mu) == 0); }
+void gpr_mu_lock(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_lock(mu) == 0); }
-void gpr_mu_unlock(gpr_mu *mu) { GPR_ASSERT(pthread_mutex_unlock(mu) == 0); }
+void gpr_mu_unlock(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_unlock(mu) == 0); }
-int gpr_mu_trylock(gpr_mu *mu) {
+int gpr_mu_trylock(gpr_mu* mu) {
int err = pthread_mutex_trylock(mu);
GPR_ASSERT(err == 0 || err == EBUSY);
return err == 0;
@@ -57,11 +57,11 @@ int gpr_mu_trylock(gpr_mu *mu) {
/*----------------------------------------*/
-void gpr_cv_init(gpr_cv *cv) { GPR_ASSERT(pthread_cond_init(cv, NULL) == 0); }
+void gpr_cv_init(gpr_cv* cv) { GPR_ASSERT(pthread_cond_init(cv, NULL) == 0); }
-void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
+void gpr_cv_destroy(gpr_cv* cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
-int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
+int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
int err = 0;
if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
0) {
@@ -77,15 +77,15 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
return err == ETIMEDOUT;
}
-void gpr_cv_signal(gpr_cv *cv) { GPR_ASSERT(pthread_cond_signal(cv) == 0); }
+void gpr_cv_signal(gpr_cv* cv) { GPR_ASSERT(pthread_cond_signal(cv) == 0); }
-void gpr_cv_broadcast(gpr_cv *cv) {
+void gpr_cv_broadcast(gpr_cv* cv) {
GPR_ASSERT(pthread_cond_broadcast(cv) == 0);
}
/*----------------------------------------*/
-void gpr_once_init(gpr_once *once, void (*init_function)(void)) {
+void gpr_once_init(gpr_once* once, void (*init_function)(void)) {
GPR_ASSERT(pthread_once(once, init_function) == 0);
}
diff --git a/src/core/support/sync_win32.c b/src/core/support/sync_win32.c
index f546477067..69dd46399a 100644
--- a/src/core/support/sync_win32.c
+++ b/src/core/support/sync_win32.c
@@ -90,7 +90,7 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
gpr_timespec now = gpr_now(abs_deadline.clock_type);
gpr_int64 now_ms = (gpr_int64)now.tv_sec * 1000 + now.tv_nsec / 1000000;
gpr_int64 deadline_ms =
- (gpr_int64)abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;
+ (gpr_int64)abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;
if (now_ms >= deadline_ms) {
timeout = 1;
} else {
diff --git a/src/core/support/thd.c b/src/core/support/thd.c
index 32c0db5b66..41daeb5d0e 100644
--- a/src/core/support/thd.c
+++ b/src/core/support/thd.c
@@ -45,20 +45,20 @@ gpr_thd_options gpr_thd_options_default(void) {
return options;
}
-void gpr_thd_options_set_detached(gpr_thd_options *options) {
+void gpr_thd_options_set_detached(gpr_thd_options* options) {
options->flags &= ~GPR_THD_JOINABLE;
}
-void gpr_thd_options_set_joinable(gpr_thd_options *options) {
+void gpr_thd_options_set_joinable(gpr_thd_options* options) {
options->flags |= GPR_THD_JOINABLE;
}
-int gpr_thd_options_is_detached(const gpr_thd_options *options) {
+int gpr_thd_options_is_detached(const gpr_thd_options* options) {
if (!options) return 1;
return (options->flags & GPR_THD_JOINABLE) == 0;
}
-int gpr_thd_options_is_joinable(const gpr_thd_options *options) {
+int gpr_thd_options_is_joinable(const gpr_thd_options* options) {
if (!options) return 0;
return (options->flags & GPR_THD_JOINABLE) == GPR_THD_JOINABLE;
}
diff --git a/src/core/support/time_precise.h b/src/core/support/time_precise.h
index 574ebb8448..a72d37e2f1 100644
--- a/src/core/support/time_precise.h
+++ b/src/core/support/time_precise.h
@@ -83,8 +83,8 @@ static void gpr_precise_clock_now(gpr_timespec *clk) {
clk->tv_nsec = counter % cycles_per_second;
}
-#else /* GRPC_TIMERS_RDTSC */
-static void gpr_precise_clock_now(gpr_timespec *clk) {
+#else /* GRPC_TIMERS_RDTSC */
+static void gpr_precise_clock_now(gpr_timespec* clk) {
*clk = gpr_now(GPR_CLOCK_REALTIME);
clk->clock_type = GPR_CLOCK_PRECISE;
}
diff --git a/src/core/surface/call.c b/src/core/surface/call.c
index 4168c2ef0c..51dcbeb1a1 100644
--- a/src/core/surface/call.c
+++ b/src/core/surface/call.c
@@ -163,8 +163,6 @@ struct grpc_call {
gpr_uint8 bound_pollset;
/* is an error status set */
gpr_uint8 error_status_set;
- /** should the alarm be cancelled */
- gpr_uint8 cancel_alarm;
/** bitmask of allocated completion events in completions */
gpr_uint8 allocated_completions;
/** flag indicating that cancellation is inherited */
@@ -182,15 +180,15 @@ struct grpc_call {
request_set[op] is an integer specifying a set of operations to which
the request belongs:
- - if it is < GRPC_IOREQ_OP_COUNT, then this operation is pending
- completion, and the integer represents to which group of operations
- the ioreq belongs. Each group is represented by one master, and the
- integer in request_set is an index into masters to find the master
- data.
- - if it is REQSET_EMPTY, the ioreq op is inactive and available to be
- started
- - finally, if request_set[op] is REQSET_DONE, then the operation is
- complete and unavailable to be started again
+ - if it is < GRPC_IOREQ_OP_COUNT, then this operation is pending
+ completion, and the integer represents to which group of operations
+ the ioreq belongs. Each group is represented by one master, and the
+ integer in request_set is an index into masters to find the master
+ data.
+ - if it is REQSET_EMPTY, the ioreq op is inactive and available to be
+ started
+ - finally, if request_set[op] is REQSET_DONE, then the operation is
+ complete and unavailable to be started again
request_data[op] is the request data as supplied by the initiator of
a request, and is valid iff request_set[op] <= GRPC_IOREQ_OP_COUNT.
@@ -256,10 +254,10 @@ struct grpc_call {
gpr_slice_buffer incoming_message;
gpr_uint32 incoming_message_length;
gpr_uint32 incoming_message_flags;
- grpc_iomgr_closure destroy_closure;
- grpc_iomgr_closure on_done_recv;
- grpc_iomgr_closure on_done_send;
- grpc_iomgr_closure on_done_bind;
+ grpc_closure destroy_closure;
+ grpc_closure on_done_recv;
+ grpc_closure on_done_send;
+ grpc_closure on_done_bind;
/** completion events - for completion queue use */
grpc_cq_completion completions[MAX_CONCURRENT_COMPLETIONS];
@@ -278,19 +276,22 @@ struct grpc_call {
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline);
-static void call_on_done_recv(void *call, int success);
-static void call_on_done_send(void *call, int success);
+static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ gpr_timespec deadline);
+static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *call, int success);
+static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *call, int success);
static int fill_send_ops(grpc_call *call, grpc_transport_stream_op *op);
-static void execute_op(grpc_call *call, grpc_transport_stream_op *op);
-static void recv_metadata(grpc_call *call, grpc_metadata_batch *metadata);
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op);
+static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_metadata_batch *metadata);
static void finish_read_ops(grpc_call *call);
static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
const char *description);
-static void finished_loose_op(void *call, int success);
+static void finished_loose_op(grpc_exec_ctx *exec_ctx, void *call, int success);
static void lock(grpc_call *call);
-static void unlock(grpc_call *call);
+static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
gpr_uint32 propagation_mask,
@@ -303,6 +304,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
grpc_transport_stream_op initial_op;
grpc_transport_stream_op *initial_op_ptr = NULL;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call =
gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
@@ -333,9 +335,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
grpc_sopb_init(&call->send_ops);
grpc_sopb_init(&call->recv_ops);
gpr_slice_buffer_init(&call->incoming_message);
- grpc_iomgr_closure_init(&call->on_done_recv, call_on_done_recv, call);
- grpc_iomgr_closure_init(&call->on_done_send, call_on_done_send, call);
- grpc_iomgr_closure_init(&call->on_done_bind, finished_loose_op, call);
+ grpc_closure_init(&call->on_done_recv, call_on_done_recv, call);
+ grpc_closure_init(&call->on_done_send, call_on_done_send, call);
+ grpc_closure_init(&call->on_done_bind, finished_loose_op, call);
/* dropped in destroy and when READ_STATE_STREAM_CLOSED received */
gpr_ref_init(&call->internal_refcount, 2);
/* server hack: start reads immediately so we can get initial metadata.
@@ -350,8 +352,8 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
GRPC_CALL_INTERNAL_REF(call, "receiving");
initial_op_ptr = &initial_op;
}
- grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
- CALL_STACK_FROM_CALL(call));
+ grpc_call_stack_init(&exec_ctx, channel_stack, server_transport_data,
+ initial_op_ptr, CALL_STACK_FROM_CALL(call));
if (parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(parent_call, "child");
GPR_ASSERT(call->is_client);
@@ -395,19 +397,20 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
}
if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
0) {
- set_deadline_alarm(call, send_deadline);
+ set_deadline_alarm(&exec_ctx, call, send_deadline);
}
+ grpc_exec_ctx_finish(&exec_ctx);
return call;
}
-void grpc_call_set_completion_queue(grpc_call *call,
+void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq) {
lock(call);
call->cq = cq;
if (cq) {
GRPC_CQ_INTERNAL_REF(cq, "bind");
}
- unlock(call);
+ unlock(exec_ctx, call);
}
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) {
@@ -429,13 +432,14 @@ static grpc_cq_completion *allocate_completion(grpc_call *call) {
abort();
}
-static void done_completion(void *call, grpc_cq_completion *completion) {
+static void done_completion(grpc_exec_ctx *exec_ctx, void *call,
+ grpc_cq_completion *completion) {
grpc_call *c = call;
gpr_mu_lock(&c->completion_mu);
c->allocated_completions &=
(gpr_uint8) ~(1u << (completion - c->completions));
gpr_mu_unlock(&c->completion_mu);
- GRPC_CALL_INTERNAL_UNREF(c, "completion", 1);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, c, "completion");
}
#ifdef GRPC_CALL_REF_COUNT_DEBUG
@@ -448,11 +452,11 @@ void grpc_call_internal_ref(grpc_call *c) {
gpr_ref(&c->internal_refcount);
}
-static void destroy_call(void *call, int ignored_success) {
+static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
size_t i;
grpc_call *c = call;
- grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c));
- GRPC_CHANNEL_INTERNAL_UNREF(c->channel, "call");
+ grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
gpr_mu_destroy(&c->mu);
gpr_mu_destroy(&c->completion_mu);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
@@ -486,21 +490,15 @@ static void destroy_call(void *call, int ignored_success) {
}
#ifdef GRPC_CALL_REF_COUNT_DEBUG
-void grpc_call_internal_unref(grpc_call *c, const char *reason,
- int allow_immediate_deletion) {
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ const char *reason) {
gpr_log(GPR_DEBUG, "CALL: unref %p %d -> %d [%s]", c,
c->internal_refcount.count, c->internal_refcount.count - 1, reason);
#else
-void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) {
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c) {
#endif
if (gpr_unref(&c->internal_refcount)) {
- if (allow_immediate_deletion) {
- destroy_call(c, 1);
- } else {
- c->destroy_closure.cb = destroy_call;
- c->destroy_closure.cb_arg = c;
- grpc_iomgr_add_callback(&c->destroy_closure);
- }
+ destroy_call(exec_ctx, c);
}
}
@@ -599,7 +597,7 @@ static int need_more_data(grpc_call *call) {
(call->cancel_with_status != GRPC_STATUS_OK) || call->destroy_called;
}
-static void unlock(grpc_call *call) {
+static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
grpc_transport_stream_op op;
completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
int completing_requests = 0;
@@ -607,7 +605,6 @@ static void unlock(grpc_call *call) {
int i;
const size_t MAX_RECV_PEEK_AHEAD = 65536;
size_t buffered_bytes;
- int cancel_alarm = 0;
memset(&op, 0, sizeof(op));
@@ -615,9 +612,6 @@ static void unlock(grpc_call *call) {
start_op = op.cancel_with_status != GRPC_STATUS_OK;
call->cancel_with_status = GRPC_STATUS_OK; /* reset */
- cancel_alarm = call->cancel_alarm;
- call->cancel_alarm = 0;
-
if (!call->receiving && need_more_data(call)) {
if (grpc_bbq_empty(&call->incoming_queue) && call->reading_message) {
op.max_recv_bytes = call->incoming_message_length -
@@ -667,23 +661,20 @@ static void unlock(grpc_call *call) {
gpr_mu_unlock(&call->mu);
- if (cancel_alarm) {
- grpc_alarm_cancel(&call->alarm);
- }
-
if (start_op) {
- execute_op(call, &op);
+ execute_op(exec_ctx, call, &op);
}
if (completing_requests > 0) {
for (i = 0; i < completing_requests; i++) {
- completed_requests[i].on_complete(call, completed_requests[i].success,
+ completed_requests[i].on_complete(exec_ctx, call,
+ completed_requests[i].success,
completed_requests[i].user_data);
}
lock(call);
call->completing = 0;
- unlock(call);
- GRPC_CALL_INTERNAL_UNREF(call, "completing", 0);
+ unlock(exec_ctx, call);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completing");
}
}
@@ -828,7 +819,7 @@ static void early_out_write_ops(grpc_call *call) {
}
}
-static void call_on_done_send(void *pc, int success) {
+static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *call = pc;
lock(call);
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
@@ -851,8 +842,8 @@ static void call_on_done_send(void *pc, int success) {
call->send_ops.nops = 0;
call->last_send_contains = 0;
call->sending = 0;
- unlock(call);
- GRPC_CALL_INTERNAL_UNREF(call, "sending", 0);
+ unlock(exec_ctx, call);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "sending");
}
static void finish_message(grpc_call *call) {
@@ -958,7 +949,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
}
}
-static void call_on_done_recv(void *pc, int success) {
+static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *call = pc;
grpc_call *child_call;
grpc_call *next_child_call;
@@ -973,7 +964,7 @@ static void call_on_done_recv(void *pc, int success) {
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA:
- recv_metadata(call, &op->data.metadata);
+ recv_metadata(exec_ctx, call, &op->data.metadata);
break;
case GRPC_OP_BEGIN_MESSAGE:
success = begin_message(call, op->data.begin_message);
@@ -994,7 +985,9 @@ static void call_on_done_recv(void *pc, int success) {
if (call->recv_state == GRPC_STREAM_CLOSED) {
GPR_ASSERT(call->read_state <= READ_STATE_STREAM_CLOSED);
call->read_state = READ_STATE_STREAM_CLOSED;
- call->cancel_alarm |= call->have_alarm;
+ if (call->have_alarm) {
+ grpc_alarm_cancel(exec_ctx, &call->alarm);
+ }
/* propagate cancellation to any interested children */
child_call = call->first_child;
if (child_call != NULL) {
@@ -1003,12 +996,12 @@ static void call_on_done_recv(void *pc, int success) {
if (child_call->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
grpc_call_cancel(child_call, NULL);
- GRPC_CALL_INTERNAL_UNREF(child_call, "propagate_cancel", 0);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
}
child_call = next_child_call;
} while (child_call != call->first_child);
}
- GRPC_CALL_INTERNAL_UNREF(call, "closed", 0);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "closed");
}
finish_read_ops(call);
} else {
@@ -1020,9 +1013,9 @@ static void call_on_done_recv(void *pc, int success) {
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 0);
}
call->recv_ops.nops = 0;
- unlock(call);
+ unlock(exec_ctx, call);
- GRPC_CALL_INTERNAL_UNREF(call, "receiving", 0);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "receiving");
GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
}
@@ -1273,18 +1266,19 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
}
grpc_call_error grpc_call_start_ioreq_and_call_back(
- grpc_call *call, const grpc_ioreq *reqs, size_t nreqs,
- grpc_ioreq_completion_func on_complete, void *user_data) {
+ grpc_exec_ctx *exec_ctx, grpc_call *call, const grpc_ioreq *reqs,
+ size_t nreqs, grpc_ioreq_completion_func on_complete, void *user_data) {
grpc_call_error err;
lock(call);
err = start_ioreq(call, reqs, nreqs, on_complete, user_data);
- unlock(call);
+ unlock(exec_ctx, call);
return err;
}
void grpc_call_destroy(grpc_call *c) {
int cancel;
grpc_call *parent = c->parent;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (parent) {
gpr_mu_lock(&parent->mu);
@@ -1297,17 +1291,20 @@ void grpc_call_destroy(grpc_call *c) {
c->sibling_next->sibling_prev = c->sibling_prev;
}
gpr_mu_unlock(&parent->mu);
- GRPC_CALL_INTERNAL_UNREF(parent, "child", 1);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
}
lock(c);
GPR_ASSERT(!c->destroy_called);
c->destroy_called = 1;
- c->cancel_alarm |= c->have_alarm;
+ if (c->have_alarm) {
+ grpc_alarm_cancel(&exec_ctx, &c->alarm);
+ }
cancel = c->read_state != READ_STATE_STREAM_CLOSED;
- unlock(c);
+ unlock(&exec_ctx, c);
if (cancel) grpc_call_cancel(c, NULL);
- GRPC_CALL_INTERNAL_UNREF(c, "destroy", 1);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
}
grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
@@ -1321,10 +1318,12 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
const char *description,
void *reserved) {
grpc_call_error r;
- (void)reserved;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(reserved == NULL);
lock(c);
r = cancel_with_status(c, status, description);
- unlock(c);
+ unlock(&exec_ctx, c);
+ grpc_exec_ctx_finish(&exec_ctx);
return r;
}
@@ -1344,22 +1343,25 @@ static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
return GRPC_CALL_OK;
}
-static void finished_loose_op(void *call, int success_ignored) {
- GRPC_CALL_INTERNAL_UNREF(call, "loose-op", 0);
+static void finished_loose_op(grpc_exec_ctx *exec_ctx, void *call,
+ int success_ignored) {
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "loose-op");
}
typedef struct {
grpc_call *call;
- grpc_iomgr_closure closure;
+ grpc_closure closure;
} finished_loose_op_allocated_args;
-static void finished_loose_op_allocated(void *alloc, int success) {
+static void finished_loose_op_allocated(grpc_exec_ctx *exec_ctx, void *alloc,
+ int success) {
finished_loose_op_allocated_args *args = alloc;
- finished_loose_op(args->call, success);
+ finished_loose_op(exec_ctx, args->call, success);
gpr_free(args);
}
-static void execute_op(grpc_call *call, grpc_transport_stream_op *op) {
+static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op *op) {
grpc_call_element *elem;
GPR_ASSERT(op->on_consumed == NULL);
@@ -1370,27 +1372,29 @@ static void execute_op(grpc_call *call, grpc_transport_stream_op *op) {
} else {
finished_loose_op_allocated_args *args = gpr_malloc(sizeof(*args));
args->call = call;
- grpc_iomgr_closure_init(&args->closure, finished_loose_op_allocated,
- args);
+ grpc_closure_init(&args->closure, finished_loose_op_allocated, args);
op->on_consumed = &args->closure;
}
}
elem = CALL_ELEM_FROM_CALL(call, 0);
op->context = call->context;
- elem->filter->start_transport_stream_op(elem, op);
+ elem->filter->start_transport_stream_op(exec_ctx, elem, op);
}
char *grpc_call_get_peer(grpc_call *call) {
grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
- return elem->filter->get_peer(elem);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ char *result = elem->filter->get_peer(&exec_ctx, elem);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return result;
}
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
return CALL_FROM_TOP_ELEM(elem);
}
-static void call_alarm(void *arg, int success) {
+static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_call *call = arg;
lock(call);
call->have_alarm = 0;
@@ -1399,11 +1403,12 @@ static void call_alarm(void *arg, int success) {
"Deadline Exceeded");
}
finish_read_ops(call);
- unlock(call);
- GRPC_CALL_INTERNAL_UNREF(call, "alarm", 1);
+ unlock(exec_ctx, call);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "alarm");
}
-static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
+static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ gpr_timespec deadline) {
if (call->have_alarm) {
gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice");
assert(0);
@@ -1412,7 +1417,7 @@ static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
GRPC_CALL_INTERNAL_REF(call, "alarm");
call->have_alarm = 1;
call->send_deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- grpc_alarm_init(&call->alarm, call->send_deadline, call_alarm, call,
+ grpc_alarm_init(exec_ctx, &call->alarm, call->send_deadline, call_alarm, call,
gpr_now(GPR_CLOCK_MONOTONIC));
}
@@ -1464,7 +1469,8 @@ static gpr_uint32 decode_compression(grpc_mdelem *md) {
return algorithm;
}
-static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
+static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_metadata_batch *md) {
grpc_linked_mdelem *l;
grpc_metadata_array *dest;
grpc_metadata *mdusr;
@@ -1473,18 +1479,18 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
is_trailing = call->read_state >= READ_STATE_GOT_INITIAL_METADATA;
for (l = md->list.head; l != NULL; l = l->next) {
- grpc_mdelem *md = l->md;
- grpc_mdstr *key = md->key;
+ grpc_mdelem *mdel = l->md;
+ grpc_mdstr *key = mdel->key;
if (key == grpc_channel_get_status_string(call->channel)) {
- set_status_code(call, STATUS_FROM_WIRE, decode_status(md));
+ set_status_code(call, STATUS_FROM_WIRE, decode_status(mdel));
} else if (key == grpc_channel_get_message_string(call->channel)) {
- set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(md->value));
+ set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(mdel->value));
} else if (key ==
grpc_channel_get_compression_algorithm_string(call->channel)) {
- set_compression_algorithm(call, decode_compression(md));
+ set_compression_algorithm(call, decode_compression(mdel));
} else if (key == grpc_channel_get_encodings_accepted_by_peer_string(
call->channel)) {
- set_encodings_accepted_by_peer(call, md->value->slice);
+ set_encodings_accepted_by_peer(call, mdel->value->slice);
} else {
dest = &call->buffered_metadata[is_trailing];
if (dest->count == dest->capacity) {
@@ -1493,9 +1499,9 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
mdusr = &dest->metadata[dest->count++];
- mdusr->key = grpc_mdstr_as_c_string(md->key);
- mdusr->value = grpc_mdstr_as_c_string(md->value);
- mdusr->value_length = GPR_SLICE_LENGTH(md->value->slice);
+ mdusr->key = grpc_mdstr_as_c_string(mdel->key);
+ mdusr->value = grpc_mdstr_as_c_string(mdel->value);
+ mdusr->value_length = GPR_SLICE_LENGTH(mdel->value->slice);
if (call->owned_metadata_count == call->owned_metadata_capacity) {
call->owned_metadata_capacity =
GPR_MAX(call->owned_metadata_capacity + 8,
@@ -1504,14 +1510,14 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
gpr_realloc(call->owned_metadata,
sizeof(grpc_mdelem *) * call->owned_metadata_capacity);
}
- call->owned_metadata[call->owned_metadata_count++] = md;
- l->md = 0;
+ call->owned_metadata[call->owned_metadata_count++] = mdel;
+ l->md = NULL;
}
}
if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
0 &&
!call->is_client) {
- set_deadline_alarm(call, md->deadline);
+ set_deadline_alarm(exec_ctx, call, md->deadline);
}
if (!is_trailing) {
call->read_state = READ_STATE_GOT_INITIAL_METADATA;
@@ -1543,13 +1549,15 @@ static void set_cancelled_value(grpc_status_code status, void *dest) {
*(grpc_status_code *)dest = (status != GRPC_STATUS_OK);
}
-static void finish_batch(grpc_call *call, int success, void *tag) {
- grpc_cq_end_op(call->cq, tag, success, done_completion, call,
+static void finish_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, int success,
+ void *tag) {
+ grpc_cq_end_op(exec_ctx, call->cq, tag, success, done_completion, call,
allocate_completion(call));
}
-static void finish_batch_with_close(grpc_call *call, int success, void *tag) {
- grpc_cq_end_op(call->cq, tag, 1, done_completion, call,
+static void finish_batch_with_close(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ int success, void *tag) {
+ grpc_cq_end_op(exec_ctx, call->cq, tag, 1, done_completion, call,
allocate_completion(call));
}
@@ -1568,30 +1576,45 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t out;
const grpc_op *op;
grpc_ioreq *req;
- void (*finish_func)(grpc_call *, int, void *) = finish_batch;
+ void (*finish_func)(grpc_exec_ctx *, grpc_call *, int, void *) = finish_batch;
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- if (reserved != NULL) return GRPC_CALL_ERROR;
+ if (reserved != NULL) {
+ error = GRPC_CALL_ERROR;
+ goto done;
+ }
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);
if (nops == 0) {
grpc_cq_begin_op(call->cq);
GRPC_CALL_INTERNAL_REF(call, "completion");
- grpc_cq_end_op(call->cq, tag, 1, done_completion, call,
+ grpc_cq_end_op(&exec_ctx, call->cq, tag, 1, done_completion, call,
allocate_completion(call));
- return GRPC_CALL_OK;
+ error = GRPC_CALL_OK;
+ goto done;
}
/* rewrite batch ops into ioreq ops */
for (in = 0, out = 0; in < nops; in++) {
op = &ops[in];
- if (op->reserved != NULL) return GRPC_CALL_ERROR;
+ if (op->reserved != NULL) {
+ error = GRPC_CALL_ERROR;
+ goto done;
+ }
switch (op->op) {
case GRPC_OP_SEND_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_SEND_INITIAL_METADATA;
req->data.send_metadata.count = op->data.send_initial_metadata.count;
req->data.send_metadata.metadata =
@@ -1600,36 +1623,55 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
- return GRPC_CALL_ERROR_INVALID_FLAGS;
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
}
if (op->data.send_message == NULL) {
- return GRPC_CALL_ERROR_INVALID_MESSAGE;
+ error = GRPC_CALL_ERROR_INVALID_MESSAGE;
+ goto done;
}
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_SEND_MESSAGE;
req->data.send_message = op->data.send_message;
req->flags = op->flags;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
if (!call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_SERVER;
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done;
}
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_SEND_CLOSE;
req->flags = op->flags;
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
if (call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
+ goto done;
}
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_SEND_TRAILING_METADATA;
req->flags = op->flags;
req->data.send_metadata.count =
@@ -1637,7 +1679,10 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
req->data.send_metadata.metadata =
op->data.send_status_from_server.trailing_metadata;
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_SEND_STATUS;
req->data.send_status.code = op->data.send_status_from_server.status;
req->data.send_status.details =
@@ -1647,17 +1692,27 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
op->data.send_status_from_server.status_details, 0)
: NULL;
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_SEND_CLOSE;
break;
case GRPC_OP_RECV_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
if (!call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_SERVER;
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done;
}
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
req->data.recv_metadata = op->data.recv_initial_metadata;
req->data.recv_metadata->count = 0;
@@ -1665,55 +1720,86 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
break;
case GRPC_OP_RECV_MESSAGE:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_MESSAGE;
req->data.recv_message = op->data.recv_message;
req->flags = op->flags;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
if (!call->is_client) {
- return GRPC_CALL_ERROR_NOT_ON_SERVER;
+ error = GRPC_CALL_ERROR_NOT_ON_SERVER;
+ goto done;
}
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_STATUS;
req->flags = op->flags;
req->data.recv_status.set_value = set_status_value_directly;
req->data.recv_status.user_data = op->data.recv_status_on_client.status;
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_STATUS_DETAILS;
req->data.recv_status_details.details =
op->data.recv_status_on_client.status_details;
req->data.recv_status_details.details_capacity =
op->data.recv_status_on_client.status_details_capacity;
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_TRAILING_METADATA;
req->data.recv_metadata =
op->data.recv_status_on_client.trailing_metadata;
req->data.recv_metadata->count = 0;
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_CLOSE;
finish_func = finish_batch_with_close;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
/* Flag validation: currently allow no flags */
- if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
+ if (op->flags != 0) {
+ error = GRPC_CALL_ERROR_INVALID_FLAGS;
+ goto done;
+ }
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_STATUS;
req->flags = op->flags;
req->data.recv_status.set_value = set_cancelled_value;
req->data.recv_status.user_data =
op->data.recv_close_on_server.cancelled;
req = &reqs[out++];
- if (out > GRPC_IOREQ_OP_COUNT) return GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ if (out > GRPC_IOREQ_OP_COUNT) {
+ error = GRPC_CALL_ERROR_BATCH_TOO_BIG;
+ goto done;
+ }
req->op = GRPC_IOREQ_RECV_CLOSE;
finish_func = finish_batch_with_close;
break;
@@ -1723,7 +1809,11 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
GRPC_CALL_INTERNAL_REF(call, "completion");
grpc_cq_begin_op(call->cq);
- return grpc_call_start_ioreq_and_call_back(call, reqs, out, finish_func, tag);
+ error = grpc_call_start_ioreq_and_call_back(&exec_ctx, call, reqs, out,
+ finish_func, tag);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
}
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
diff --git a/src/core/surface/call.h b/src/core/surface/call.h
index 00638e43b5..7ac0c92ab7 100644
--- a/src/core/surface/call.h
+++ b/src/core/surface/call.h
@@ -82,11 +82,13 @@ typedef union {
typedef struct {
grpc_ioreq_op op;
- gpr_uint32 flags; /**< A copy of the write flags from grpc_op */
+ gpr_uint32 flags;
+ /**< A copy of the write flags from grpc_op */
grpc_ioreq_data data;
} grpc_ioreq;
-typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success,
+typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, int success,
void *user_data);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
@@ -97,28 +99,29 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
size_t add_initial_metadata_count,
gpr_timespec send_deadline);
-void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq);
+void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_completion_queue *cq);
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
#ifdef GRPC_CALL_REF_COUNT_DEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
-void grpc_call_internal_unref(grpc_call *call, const char *reason,
- int allow_immediate_deletion);
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ const char *reason);
#define GRPC_CALL_INTERNAL_REF(call, reason) \
grpc_call_internal_ref(call, reason)
-#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) \
- grpc_call_internal_unref(call, reason, allow_immediate_deletion)
+#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
+ grpc_call_internal_unref(exec_ctx, call, reason)
#else
void grpc_call_internal_ref(grpc_call *call);
-void grpc_call_internal_unref(grpc_call *call, int allow_immediate_deletion);
+void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call);
#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
-#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) \
- grpc_call_internal_unref(call, allow_immediate_deletion)
+#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
+ grpc_call_internal_unref(exec_ctx, call)
#endif
grpc_call_error grpc_call_start_ioreq_and_call_back(
- grpc_call *call, const grpc_ioreq *reqs, size_t nreqs,
- grpc_ioreq_completion_func on_complete, void *user_data);
+ grpc_exec_ctx *exec_ctx, grpc_call *call, const grpc_ioreq *reqs,
+ size_t nreqs, grpc_ioreq_completion_func on_complete, void *user_data);
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
diff --git a/src/core/surface/call_details.c b/src/core/surface/call_details.c
index 67862d7afe..65d2d1da5b 100644
--- a/src/core/surface/call_details.c
+++ b/src/core/surface/call_details.c
@@ -36,11 +36,11 @@
#include <string.h>
-void grpc_call_details_init(grpc_call_details *cd) {
+void grpc_call_details_init(grpc_call_details* cd) {
memset(cd, 0, sizeof(*cd));
}
-void grpc_call_details_destroy(grpc_call_details *cd) {
+void grpc_call_details_destroy(grpc_call_details* cd) {
gpr_free(cd->method);
gpr_free(cd->host);
}
diff --git a/src/core/surface/channel.c b/src/core/surface/channel.c
index a89523b3ab..08d003daca 100644
--- a/src/core/surface/channel.c
+++ b/src/core/surface/channel.c
@@ -77,7 +77,6 @@ struct grpc_channel {
gpr_mu registered_call_mu;
registered_call *registered_calls;
- grpc_iomgr_closure destroy_closure;
char *target;
};
@@ -91,7 +90,8 @@ struct grpc_channel {
#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
grpc_channel *grpc_channel_create_from_filters(
- const char *target, const grpc_channel_filter **filters, size_t num_filters,
+ grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_filter **filters, size_t num_filters,
const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client) {
size_t i;
size_t size =
@@ -177,7 +177,7 @@ grpc_channel *grpc_channel_create_from_filters(
gpr_free(default_authority);
}
- grpc_channel_stack_init(filters, num_filters, channel, args,
+ grpc_channel_stack_init(exec_ctx, filters, num_filters, channel, args,
channel->metadata_context,
CHANNEL_STACK_FROM_CHANNEL(channel));
@@ -270,10 +270,9 @@ void grpc_channel_internal_ref(grpc_channel *c) {
gpr_ref(&c->refs);
}
-static void destroy_channel(void *p, int ok) {
- grpc_channel *channel = p;
+static void destroy_channel(grpc_exec_ctx *exec_ctx, grpc_channel *channel) {
size_t i;
- grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
+ grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
GRPC_MDELEM_UNREF(channel->grpc_status_elem[i]);
}
@@ -302,28 +301,31 @@ static void destroy_channel(void *p, int ok) {
}
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
-void grpc_channel_internal_unref(grpc_channel *channel, const char *reason) {
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ const char *reason) {
gpr_log(GPR_DEBUG, "CHANNEL: unref %p %d -> %d [%s]", channel,
channel->refs.count, channel->refs.count - 1, reason);
#else
-void grpc_channel_internal_unref(grpc_channel *channel) {
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *channel) {
#endif
if (gpr_unref(&channel->refs)) {
- channel->destroy_closure.cb = destroy_channel;
- channel->destroy_closure.cb_arg = channel;
- grpc_iomgr_add_callback(&channel->destroy_closure);
+ destroy_channel(exec_ctx, channel);
}
}
void grpc_channel_destroy(grpc_channel *channel) {
grpc_transport_op op;
grpc_channel_element *elem;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
memset(&op, 0, sizeof(op));
op.disconnect = 1;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
- elem->filter->start_transport_op(elem, &op);
+ elem->filter->start_transport_op(&exec_ctx, elem, &op);
- GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
+ GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "channel");
+
+ grpc_exec_ctx_finish(&exec_ctx);
}
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {
diff --git a/src/core/surface/channel.h b/src/core/surface/channel.h
index f271616f60..e5030d52d2 100644
--- a/src/core/surface/channel.h
+++ b/src/core/surface/channel.h
@@ -38,7 +38,8 @@
#include "src/core/client_config/subchannel_factory.h"
grpc_channel *grpc_channel_create_from_filters(
- const char *target, const grpc_channel_filter **filters, size_t count,
+ grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_filter **filters, size_t count,
const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client);
/** Get a (borrowed) pointer to this channels underlying channel stack */
@@ -63,18 +64,20 @@ gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
-void grpc_channel_internal_unref(grpc_channel *channel, const char *reason);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ const char *reason);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel, reason)
-#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
- grpc_channel_internal_unref(channel, reason)
+#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
+ grpc_channel_internal_unref(exec_ctx, channel, reason)
#else
void grpc_channel_internal_ref(grpc_channel *channel);
-void grpc_channel_internal_unref(grpc_channel *channel);
+void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_channel *channel);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel)
-#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
- grpc_channel_internal_unref(channel)
+#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
+ grpc_channel_internal_unref(exec_ctx, channel)
#endif
#endif /* GRPC_INTERNAL_CORE_SURFACE_CHANNEL_H */
diff --git a/src/core/surface/channel_connectivity.c b/src/core/surface/channel_connectivity.c
index 5c55ad3655..47cbab154f 100644
--- a/src/core/surface/channel_connectivity.c
+++ b/src/core/surface/channel_connectivity.c
@@ -45,15 +45,20 @@ grpc_connectivity_state grpc_channel_check_connectivity_state(
/* forward through to the underlying client channel */
grpc_channel_element *client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_connectivity_state state;
if (client_channel_elem->filter != &grpc_client_channel_filter) {
gpr_log(GPR_ERROR,
"grpc_channel_check_connectivity_state called on something that is "
"not a client channel, but '%s'",
client_channel_elem->filter->name);
+ grpc_exec_ctx_finish(&exec_ctx);
return GRPC_CHANNEL_FATAL_FAILURE;
}
- return grpc_client_channel_check_connectivity_state(client_channel_elem,
- try_to_connect);
+ state = grpc_client_channel_check_connectivity_state(
+ &exec_ctx, client_channel_elem, try_to_connect);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return state;
}
typedef enum {
@@ -68,7 +73,7 @@ typedef struct {
callback_phase phase;
int success;
int removed;
- grpc_iomgr_closure on_complete;
+ grpc_closure on_complete;
grpc_alarm alarm;
grpc_connectivity_state state;
grpc_completion_queue *cq;
@@ -77,13 +82,14 @@ typedef struct {
void *tag;
} state_watcher;
-static void delete_state_watcher(state_watcher *w) {
- GRPC_CHANNEL_INTERNAL_UNREF(w->channel, "watch_connectivity");
+static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel, "watch_connectivity");
gpr_mu_destroy(&w->mu);
gpr_free(w);
}
-static void finished_completion(void *pw, grpc_cq_completion *ignored) {
+static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
+ grpc_cq_completion *ignored) {
int delete = 0;
state_watcher *w = pw;
gpr_mu_lock(&w->mu);
@@ -103,11 +109,12 @@ static void finished_completion(void *pw, grpc_cq_completion *ignored) {
gpr_mu_unlock(&w->mu);
if (delete) {
- delete_state_watcher(w);
+ delete_state_watcher(exec_ctx, w);
}
}
-static void partly_done(state_watcher *w, int due_to_completion) {
+static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
+ int due_to_completion) {
int delete = 0;
grpc_channel_element *client_channel_elem = NULL;
@@ -116,7 +123,7 @@ static void partly_done(state_watcher *w, int due_to_completion) {
w->removed = 1;
client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
- grpc_client_channel_del_interested_party(client_channel_elem,
+ grpc_client_channel_del_interested_party(exec_ctx, client_channel_elem,
grpc_cq_pollset(w->cq));
}
gpr_mu_unlock(&w->mu);
@@ -124,15 +131,15 @@ static void partly_done(state_watcher *w, int due_to_completion) {
gpr_mu_lock(&w->mu);
w->success = 1;
gpr_mu_unlock(&w->mu);
- grpc_alarm_cancel(&w->alarm);
+ grpc_alarm_cancel(exec_ctx, &w->alarm);
}
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
w->phase = CALLING_BACK;
- grpc_cq_end_op(w->cq, w->tag, w->success, finished_completion, w,
- &w->completion_storage);
+ grpc_cq_end_op(exec_ctx, w->cq, w->tag, w->success, finished_completion,
+ w, &w->completion_storage);
break;
case CALLING_BACK:
w->phase = CALLING_BACK_AND_FINISHED;
@@ -148,25 +155,30 @@ static void partly_done(state_watcher *w, int due_to_completion) {
gpr_mu_unlock(&w->mu);
if (delete) {
- delete_state_watcher(w);
+ delete_state_watcher(exec_ctx, w);
}
}
-static void watch_complete(void *pw, int success) { partly_done(pw, 1); }
+static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw, int success) {
+ partly_done(exec_ctx, pw, 1);
+}
-static void timeout_complete(void *pw, int success) { partly_done(pw, 0); }
+static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw, int success) {
+ partly_done(exec_ctx, pw, 0);
+}
void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
grpc_channel_element *client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
state_watcher *w = gpr_malloc(sizeof(*w));
grpc_cq_begin_op(cq);
gpr_mu_init(&w->mu);
- grpc_iomgr_closure_init(&w->on_complete, watch_complete, w);
+ grpc_closure_init(&w->on_complete, watch_complete, w);
w->phase = WAITING;
w->state = last_observed_state;
w->success = 0;
@@ -175,7 +187,7 @@ void grpc_channel_watch_connectivity_state(
w->tag = tag;
w->channel = channel;
- grpc_alarm_init(&w->alarm,
+ grpc_alarm_init(&exec_ctx, &w->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
timeout_complete, w, gpr_now(GPR_CLOCK_MONOTONIC));
@@ -184,12 +196,14 @@ void grpc_channel_watch_connectivity_state(
"grpc_channel_watch_connectivity_state called on something that is "
"not a client channel, but '%s'",
client_channel_elem->filter->name);
- grpc_iomgr_add_delayed_callback(&w->on_complete, 1);
+ grpc_exec_ctx_enqueue(&exec_ctx, &w->on_complete, 1);
} else {
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_connectivity");
- grpc_client_channel_add_interested_party(client_channel_elem,
+ grpc_client_channel_add_interested_party(&exec_ctx, client_channel_elem,
grpc_cq_pollset(cq));
- grpc_client_channel_watch_connectivity_state(client_channel_elem, &w->state,
- &w->on_complete);
+ grpc_client_channel_watch_connectivity_state(&exec_ctx, client_channel_elem,
+ &w->state, &w->on_complete);
}
+
+ grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c
index d323d0d74f..05591ce27f 100644
--- a/src/core/surface/channel_create.c
+++ b/src/core/surface/channel_create.c
@@ -52,9 +52,15 @@ typedef struct {
grpc_connector base;
gpr_refcount refs;
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
grpc_connect_in_args args;
grpc_connect_out_args *result;
+
+ grpc_endpoint *tcp;
+
+ grpc_mdctx *mdctx;
+
+ grpc_closure connected;
} connector;
static void connector_ref(grpc_connector *con) {
@@ -62,20 +68,23 @@ static void connector_ref(grpc_connector *con) {
gpr_ref(&c->refs);
}
-static void connector_unref(grpc_connector *con) {
+static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
connector *c = (connector *)con;
if (gpr_unref(&c->refs)) {
+ grpc_mdctx_unref(c->mdctx);
gpr_free(c);
}
}
-static void connected(void *arg, grpc_endpoint *tcp) {
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
connector *c = arg;
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
+ grpc_endpoint *tcp = c->tcp;
if (tcp != NULL) {
c->result->transport = grpc_create_chttp2_transport(
- c->args.channel_args, tcp, c->args.metadata_context, 1);
- grpc_chttp2_transport_start_reading(c->result->transport, NULL, 0);
+ exec_ctx, c->args.channel_args, tcp, c->mdctx, 1);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
+ 0);
GPR_ASSERT(c->result->transport);
c->result->filters = gpr_malloc(sizeof(grpc_channel_filter *));
c->result->filters[0] = &grpc_http_client_filter;
@@ -85,23 +94,26 @@ static void connected(void *arg, grpc_endpoint *tcp) {
}
notify = c->notify;
c->notify = NULL;
- grpc_iomgr_add_callback(notify);
+ notify->cb(exec_ctx, notify->cb_arg, 1);
}
-static void connector_shutdown(grpc_connector *con) {}
+static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {}
-static void connector_connect(grpc_connector *con,
+static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
const grpc_connect_in_args *args,
grpc_connect_out_args *result,
- grpc_iomgr_closure *notify) {
+ grpc_closure *notify) {
connector *c = (connector *)con;
GPR_ASSERT(c->notify == NULL);
GPR_ASSERT(notify->cb);
c->notify = notify;
c->args = *args;
c->result = result;
- grpc_tcp_client_connect(connected, c, args->interested_parties, args->addr,
- args->addr_len, args->deadline);
+ c->tcp = NULL;
+ grpc_closure_init(&c->connected, connected, c);
+ grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
+ args->interested_parties, args->addr, args->addr_len,
+ args->deadline);
}
static const grpc_connector_vtable connector_vtable = {
@@ -120,10 +132,11 @@ static void subchannel_factory_ref(grpc_subchannel_factory *scf) {
gpr_ref(&f->refs);
}
-static void subchannel_factory_unref(grpc_subchannel_factory *scf) {
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
subchannel_factory *f = (subchannel_factory *)scf;
if (gpr_unref(&f->refs)) {
- GRPC_CHANNEL_INTERNAL_UNREF(f->master, "subchannel_factory");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
grpc_channel_args_destroy(f->merge_args);
grpc_mdctx_unref(f->mdctx);
gpr_free(f);
@@ -131,7 +144,8 @@ static void subchannel_factory_unref(grpc_subchannel_factory *scf) {
}
static grpc_subchannel *subchannel_factory_create_subchannel(
- grpc_subchannel_factory *scf, grpc_subchannel_args *args) {
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
subchannel_factory *f = (subchannel_factory *)scf;
connector *c = gpr_malloc(sizeof(*c));
grpc_channel_args *final_args =
@@ -139,12 +153,14 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
grpc_subchannel *s;
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
+ c->mdctx = f->mdctx;
+ grpc_mdctx_ref(c->mdctx);
gpr_ref_init(&c->refs, 1);
args->mdctx = f->mdctx;
args->args = final_args;
args->master = f->master;
s = grpc_subchannel_create(&c->base, args);
- grpc_connector_unref(&c->base);
+ grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
return s;
}
@@ -166,6 +182,7 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
grpc_resolver *resolver;
subchannel_factory *f;
grpc_mdctx *mdctx = grpc_mdctx_create();
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t n = 0;
GPR_ASSERT(!reserved);
if (grpc_channel_args_is_census_enabled(args)) {
@@ -175,8 +192,8 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
filters[n++] = &grpc_client_channel_filter;
GPR_ASSERT(n <= MAX_FILTERS);
- channel =
- grpc_channel_create_from_filters(target, filters, n, args, mdctx, 1);
+ channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, n,
+ args, mdctx, 1);
f = gpr_malloc(sizeof(*f));
f->base.vtable = &subchannel_factory_vtable;
@@ -191,10 +208,12 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
return NULL;
}
- grpc_client_channel_set_resolver(grpc_channel_get_channel_stack(channel),
- resolver);
- GRPC_RESOLVER_UNREF(resolver, "create");
- grpc_subchannel_factory_unref(&f->base);
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+
+ grpc_exec_ctx_finish(&exec_ctx);
return channel;
}
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index b58115a93f..5dac8ebcf8 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -67,8 +67,12 @@ struct grpc_completion_queue {
int is_server_cq;
int num_pluckers;
plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
+ grpc_closure pollset_destroy_done;
};
+static void on_pollset_destroy_done(grpc_exec_ctx *exec_ctx, void *cc,
+ int success);
+
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
GPR_ASSERT(!reserved);
@@ -80,6 +84,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_pollset_init(&cc->pollset);
cc->completed_tail = &cc->completed_head;
cc->completed_head.next = (gpr_uintptr)cc->completed_tail;
+ grpc_closure_init(&cc->pollset_destroy_done, on_pollset_destroy_done, cc);
return cc;
}
@@ -94,7 +99,8 @@ void grpc_cq_internal_ref(grpc_completion_queue *cc) {
gpr_ref(&cc->owning_refs);
}
-static void on_pollset_destroy_done(void *arg) {
+static void on_pollset_destroy_done(grpc_exec_ctx *exec_ctx, void *arg,
+ int success) {
grpc_completion_queue *cc = arg;
GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}
@@ -126,8 +132,10 @@ void grpc_cq_begin_op(grpc_completion_queue *cc) {
/* Signal the end of an operation - if this is the last waiting-to-be-queued
event, then enter shutdown mode */
/* Queue a GRPC_OP_COMPLETED operation */
-void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
- void (*done)(void *done_arg, grpc_cq_completion *storage),
+void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ void *tag, int success,
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
int shutdown;
int i;
@@ -162,7 +170,7 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
+ grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
}
}
@@ -172,6 +180,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
grpc_pollset_worker worker;
int first_loop = 1;
gpr_timespec now;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(!reserved);
@@ -190,7 +199,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
- c->done(c->done_arg, c);
+ c->done(&exec_ctx, c->done_arg, c);
break;
}
if (cc->shutdown) {
@@ -207,10 +216,11 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
first_loop = 0;
- grpc_pollset_work(&cc->pollset, &worker, now, deadline);
+ grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
+ grpc_exec_ctx_finish(&exec_ctx);
return ret;
}
@@ -247,6 +257,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_pollset_worker worker;
gpr_timespec now;
int first_loop = 1;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(!reserved);
@@ -268,7 +279,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
- c->done(c->done_arg, c);
+ c->done(&exec_ctx, c->done_arg, c);
goto done;
}
prev = c;
@@ -299,18 +310,20 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
break;
}
first_loop = 0;
- grpc_pollset_work(&cc->pollset, &worker, now, deadline);
+ grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
del_plucker(cc, tag, &worker);
}
done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
+ grpc_exec_ctx_finish(&exec_ctx);
return ret;
}
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
if (cc->shutdown_called) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
@@ -324,8 +337,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
- grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
+ grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
diff --git a/src/core/surface/completion_queue.h b/src/core/surface/completion_queue.h
index 74dc09e36e..5f8282e542 100644
--- a/src/core/surface/completion_queue.h
+++ b/src/core/surface/completion_queue.h
@@ -44,7 +44,8 @@ typedef struct grpc_cq_completion {
void *tag;
/** done callback - called when this queue element is no longer
needed by the completion queue */
- void (*done)(void *done_arg, struct grpc_cq_completion *c);
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ struct grpc_cq_completion *c);
void *done_arg;
/** next pointer; low bit is used to indicate success or not */
gpr_uintptr next;
@@ -71,8 +72,10 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc);
void grpc_cq_begin_op(grpc_completion_queue *cc);
/* Queue a GRPC_OP_COMPLETED operation */
-void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
- void (*done)(void *done_arg, grpc_cq_completion *storage),
+void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ void *tag, int success,
+ void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage);
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
diff --git a/src/core/surface/lame_client.c b/src/core/surface/lame_client.c
index 80704cbf67..0b22d225d0 100644
--- a/src/core/surface/lame_client.c
+++ b/src/core/surface/lame_client.c
@@ -54,14 +54,15 @@ typedef struct {
const char *error_message;
} channel_data;
-static void lame_start_transport_stream_op(grpc_call_element *elem,
+static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
if (op->send_ops != NULL) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
- op->on_done_send->cb(op->on_done_send->cb_arg, 0);
+ op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops != NULL) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
@@ -80,42 +81,45 @@ static void lame_start_transport_stream_op(grpc_call_element *elem,
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
- op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);
+ op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed != NULL) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
+ op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
}
}
-static char *lame_get_peer(grpc_call_element *elem) {
+static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
return grpc_channel_get_target(chand->master);
}
-static void lame_start_transport_op(grpc_channel_element *elem,
+static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
grpc_transport_op *op) {
if (op->on_connectivity_state_change) {
GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE);
*op->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
op->on_connectivity_state_change->cb(
- op->on_connectivity_state_change->cb_arg, 1);
+ exec_ctx, op->on_connectivity_state_change->cb_arg, 1);
}
if (op->on_consumed != NULL) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 1);
+ op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 1);
}
}
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *transport_server_data,
grpc_transport_stream_op *initial_op) {
if (initial_op) {
- grpc_transport_stream_op_finish_with_failure(initial_op);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, initial_op);
}
}
-static void destroy_call_elem(grpc_call_element *elem) {}
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {}
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
channel_data *chand = elem->channel_data;
@@ -125,7 +129,8 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
chand->master = master;
}
-static void destroy_channel_elem(grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {}
static const grpc_channel_filter lame_filter = {
lame_start_transport_stream_op,
@@ -148,13 +153,15 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
grpc_channel *channel;
grpc_channel_element *elem;
channel_data *chand;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
static const grpc_channel_filter *filters[] = {&lame_filter};
- channel = grpc_channel_create_from_filters(target, filters, 1, NULL,
- grpc_mdctx_create(), 1);
+ channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, 1,
+ NULL, grpc_mdctx_create(), 1);
elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
GPR_ASSERT(elem->filter == &lame_filter);
chand = (channel_data *)elem->channel_data;
chand->error_code = error_code;
chand->error_message = error_message;
+ grpc_exec_ctx_finish(&exec_ctx);
return channel;
}
diff --git a/src/core/surface/metadata_array.c b/src/core/surface/metadata_array.c
index 4010977497..648c579266 100644
--- a/src/core/surface/metadata_array.c
+++ b/src/core/surface/metadata_array.c
@@ -36,10 +36,10 @@
#include <string.h>
-void grpc_metadata_array_init(grpc_metadata_array *array) {
+void grpc_metadata_array_init(grpc_metadata_array* array) {
memset(array, 0, sizeof(*array));
}
-void grpc_metadata_array_destroy(grpc_metadata_array *array) {
+void grpc_metadata_array_destroy(grpc_metadata_array* array) {
gpr_free(array->metadata);
}
diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c
index 3f3469720d..d6070a54a8 100644
--- a/src/core/surface/secure_channel_create.c
+++ b/src/core/surface/secure_channel_create.c
@@ -57,12 +57,17 @@ typedef struct {
grpc_channel_security_connector *security_connector;
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
grpc_connect_in_args args;
grpc_connect_out_args *result;
gpr_mu mu;
grpc_endpoint *connecting_endpoint;
+ grpc_endpoint *newly_connecting_endpoint;
+
+ grpc_closure connected_closure;
+
+ grpc_mdctx *mdctx;
} connector;
static void connector_ref(grpc_connector *con) {
@@ -70,18 +75,20 @@ static void connector_ref(grpc_connector *con) {
gpr_ref(&c->refs);
}
-static void connector_unref(grpc_connector *con) {
+static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
connector *c = (connector *)con;
if (gpr_unref(&c->refs)) {
+ grpc_mdctx_unref(c->mdctx);
gpr_free(c);
}
}
-static void on_secure_handshake_done(void *arg, grpc_security_status status,
+static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
connector *c = arg;
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
gpr_mu_lock(&c->mu);
if (c->connecting_endpoint == NULL) {
memset(c->result, 0, sizeof(*c->result));
@@ -97,8 +104,9 @@ static void on_secure_handshake_done(void *arg, grpc_security_status status,
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
c->result->transport = grpc_create_chttp2_transport(
- c->args.channel_args, secure_endpoint, c->args.metadata_context, 1);
- grpc_chttp2_transport_start_reading(c->result->transport, NULL, 0);
+ exec_ctx, c->args.channel_args, secure_endpoint, c->mdctx, 1);
+ grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
+ 0);
c->result->filters = gpr_malloc(sizeof(grpc_channel_filter *) * 2);
c->result->filters[0] = &grpc_http_client_filter;
c->result->filters[1] = &grpc_client_auth_filter;
@@ -106,28 +114,29 @@ static void on_secure_handshake_done(void *arg, grpc_security_status status,
}
notify = c->notify;
c->notify = NULL;
- grpc_iomgr_add_callback(notify);
+ notify->cb(exec_ctx, notify->cb_arg, 1);
}
-static void connected(void *arg, grpc_endpoint *tcp) {
+static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
connector *c = arg;
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
+ grpc_endpoint *tcp = c->newly_connecting_endpoint;
if (tcp != NULL) {
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting_endpoint == NULL);
c->connecting_endpoint = tcp;
gpr_mu_unlock(&c->mu);
- grpc_security_connector_do_handshake(&c->security_connector->base, tcp,
- on_secure_handshake_done, c);
+ grpc_security_connector_do_handshake(exec_ctx, &c->security_connector->base,
+ tcp, on_secure_handshake_done, c);
} else {
memset(c->result, 0, sizeof(*c->result));
notify = c->notify;
c->notify = NULL;
- grpc_iomgr_add_callback(notify);
+ notify->cb(exec_ctx, notify->cb_arg, 1);
}
}
-static void connector_shutdown(grpc_connector *con) {
+static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
connector *c = (connector *)con;
grpc_endpoint *ep;
gpr_mu_lock(&c->mu);
@@ -135,14 +144,14 @@ static void connector_shutdown(grpc_connector *con) {
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
if (ep) {
- grpc_endpoint_shutdown(ep);
+ grpc_endpoint_shutdown(exec_ctx, ep);
}
}
-static void connector_connect(grpc_connector *con,
+static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
const grpc_connect_in_args *args,
grpc_connect_out_args *result,
- grpc_iomgr_closure *notify) {
+ grpc_closure *notify) {
connector *c = (connector *)con;
GPR_ASSERT(c->notify == NULL);
GPR_ASSERT(notify->cb);
@@ -152,8 +161,10 @@ static void connector_connect(grpc_connector *con,
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting_endpoint == NULL);
gpr_mu_unlock(&c->mu);
- grpc_tcp_client_connect(connected, c, args->interested_parties, args->addr,
- args->addr_len, args->deadline);
+ grpc_closure_init(&c->connected_closure, connected, c);
+ grpc_tcp_client_connect(
+ exec_ctx, &c->connected_closure, &c->newly_connecting_endpoint,
+ args->interested_parties, args->addr, args->addr_len, args->deadline);
}
static const grpc_connector_vtable connector_vtable = {
@@ -173,12 +184,13 @@ static void subchannel_factory_ref(grpc_subchannel_factory *scf) {
gpr_ref(&f->refs);
}
-static void subchannel_factory_unref(grpc_subchannel_factory *scf) {
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scf) {
subchannel_factory *f = (subchannel_factory *)scf;
if (gpr_unref(&f->refs)) {
GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
"subchannel_factory");
- GRPC_CHANNEL_INTERNAL_UNREF(f->master, "subchannel_factory");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, f->master, "subchannel_factory");
grpc_channel_args_destroy(f->merge_args);
grpc_mdctx_unref(f->mdctx);
gpr_free(f);
@@ -186,7 +198,8 @@ static void subchannel_factory_unref(grpc_subchannel_factory *scf) {
}
static grpc_subchannel *subchannel_factory_create_subchannel(
- grpc_subchannel_factory *scf, grpc_subchannel_args *args) {
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
+ grpc_subchannel_args *args) {
subchannel_factory *f = (subchannel_factory *)scf;
connector *c = gpr_malloc(sizeof(*c));
grpc_channel_args *final_args =
@@ -195,13 +208,15 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
c->security_connector = f->security_connector;
+ c->mdctx = f->mdctx;
gpr_mu_init(&c->mu);
+ grpc_mdctx_ref(c->mdctx);
gpr_ref_init(&c->refs, 1);
- args->mdctx = f->mdctx;
args->args = final_args;
args->master = f->master;
+ args->mdctx = f->mdctx;
s = grpc_subchannel_create(&c->base, args);
- grpc_connector_unref(&c->base);
+ grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
return s;
}
@@ -222,32 +237,35 @@ grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
grpc_arg connector_arg;
grpc_channel_args *args_copy;
grpc_channel_args *new_args_from_connector;
- grpc_channel_security_connector *connector;
+ grpc_channel_security_connector *security_connector;
grpc_mdctx *mdctx;
grpc_resolver *resolver;
subchannel_factory *f;
#define MAX_FILTERS 3
const grpc_channel_filter *filters[MAX_FILTERS];
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t n = 0;
GPR_ASSERT(reserved == NULL);
if (grpc_find_security_connector_in_args(args) != NULL) {
gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
+ grpc_exec_ctx_finish(&exec_ctx);
return grpc_lame_client_channel_create(
target, GRPC_STATUS_INVALID_ARGUMENT,
"Security connector exists in channel args.");
}
if (grpc_credentials_create_security_connector(
- creds, target, args, NULL, &connector, &new_args_from_connector) !=
- GRPC_SECURITY_OK) {
+ creds, target, args, NULL, &security_connector,
+ &new_args_from_connector) != GRPC_SECURITY_OK) {
+ grpc_exec_ctx_finish(&exec_ctx);
return grpc_lame_client_channel_create(
target, GRPC_STATUS_INVALID_ARGUMENT,
"Failed to create security connector.");
}
mdctx = grpc_mdctx_create();
- connector_arg = grpc_security_connector_to_arg(&connector->base);
+ connector_arg = grpc_security_connector_to_arg(&security_connector->base);
args_copy = grpc_channel_args_copy_and_add(
new_args_from_connector != NULL ? new_args_from_connector : args,
&connector_arg, 1);
@@ -258,34 +276,37 @@ grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
filters[n++] = &grpc_client_channel_filter;
GPR_ASSERT(n <= MAX_FILTERS);
- channel =
- grpc_channel_create_from_filters(target, filters, n, args_copy, mdctx, 1);
+ channel = grpc_channel_create_from_filters(&exec_ctx, target, filters, n,
+ args_copy, mdctx, 1);
f = gpr_malloc(sizeof(*f));
f->base.vtable = &subchannel_factory_vtable;
gpr_ref_init(&f->refs, 1);
grpc_mdctx_ref(mdctx);
f->mdctx = mdctx;
- GRPC_SECURITY_CONNECTOR_REF(&connector->base, "subchannel_factory");
- f->security_connector = connector;
+ GRPC_SECURITY_CONNECTOR_REF(&security_connector->base, "subchannel_factory");
+ f->security_connector = security_connector;
f->merge_args = grpc_channel_args_copy(args_copy);
f->master = channel;
GRPC_CHANNEL_INTERNAL_REF(channel, "subchannel_factory");
resolver = grpc_resolver_create(target, &f->base);
if (!resolver) {
+ grpc_exec_ctx_finish(&exec_ctx);
return NULL;
}
- grpc_client_channel_set_resolver(grpc_channel_get_channel_stack(channel),
- resolver);
- GRPC_RESOLVER_UNREF(resolver, "create");
- grpc_subchannel_factory_unref(&f->base);
- GRPC_SECURITY_CONNECTOR_UNREF(&connector->base, "channel_create");
+ grpc_client_channel_set_resolver(
+ &exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
+ grpc_subchannel_factory_unref(&exec_ctx, &f->base);
+ GRPC_SECURITY_CONNECTOR_UNREF(&security_connector->base, "channel_create");
grpc_channel_args_destroy(args_copy);
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(new_args_from_connector);
}
+ grpc_exec_ctx_finish(&exec_ctx);
+
return channel;
}
diff --git a/src/core/surface/server.c b/src/core/surface/server.c
index 3d404f78a4..27fc0945f9 100644
--- a/src/core/surface/server.c
+++ b/src/core/surface/server.c
@@ -56,10 +56,12 @@
typedef struct listener {
void *arg;
- void (*start)(grpc_server *server, void *arg, grpc_pollset **pollsets,
- size_t pollset_count);
- void (*destroy)(grpc_server *server, void *arg);
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t pollset_count);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *closure);
struct listener *next;
+ grpc_closure destroy_done;
} listener;
typedef struct call_data call_data;
@@ -113,8 +115,8 @@ struct channel_data {
channel_registered_method *registered_methods;
gpr_uint32 registered_method_slots;
gpr_uint32 registered_method_max_probes;
- grpc_iomgr_closure finish_destroy_channel_closure;
- grpc_iomgr_closure channel_connectivity_changed;
+ grpc_closure finish_destroy_channel_closure;
+ grpc_closure channel_connectivity_changed;
};
typedef struct shutdown_tag {
@@ -153,10 +155,10 @@ struct call_data {
grpc_stream_op_buffer *recv_ops;
grpc_stream_state *recv_state;
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
- grpc_iomgr_closure server_on_recv;
- grpc_iomgr_closure kill_zombie_closure;
+ grpc_closure server_on_recv;
+ grpc_closure kill_zombie_closure;
call_data *pending_next;
};
@@ -181,7 +183,7 @@ typedef struct {
struct grpc_server {
size_t channel_filter_count;
- const grpc_channel_filter **channel_filters;
+ grpc_channel_filter const **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue **cqs;
@@ -224,12 +226,13 @@ struct grpc_server {
#define SERVER_FROM_CALL_ELEM(elem) \
(((channel_data *)(elem)->channel_data)->server)
-static void begin_call(grpc_server *server, call_data *calld,
- requested_call *rc);
-static void fail_call(grpc_server *server, requested_call *rc);
+static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ call_data *calld, requested_call *rc);
+static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ requested_call *rc);
/* Before calling maybe_finish_shutdown, we must hold mu_global and not
hold mu_call */
-static void maybe_finish_shutdown(grpc_server *server);
+static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_server *server);
/*
* channel broadcaster
@@ -252,18 +255,19 @@ static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
}
struct shutdown_cleanup_args {
- grpc_iomgr_closure closure;
+ grpc_closure closure;
gpr_slice slice;
};
-static void shutdown_cleanup(void *arg, int iomgr_status_ignored) {
+static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
+ int iomgr_status_ignored) {
struct shutdown_cleanup_args *a = arg;
gpr_slice_unref(a->slice);
gpr_free(a);
}
-static void send_shutdown(grpc_channel *channel, int send_goaway,
- int send_disconnect) {
+static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
+ int send_goaway, int send_disconnect) {
grpc_transport_op op;
struct shutdown_cleanup_args *sc;
grpc_channel_element *elem;
@@ -275,21 +279,22 @@ static void send_shutdown(grpc_channel *channel, int send_goaway,
op.goaway_message = &sc->slice;
op.goaway_status = GRPC_STATUS_OK;
op.disconnect = send_disconnect;
- grpc_iomgr_closure_init(&sc->closure, shutdown_cleanup, sc);
+ grpc_closure_init(&sc->closure, shutdown_cleanup, sc);
op.on_consumed = &sc->closure;
elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
- elem->filter->start_transport_op(elem, &op);
+ elem->filter->start_transport_op(exec_ctx, elem, &op);
}
-static void channel_broadcaster_shutdown(channel_broadcaster *cb,
+static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx,
+ channel_broadcaster *cb,
int send_goaway,
int force_disconnect) {
size_t i;
for (i = 0; i < cb->num_channels; i++) {
- send_shutdown(cb->channels[i], send_goaway, force_disconnect);
- GRPC_CHANNEL_INTERNAL_UNREF(cb->channels[i], "broadcast");
+ send_shutdown(exec_ctx, cb->channels[i], send_goaway, force_disconnect);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, cb->channels[i], "broadcast");
}
gpr_free(cb->channels);
}
@@ -298,41 +303,41 @@ static void channel_broadcaster_shutdown(channel_broadcaster *cb,
* request_matcher
*/
-static void request_matcher_init(request_matcher *request_matcher,
- size_t entries) {
- memset(request_matcher, 0, sizeof(*request_matcher));
- request_matcher->requests = gpr_stack_lockfree_create(entries);
+static void request_matcher_init(request_matcher *rm, size_t entries) {
+ memset(rm, 0, sizeof(*rm));
+ rm->requests = gpr_stack_lockfree_create(entries);
}
-static void request_matcher_destroy(request_matcher *request_matcher) {
- GPR_ASSERT(gpr_stack_lockfree_pop(request_matcher->requests) == -1);
- gpr_stack_lockfree_destroy(request_matcher->requests);
+static void request_matcher_destroy(request_matcher *rm) {
+ GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests) == -1);
+ gpr_stack_lockfree_destroy(rm->requests);
}
-static void kill_zombie(void *elem, int success) {
+static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, int success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
}
-static void request_matcher_zombify_all_pending_calls(
- request_matcher *request_matcher) {
- while (request_matcher->pending_head) {
- call_data *calld = request_matcher->pending_head;
- request_matcher->pending_head = calld->pending_next;
+static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
+ request_matcher *rm) {
+ while (rm->pending_head) {
+ call_data *calld = rm->pending_head;
+ rm->pending_head = calld->pending_next;
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(
+ grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
}
}
-static void request_matcher_kill_requests(grpc_server *server,
+static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
+ grpc_server *server,
request_matcher *rm) {
int request_id;
while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
- fail_call(server, &server->requested_calls[request_id]);
+ fail_call(exec_ctx, server, &server->requested_calls[request_id]);
}
}
@@ -344,13 +349,13 @@ static void server_ref(grpc_server *server) {
gpr_ref(&server->internal_refcount);
}
-static void server_delete(grpc_server *server) {
+static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
registered_method *rm;
size_t i;
grpc_channel_args_destroy(server->channel_args);
gpr_mu_destroy(&server->mu_global);
gpr_mu_destroy(&server->mu_call);
- gpr_free(server->channel_filters);
+ gpr_free((void *)server->channel_filters);
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
request_matcher_destroy(&rm->request_matcher);
@@ -370,9 +375,9 @@ static void server_delete(grpc_server *server) {
gpr_free(server);
}
-static void server_unref(grpc_server *server) {
+static void server_unref(grpc_exec_ctx *exec_ctx, grpc_server *server) {
if (gpr_unref(&server->internal_refcount)) {
- server_delete(server);
+ server_delete(exec_ctx, server);
}
}
@@ -386,26 +391,28 @@ static void orphan_channel(channel_data *chand) {
chand->next = chand->prev = chand;
}
-static void finish_destroy_channel(void *cd, int success) {
+static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
+ int success) {
channel_data *chand = cd;
grpc_server *server = chand->server;
- GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
- server_unref(server);
+ gpr_log(GPR_DEBUG, "finish_destroy_channel: %p", chand->channel);
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
+ server_unref(exec_ctx, server);
}
-static void destroy_channel(channel_data *chand) {
+static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand) {
if (is_channel_orphaned(chand)) return;
GPR_ASSERT(chand->server != NULL);
orphan_channel(chand);
server_ref(chand->server);
- maybe_finish_shutdown(chand->server);
+ maybe_finish_shutdown(exec_ctx, chand->server);
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
- grpc_iomgr_add_callback(&chand->finish_destroy_channel_closure);
+ grpc_exec_ctx_enqueue(exec_ctx, &chand->finish_destroy_channel_closure, 1);
}
-static void finish_start_new_rpc(grpc_server *server, grpc_call_element *elem,
- request_matcher *request_matcher) {
+static void finish_start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ grpc_call_element *elem, request_matcher *rm) {
call_data *calld = elem->call_data;
int request_id;
@@ -413,22 +420,22 @@ static void finish_start_new_rpc(grpc_server *server, grpc_call_element *elem,
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
return;
}
- request_id = gpr_stack_lockfree_pop(request_matcher->requests);
+ request_id = gpr_stack_lockfree_pop(rm->requests);
if (request_id == -1) {
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
gpr_mu_unlock(&calld->mu_state);
- if (request_matcher->pending_head == NULL) {
- request_matcher->pending_tail = request_matcher->pending_head = calld;
+ if (rm->pending_head == NULL) {
+ rm->pending_tail = rm->pending_head = calld;
} else {
- request_matcher->pending_tail->pending_next = calld;
- request_matcher->pending_tail = calld;
+ rm->pending_tail->pending_next = calld;
+ rm->pending_tail = calld;
}
calld->pending_next = NULL;
gpr_mu_unlock(&server->mu_call);
@@ -436,11 +443,11 @@ static void finish_start_new_rpc(grpc_server *server, grpc_call_element *elem,
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
- begin_call(server, calld, &server->requested_calls[request_id]);
+ begin_call(exec_ctx, server, calld, &server->requested_calls[request_id]);
}
}
-static void start_new_rpc(grpc_call_element *elem) {
+static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_server *server = chand->server;
@@ -458,7 +465,7 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != calld->host) continue;
if (rm->method != calld->path) continue;
- finish_start_new_rpc(server, elem,
+ finish_start_new_rpc(exec_ctx, server, elem,
&rm->server_registered_method->request_matcher);
return;
}
@@ -470,12 +477,13 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != NULL) continue;
if (rm->method != calld->path) continue;
- finish_start_new_rpc(server, elem,
+ finish_start_new_rpc(exec_ctx, server, elem,
&rm->server_registered_method->request_matcher);
return;
}
}
- finish_start_new_rpc(server, elem, &server->unregistered_request_matcher);
+ finish_start_new_rpc(exec_ctx, server, elem,
+ &server->unregistered_request_matcher);
}
static int num_listeners(grpc_server *server) {
@@ -487,8 +495,9 @@ static int num_listeners(grpc_server *server) {
return n;
}
-static void done_shutdown_event(void *server, grpc_cq_completion *completion) {
- server_unref(server);
+static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_cq_completion *completion) {
+ server_unref(exec_ctx, server);
}
static int num_channels(grpc_server *server) {
@@ -501,24 +510,27 @@ static int num_channels(grpc_server *server) {
return n;
}
-static void kill_pending_work_locked(grpc_server *server) {
+static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
+ grpc_server *server) {
registered_method *rm;
- request_matcher_kill_requests(server, &server->unregistered_request_matcher);
+ request_matcher_kill_requests(exec_ctx, server,
+ &server->unregistered_request_matcher);
request_matcher_zombify_all_pending_calls(
- &server->unregistered_request_matcher);
+ exec_ctx, &server->unregistered_request_matcher);
for (rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_kill_requests(server, &rm->request_matcher);
- request_matcher_zombify_all_pending_calls(&rm->request_matcher);
+ request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
}
}
-static void maybe_finish_shutdown(grpc_server *server) {
+static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_server *server) {
size_t i;
if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
return;
}
- kill_pending_work_locked(server);
+ kill_pending_work_locked(exec_ctx, server);
if (server->root_channel_data.next != &server->root_channel_data ||
server->listeners_destroyed < num_listeners(server)) {
@@ -538,8 +550,8 @@ static void maybe_finish_shutdown(grpc_server *server) {
server->shutdown_published = 1;
for (i = 0; i < server->num_shutdown_tags; i++) {
server_ref(server);
- grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag, 1,
- done_shutdown_event, server,
+ grpc_cq_end_op(exec_ctx, server->shutdown_tags[i].cq,
+ server->shutdown_tags[i].tag, 1, done_shutdown_event, server,
&server->shutdown_tags[i].completion);
}
}
@@ -558,7 +570,7 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
return md;
}
-static void server_on_recv(void *ptr, int success) {
+static void server_on_recv(grpc_exec_ctx *exec_ctx, void *ptr, int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
gpr_timespec op_deadline;
@@ -578,7 +590,7 @@ static void server_on_recv(void *ptr, int success) {
}
if (calld->host && calld->path) {
calld->got_initial_metadata = 1;
- start_new_rpc(elem);
+ start_new_rpc(exec_ctx, elem);
}
break;
}
@@ -594,8 +606,8 @@ static void server_on_recv(void *ptr, int success) {
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
} else {
gpr_mu_unlock(&calld->mu_state);
}
@@ -605,8 +617,8 @@ static void server_on_recv(void *ptr, int success) {
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
@@ -618,7 +630,7 @@ static void server_on_recv(void *ptr, int success) {
break;
}
- calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+ calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
static void server_mutate_op(grpc_call_element *elem,
@@ -634,11 +646,12 @@ static void server_mutate_op(grpc_call_element *elem,
}
}
-static void server_start_transport_stream_op(grpc_call_element *elem,
+static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
- grpc_call_next_op(elem, op);
+ grpc_call_next_op(exec_ctx, elem, op);
}
static void accept_stream(void *cd, grpc_transport *transport,
@@ -649,7 +662,8 @@ static void accept_stream(void *cd, grpc_transport *transport,
0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
-static void channel_connectivity_changed(void *cd, int iomgr_status_ignored) {
+static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
+ int iomgr_status_ignored) {
channel_data *chand = cd;
grpc_server *server = chand->server;
if (chand->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
@@ -657,18 +671,19 @@ static void channel_connectivity_changed(void *cd, int iomgr_status_ignored) {
memset(&op, 0, sizeof(op));
op.on_connectivity_state_change = &chand->channel_connectivity_changed,
op.connectivity_state = &chand->connectivity_state;
- grpc_channel_next_op(grpc_channel_stack_element(
+ grpc_channel_next_op(exec_ctx,
+ grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
&op);
} else {
gpr_mu_lock(&server->mu_global);
- destroy_channel(chand);
+ destroy_channel(exec_ctx, chand);
gpr_mu_unlock(&server->mu_global);
- GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "connectivity");
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "connectivity");
}
}
-static void init_call_elem(grpc_call_element *elem,
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
call_data *calld = elem->call_data;
@@ -678,14 +693,15 @@ static void init_call_elem(grpc_call_element *elem,
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
- grpc_iomgr_closure_init(&calld->server_on_recv, server_on_recv, elem);
+ grpc_closure_init(&calld->server_on_recv, server_on_recv, elem);
server_ref(chand->server);
if (initial_op) server_mutate_op(elem, initial_op);
}
-static void destroy_call_elem(grpc_call_element *elem) {
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
@@ -700,10 +716,11 @@ static void destroy_call_elem(grpc_call_element *elem) {
gpr_mu_destroy(&calld->mu_state);
- server_unref(chand->server);
+ server_unref(exec_ctx, chand->server);
}
-static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
@@ -718,11 +735,12 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
chand->next = chand->prev = chand;
chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE;
- grpc_iomgr_closure_init(&chand->channel_connectivity_changed,
- channel_connectivity_changed, chand);
+ grpc_closure_init(&chand->channel_connectivity_changed,
+ channel_connectivity_changed, chand);
}
-static void destroy_channel_elem(grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
size_t i;
channel_data *chand = elem->channel_data;
if (chand->registered_methods) {
@@ -741,11 +759,11 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
chand->next->prev = chand->prev;
chand->prev->next = chand->next;
chand->next = chand->prev = chand;
- maybe_finish_shutdown(chand->server);
+ maybe_finish_shutdown(exec_ctx, chand->server);
gpr_mu_unlock(&chand->server->mu_global);
GRPC_MDSTR_UNREF(chand->path_key);
GRPC_MDSTR_UNREF(chand->authority_key);
- server_unref(chand->server);
+ server_unref(exec_ctx, chand->server);
}
}
@@ -869,6 +887,7 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
void grpc_server_start(grpc_server *server) {
listener *l;
size_t i;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
@@ -876,11 +895,14 @@ void grpc_server_start(grpc_server *server) {
}
for (l = server->listeners; l; l = l->next) {
- l->start(server, l->arg, server->pollsets, server->cq_count);
+ l->start(&exec_ctx, server, l->arg, server->pollsets, server->cq_count);
}
+
+ grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
+void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
+ grpc_transport *transport,
grpc_channel_filter const **extra_filters,
size_t num_extra_filters, grpc_mdctx *mdctx,
const grpc_channel_args *args) {
@@ -913,11 +935,11 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
for (i = 0; i < s->cq_count; i++) {
memset(&op, 0, sizeof(op));
op.bind_pollset = grpc_cq_pollset(s->cqs[i]);
- grpc_transport_perform_op(transport, &op);
+ grpc_transport_perform_op(exec_ctx, transport, &op);
}
- channel = grpc_channel_create_from_filters(NULL, filters, num_filters, args,
- mdctx, 0);
+ channel = grpc_channel_create_from_filters(exec_ctx, NULL, filters,
+ num_filters, args, mdctx, 0);
chand = (channel_data *)grpc_channel_stack_element(
grpc_channel_get_channel_stack(channel), 0)
->channel_data;
@@ -964,7 +986,7 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
chand->next->prev = chand->prev->next = chand;
gpr_mu_unlock(&s->mu_global);
- gpr_free(filters);
+ gpr_free((void *)filters);
GRPC_CHANNEL_INTERNAL_REF(channel, "connectivity");
memset(&op, 0, sizeof(op));
@@ -973,19 +995,30 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
op.on_connectivity_state_change = &chand->channel_connectivity_changed;
op.connectivity_state = &chand->connectivity_state;
op.disconnect = gpr_atm_acq_load(&s->shutdown_flag) != 0;
- grpc_transport_perform_op(transport, &op);
+ grpc_transport_perform_op(exec_ctx, transport, &op);
}
-void done_published_shutdown(void *done_arg, grpc_cq_completion *storage) {
- (void) done_arg;
+void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
+ grpc_cq_completion *storage) {
+ (void)done_arg;
gpr_free(storage);
}
+static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
+ int success) {
+ grpc_server *server = s;
+ gpr_mu_lock(&server->mu_global);
+ server->listeners_destroyed++;
+ maybe_finish_shutdown(exec_ctx, server);
+ gpr_mu_unlock(&server->mu_global);
+}
+
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag) {
listener *l;
shutdown_tag *sdt;
channel_broadcaster broadcaster;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_SERVER_LOG_SHUTDOWN(GPR_INFO, server, cq, tag);
@@ -993,10 +1026,10 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
gpr_mu_lock(&server->mu_global);
grpc_cq_begin_op(cq);
if (server->shutdown_published) {
- grpc_cq_end_op(cq, tag, 1, done_published_shutdown, NULL,
+ grpc_cq_end_op(&exec_ctx, cq, tag, 1, done_published_shutdown, NULL,
gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
- return;
+ goto done;
}
server->shutdown_tags =
gpr_realloc(server->shutdown_tags,
@@ -1006,50 +1039,50 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
sdt->cq = cq;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_unlock(&server->mu_global);
- return;
+ goto done;
}
server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
channel_broadcaster_init(server, &broadcaster);
+ gpr_atm_rel_store(&server->shutdown_flag, 1);
+
/* collect all unregistered then registered calls */
gpr_mu_lock(&server->mu_call);
- kill_pending_work_locked(server);
+ kill_pending_work_locked(&exec_ctx, server);
gpr_mu_unlock(&server->mu_call);
- gpr_atm_rel_store(&server->shutdown_flag, 1);
- maybe_finish_shutdown(server);
+ maybe_finish_shutdown(&exec_ctx, server);
gpr_mu_unlock(&server->mu_global);
/* Shutdown listeners */
for (l = server->listeners; l; l = l->next) {
- l->destroy(server, l->arg);
+ grpc_closure_init(&l->destroy_done, listener_destroy_done, server);
+ l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
}
- channel_broadcaster_shutdown(&broadcaster, 1, 0);
-}
+ channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 1, 0);
-void grpc_server_listener_destroy_done(void *s) {
- grpc_server *server = s;
- gpr_mu_lock(&server->mu_global);
- server->listeners_destroyed++;
- maybe_finish_shutdown(server);
- gpr_mu_unlock(&server->mu_global);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_server_cancel_all_calls(grpc_server *server) {
channel_broadcaster broadcaster;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&server->mu_global);
channel_broadcaster_init(server, &broadcaster);
gpr_mu_unlock(&server->mu_global);
- channel_broadcaster_shutdown(&broadcaster, 0, 1);
+ channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 0, 1);
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_server_destroy(grpc_server *server) {
listener *l;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&server->mu_global);
GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
@@ -1063,14 +1096,16 @@ void grpc_server_destroy(grpc_server *server) {
gpr_mu_unlock(&server->mu_global);
- server_unref(server);
+ server_unref(&exec_ctx, server);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_server_add_listener(grpc_server *server, void *arg,
- void (*start)(grpc_server *server, void *arg,
- grpc_pollset **pollsets,
- size_t pollset_count),
- void (*destroy)(grpc_server *server, void *arg)) {
+void grpc_server_add_listener(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t pollset_count),
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *on_done)) {
listener *l = gpr_malloc(sizeof(listener));
l->arg = arg;
l->start = start;
@@ -1079,52 +1114,54 @@ void grpc_server_add_listener(grpc_server *server, void *arg,
server->listeners = l;
}
-static grpc_call_error queue_call_request(grpc_server *server,
+static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
+ grpc_server *server,
requested_call *rc) {
call_data *calld = NULL;
- request_matcher *request_matcher = NULL;
+ request_matcher *rm = NULL;
int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
- fail_call(server, rc);
+ fail_call(exec_ctx, server, rc);
return GRPC_CALL_OK;
}
request_id = gpr_stack_lockfree_pop(server->request_freelist);
if (request_id == -1) {
/* out of request ids: just fail this one */
- fail_call(server, rc);
+ fail_call(exec_ctx, server, rc);
return GRPC_CALL_OK;
}
switch (rc->type) {
case BATCH_CALL:
- request_matcher = &server->unregistered_request_matcher;
+ rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
- request_matcher = &rc->data.registered.registered_method->request_matcher;
+ rm = &rc->data.registered.registered_method->request_matcher;
break;
}
server->requested_calls[request_id] = *rc;
gpr_free(rc);
- if (gpr_stack_lockfree_push(request_matcher->requests, request_id)) {
+ if (gpr_stack_lockfree_push(rm->requests, request_id)) {
/* this was the first queued request: we need to lock and start
matching calls */
gpr_mu_lock(&server->mu_call);
- while ((calld = request_matcher->pending_head) != NULL) {
- request_id = gpr_stack_lockfree_pop(request_matcher->requests);
+ while ((calld = rm->pending_head) != NULL) {
+ request_id = gpr_stack_lockfree_pop(rm->requests);
if (request_id == -1) break;
- request_matcher->pending_head = calld->pending_next;
+ rm->pending_head = calld->pending_next;
gpr_mu_unlock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
if (calld->state == ZOMBIED) {
gpr_mu_unlock(&calld->mu_state);
- grpc_iomgr_closure_init(
+ grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_iomgr_add_callback(&calld->kill_zombie_closure);
+ grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
} else {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
- begin_call(server, calld, &server->requested_calls[request_id]);
+ begin_call(exec_ctx, server, calld,
+ &server->requested_calls[request_id]);
}
gpr_mu_lock(&server->mu_call);
}
@@ -1138,13 +1175,16 @@ grpc_call_error grpc_server_request_call(
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = gpr_malloc(sizeof(*rc));
GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details,
initial_metadata, cq_bound_to_call,
cq_for_notification, tag);
if (!grpc_cq_is_server_cq(cq_for_notification)) {
gpr_free(rc);
- return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ goto done;
}
grpc_cq_begin_op(cq_for_notification);
details->reserved = NULL;
@@ -1156,19 +1196,25 @@ grpc_call_error grpc_server_request_call(
rc->call = call;
rc->data.batch.details = details;
rc->data.batch.initial_metadata = initial_metadata;
- return queue_call_request(server, rc);
+ error = queue_call_request(&exec_ctx, server, rc);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
}
grpc_call_error grpc_server_request_registered_call(
- grpc_server *server, void *rm, grpc_call **call, gpr_timespec *deadline,
+ grpc_server *server, void *rmp, grpc_call **call, gpr_timespec *deadline,
grpc_metadata_array *initial_metadata, grpc_byte_buffer **optional_payload,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
+ grpc_call_error error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = gpr_malloc(sizeof(*rc));
- registered_method *registered_method = rm;
+ registered_method *rm = rmp;
if (!grpc_cq_is_server_cq(cq_for_notification)) {
gpr_free(rc);
- return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
+ goto done;
}
grpc_cq_begin_op(cq_for_notification);
rc->type = REGISTERED_CALL;
@@ -1177,16 +1223,21 @@ grpc_call_error grpc_server_request_registered_call(
rc->cq_bound_to_call = cq_bound_to_call;
rc->cq_for_notification = cq_for_notification;
rc->call = call;
- rc->data.registered.registered_method = registered_method;
+ rc->data.registered.registered_method = rm;
rc->data.registered.deadline = deadline;
rc->data.registered.initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;
- return queue_call_request(server, rc);
+ error = queue_call_request(&exec_ctx, server, rc);
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return error;
}
-static void publish_registered_or_batch(grpc_call *call, int success,
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, int success,
void *tag);
-static void publish_was_not_set(grpc_call *call, int success, void *tag) {
+static void publish_was_not_set(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ int success, void *tag) {
abort();
}
@@ -1201,8 +1252,8 @@ static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
memcpy(*dest, grpc_mdstr_as_c_string(value), len + 1);
}
-static void begin_call(grpc_server *server, call_data *calld,
- requested_call *rc) {
+static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ call_data *calld, requested_call *rc) {
grpc_ioreq_completion_func publish = publish_was_not_set;
grpc_ioreq req[2];
grpc_ioreq *r = req;
@@ -1213,7 +1264,7 @@ static void begin_call(grpc_server *server, call_data *calld,
fill in the metadata array passed by the client, we need to perform
an ioreq op, that should complete immediately. */
- grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call);
+ grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
*rc->call = calld->call;
calld->cq_new = rc->cq_for_notification;
switch (rc->type) {
@@ -1248,11 +1299,12 @@ static void begin_call(grpc_server *server, call_data *calld,
}
GRPC_CALL_INTERNAL_REF(calld->call, "server");
- grpc_call_start_ioreq_and_call_back(calld->call, req, (size_t)(r - req),
- publish, rc);
+ grpc_call_start_ioreq_and_call_back(exec_ctx, calld->call, req,
+ (size_t)(r - req), publish, rc);
}
-static void done_request_event(void *req, grpc_cq_completion *c) {
+static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
+ grpc_cq_completion *c) {
requested_call *rc = req;
grpc_server *server = rc->server;
@@ -1265,10 +1317,11 @@ static void done_request_event(void *req, grpc_cq_completion *c) {
gpr_free(req);
}
- server_unref(server);
+ server_unref(exec_ctx, server);
}
-static void fail_call(grpc_server *server, requested_call *rc) {
+static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ requested_call *rc) {
*rc->call = NULL;
switch (rc->type) {
case BATCH_CALL:
@@ -1279,11 +1332,12 @@ static void fail_call(grpc_server *server, requested_call *rc) {
break;
}
server_ref(server);
- grpc_cq_end_op(rc->cq_for_notification, rc->tag, 0, done_request_event, rc,
- &rc->completion);
+ grpc_cq_end_op(exec_ctx, rc->cq_for_notification, rc->tag, 0,
+ done_request_event, rc, &rc->completion);
}
-static void publish_registered_or_batch(grpc_call *call, int success,
+static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call *call, int success,
void *prc) {
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
@@ -1291,9 +1345,9 @@ static void publish_registered_or_batch(grpc_call *call, int success,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
server_ref(chand->server);
- grpc_cq_end_op(calld->cq_new, rc->tag, success, done_request_event, rc,
- &rc->completion);
- GRPC_CALL_INTERNAL_UNREF(call, "server", 0);
+ grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, success, done_request_event,
+ rc, &rc->completion);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "server");
}
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {
diff --git a/src/core/surface/server.h b/src/core/surface/server.h
index c638d682bb..4c46d07679 100644
--- a/src/core/surface/server.h
+++ b/src/core/surface/server.h
@@ -45,17 +45,17 @@ grpc_server *grpc_server_create_from_filters(
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
-void grpc_server_add_listener(grpc_server *server, void *listener,
- void (*start)(grpc_server *server, void *arg,
- grpc_pollset **pollsets,
- size_t npollsets),
- void (*destroy)(grpc_server *server, void *arg));
-
-void grpc_server_listener_destroy_done(void *server);
+void grpc_server_add_listener(
+ grpc_exec_ctx *exec_ctx, grpc_server *server, void *listener,
+ void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_pollset **pollsets, size_t npollsets),
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
+ grpc_closure *on_done));
/* Setup a transport - creates a channel stack, binds the transport to the
server */
-void grpc_server_setup_transport(grpc_server *server, grpc_transport *transport,
+void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server,
+ grpc_transport *transport,
grpc_channel_filter const **extra_filters,
size_t num_extra_filters, grpc_mdctx *mdctx,
const grpc_channel_args *args);
diff --git a/src/core/surface/server_chttp2.c b/src/core/surface/server_chttp2.c
index 4ab845bc00..3904ce969d 100644
--- a/src/core/surface/server_chttp2.c
+++ b/src/core/surface/server_chttp2.c
@@ -42,16 +42,17 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
-static void setup_transport(void *server, grpc_transport *transport,
- grpc_mdctx *mdctx) {
+static void setup_transport(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_transport *transport, grpc_mdctx *mdctx) {
static grpc_channel_filter const *extra_filters[] = {
&grpc_http_server_filter};
- grpc_server_setup_transport(server, transport, extra_filters,
+ grpc_server_setup_transport(exec_ctx, server, transport, extra_filters,
GPR_ARRAY_SIZE(extra_filters), mdctx,
grpc_server_get_channel_args(server));
}
-static void new_transport(void *server, grpc_endpoint *tcp) {
+static void new_transport(grpc_exec_ctx *exec_ctx, void *server,
+ grpc_endpoint *tcp) {
/*
* Beware that the call to grpc_create_chttp2_transport() has to happen before
* grpc_tcp_server_destroy(). This is fine here, but similar code
@@ -61,23 +62,25 @@ static void new_transport(void *server, grpc_endpoint *tcp) {
*/
grpc_mdctx *mdctx = grpc_mdctx_create();
grpc_transport *transport = grpc_create_chttp2_transport(
- grpc_server_get_channel_args(server), tcp, mdctx, 0);
- setup_transport(server, transport, mdctx);
- grpc_chttp2_transport_start_reading(transport, NULL, 0);
+ exec_ctx, grpc_server_get_channel_args(server), tcp, mdctx, 0);
+ setup_transport(exec_ctx, server, transport, mdctx);
+ grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
}
/* Server callback: start listening on our ports */
-static void start(grpc_server *server, void *tcpp, grpc_pollset **pollsets,
- size_t pollset_count) {
+static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
+ grpc_pollset **pollsets, size_t pollset_count) {
grpc_tcp_server *tcp = tcpp;
- grpc_tcp_server_start(tcp, pollsets, pollset_count, new_transport, server);
+ grpc_tcp_server_start(exec_ctx, tcp, pollsets, pollset_count, new_transport,
+ server);
}
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
-static void destroy(grpc_server *server, void *tcpp) {
+static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
+ grpc_closure *destroy_done) {
grpc_tcp_server *tcp = tcpp;
- grpc_tcp_server_destroy(tcp, grpc_server_listener_destroy_done, server);
+ grpc_tcp_server_destroy(exec_ctx, tcp, destroy_done);
}
int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
@@ -87,6 +90,7 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
unsigned count = 0;
int port_num = -1;
int port_temp;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
resolved = grpc_blocking_resolve_address(addr, "http");
if (!resolved) {
@@ -123,9 +127,8 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
grpc_resolved_addresses_destroy(resolved);
/* Register with the server only upon success */
- grpc_server_add_listener(server, tcp, start, destroy);
-
- return port_num;
+ grpc_server_add_listener(&exec_ctx, server, tcp, start, destroy);
+ goto done;
/* Error path: cleanup and return */
error:
@@ -133,7 +136,11 @@ error:
grpc_resolved_addresses_destroy(resolved);
}
if (tcp) {
- grpc_tcp_server_destroy(tcp, NULL, NULL);
+ grpc_tcp_server_destroy(&exec_ctx, tcp, NULL);
}
- return 0;
+ port_num = 0;
+
+done:
+ grpc_exec_ctx_finish(&exec_ctx);
+ return port_num;
}
diff --git a/src/core/transport/chttp2/frame_data.c b/src/core/transport/chttp2/frame_data.c
index 403358016d..acfa7c002e 100644
--- a/src/core/transport/chttp2/frame_data.c
+++ b/src/core/transport/chttp2/frame_data.c
@@ -70,7 +70,8 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
@@ -161,7 +162,7 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_sopb_add_slice(
&p->incoming_sopb,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
- GPR_ASSERT(end - cur <= p->frame_size);
+ GPR_ASSERT((size_t)(end - cur) <= p->frame_size);
p->frame_size -= (gpr_uint32)(end - cur);
return GRPC_CHTTP2_PARSE_OK;
}
diff --git a/src/core/transport/chttp2/frame_data.h b/src/core/transport/chttp2/frame_data.h
index 23957b05ad..6762484e5b 100644
--- a/src/core/transport/chttp2/frame_data.h
+++ b/src/core/transport/chttp2/frame_data.h
@@ -36,6 +36,7 @@
/* Parser for GRPC streams embedded in DATA frames */
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#include "src/core/transport/stream_op.h"
@@ -73,7 +74,8 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
/* create a slice with an empty data frame and is_last set */
diff --git a/src/core/transport/chttp2/frame_goaway.c b/src/core/transport/chttp2/frame_goaway.c
index 09d4da234c..2ff1eda89b 100644
--- a/src/core/transport/chttp2/frame_goaway.c
+++ b/src/core/transport/chttp2/frame_goaway.c
@@ -63,7 +63,8 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
@@ -137,7 +138,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
/* fallthrough */
case GRPC_CHTTP2_GOAWAY_DEBUG:
memcpy(p->debug_data + p->debug_pos, cur, (size_t)(end - cur));
- GPR_ASSERT(end - cur < GPR_UINT32_MAX - p->debug_pos);
+ GPR_ASSERT((size_t)(end - cur) < GPR_UINT32_MAX - p->debug_pos);
p->debug_pos += (gpr_uint32)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
diff --git a/src/core/transport/chttp2/frame_goaway.h b/src/core/transport/chttp2/frame_goaway.h
index 9c5edfc821..06aaa92f07 100644
--- a/src/core/transport/chttp2/frame_goaway.h
+++ b/src/core/transport/chttp2/frame_goaway.h
@@ -34,6 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H
#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_GOAWAY_H
+#include "src/core/iomgr/exec_ctx.h"
#include "src/core/transport/chttp2/frame.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
@@ -65,7 +66,8 @@ void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, gpr_uint32 length, gpr_uint8 flags);
grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(gpr_uint32 last_stream_id, gpr_uint32 error_code,
diff --git a/src/core/transport/chttp2/frame_ping.c b/src/core/transport/chttp2/frame_ping.c
index 05451c7a8a..4d2c54269d 100644
--- a/src/core/transport/chttp2/frame_ping.c
+++ b/src/core/transport/chttp2/frame_ping.c
@@ -69,7 +69,8 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
@@ -89,7 +90,7 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
for (ping = transport_parsing->pings.next;
ping != &transport_parsing->pings; ping = ping->next) {
if (0 == memcmp(p->opaque_8bytes, ping->id, 8)) {
- grpc_iomgr_add_delayed_callback(ping->on_recv, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 1);
}
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
diff --git a/src/core/transport/chttp2/frame_ping.h b/src/core/transport/chttp2/frame_ping.h
index 99197e8352..2c71d0d491 100644
--- a/src/core/transport/chttp2/frame_ping.h
+++ b/src/core/transport/chttp2/frame_ping.h
@@ -34,6 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_PING_H
#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_PING_H
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
@@ -48,7 +49,8 @@ gpr_slice grpc_chttp2_ping_create(gpr_uint8 ack, gpr_uint8 *opaque_8bytes);
grpc_chttp2_parse_error grpc_chttp2_ping_parser_begin_frame(
grpc_chttp2_ping_parser *parser, gpr_uint32 length, gpr_uint8 flags);
grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_PING_H */
diff --git a/src/core/transport/chttp2/frame_rst_stream.c b/src/core/transport/chttp2/frame_rst_stream.c
index 67da245239..3cd5bcfc39 100644
--- a/src/core/transport/chttp2/frame_rst_stream.c
+++ b/src/core/transport/chttp2/frame_rst_stream.c
@@ -71,7 +71,8 @@ grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
diff --git a/src/core/transport/chttp2/frame_rst_stream.h b/src/core/transport/chttp2/frame_rst_stream.h
index ed69e588af..92cb77c971 100644
--- a/src/core/transport/chttp2/frame_rst_stream.h
+++ b/src/core/transport/chttp2/frame_rst_stream.h
@@ -36,6 +36,7 @@
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
+#include "src/core/iomgr/exec_ctx.h"
typedef struct {
gpr_uint8 byte;
@@ -47,7 +48,8 @@ gpr_slice grpc_chttp2_rst_stream_create(gpr_uint32 stream_id, gpr_uint32 code);
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, gpr_uint32 length, gpr_uint8 flags);
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H */
diff --git a/src/core/transport/chttp2/frame_settings.c b/src/core/transport/chttp2/frame_settings.c
index 54d3694a5c..395a2da452 100644
--- a/src/core/transport/chttp2/frame_settings.c
+++ b/src/core/transport/chttp2/frame_settings.c
@@ -138,7 +138,8 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
- void *p, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *p,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const gpr_uint8 *cur = GPR_SLICE_START_PTR(slice);
diff --git a/src/core/transport/chttp2/frame_settings.h b/src/core/transport/chttp2/frame_settings.h
index 0ac68a9fa8..cf857dd602 100644
--- a/src/core/transport/chttp2/frame_settings.h
+++ b/src/core/transport/chttp2/frame_settings.h
@@ -37,6 +37,7 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
+#include "src/core/iomgr/exec_ctx.h"
typedef enum {
GRPC_CHTTP2_SPS_ID0,
@@ -94,7 +95,8 @@ grpc_chttp2_parse_error grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, gpr_uint32 length, gpr_uint8 flags,
gpr_uint32 *settings);
grpc_chttp2_parse_error grpc_chttp2_settings_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_SETTINGS_H */
diff --git a/src/core/transport/chttp2/frame_window_update.c b/src/core/transport/chttp2/frame_window_update.c
index ea13969e8c..91bbcfe2c1 100644
--- a/src/core/transport/chttp2/frame_window_update.c
+++ b/src/core/transport/chttp2/frame_window_update.c
@@ -74,7 +74,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_begin_frame(
}
grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
diff --git a/src/core/transport/chttp2/frame_window_update.h b/src/core/transport/chttp2/frame_window_update.h
index deba801d00..fc074092ff 100644
--- a/src/core/transport/chttp2/frame_window_update.h
+++ b/src/core/transport/chttp2/frame_window_update.h
@@ -34,6 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H
#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H
+#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
@@ -50,7 +51,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, gpr_uint32 length,
gpr_uint8 flags);
grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/transport/chttp2/hpack_parser.c b/src/core/transport/chttp2/hpack_parser.c
index 9c40e8a4e6..3b0a5e7207 100644
--- a/src/core/transport/chttp2/hpack_parser.c
+++ b/src/core/transport/chttp2/hpack_parser.c
@@ -247,6 +247,7 @@ static const gpr_uint8 next_tbl[256] = {
41, 1, 1, 1, 42, 43, 1, 1, 44, 1, 1, 1, 1, 15, 2, 2, 2, 2, 2, 2,
3, 3, 3, 45, 46, 1, 1, 2, 2, 2, 35, 3, 3, 18, 47, 2,
};
+
/* next state, based upon current state and the current nibble: see above.
generated by gen_hpack_tables.c */
static const gpr_int16 next_sub_tbl[48 * 16] = {
@@ -303,6 +304,7 @@ static const gpr_int16 next_sub_tbl[48 * 16] = {
253, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 255,
};
+
/* emission table: indexed like next_tbl, ultimately gives the byte to be
emitted, or -1 for no byte, or 256 for end of stream
@@ -327,6 +329,7 @@ static const gpr_uint16 emit_tbl[256] = {
233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
248,
};
+
/* generated by gen_hpack_tables.c */
static const gpr_int16 emit_sub_tbl[249 * 16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -1371,14 +1374,15 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p) {
int grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *beg, const gpr_uint8 *end) {
/* TODO(ctiller): limit the distance of end from beg, and perform multiple
- steps in the event of a large chunk of data to limit
- stack space usage when no tail call optimization is
- available */
+ steps in the event of a large chunk of data to limit
+ stack space usage when no tail call optimization is
+ available */
return p->state(p, beg, end);
}
grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
- void *hpack_parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *hpack_parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
if (!grpc_chttp2_hpack_parser_parse(parser, GPR_SLICE_START_PTR(slice),
diff --git a/src/core/transport/chttp2/hpack_parser.h b/src/core/transport/chttp2/hpack_parser.h
index 4f489d67fb..f56867016c 100644
--- a/src/core/transport/chttp2/hpack_parser.h
+++ b/src/core/transport/chttp2/hpack_parser.h
@@ -37,6 +37,7 @@
#include <stddef.h>
#include <grpc/support/port_platform.h>
+#include "src/core/iomgr/exec_ctx.h"
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/chttp2/hpack_table.h"
#include "src/core/transport/metadata.h"
@@ -107,7 +108,8 @@ int grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
- void *hpack_parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *hpack_parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_PARSER_H */
diff --git a/src/core/transport/chttp2/hpack_table.c b/src/core/transport/chttp2/hpack_table.c
index e18778ab0b..d5cb752789 100644
--- a/src/core/transport/chttp2/hpack_table.c
+++ b/src/core/transport/chttp2/hpack_table.c
@@ -43,68 +43,130 @@ static struct {
const char *key;
const char *value;
} static_table[] = {
- /* 0: */ {NULL, NULL},
- /* 1: */ {":authority", ""},
- /* 2: */ {":method", "GET"},
- /* 3: */ {":method", "POST"},
- /* 4: */ {":path", "/"},
- /* 5: */ {":path", "/index.html"},
- /* 6: */ {":scheme", "http"},
- /* 7: */ {":scheme", "https"},
- /* 8: */ {":status", "200"},
- /* 9: */ {":status", "204"},
- /* 10: */ {":status", "206"},
- /* 11: */ {":status", "304"},
- /* 12: */ {":status", "400"},
- /* 13: */ {":status", "404"},
- /* 14: */ {":status", "500"},
- /* 15: */ {"accept-charset", ""},
- /* 16: */ {"accept-encoding", "gzip, deflate"},
- /* 17: */ {"accept-language", ""},
- /* 18: */ {"accept-ranges", ""},
- /* 19: */ {"accept", ""},
- /* 20: */ {"access-control-allow-origin", ""},
- /* 21: */ {"age", ""},
- /* 22: */ {"allow", ""},
- /* 23: */ {"authorization", ""},
- /* 24: */ {"cache-control", ""},
- /* 25: */ {"content-disposition", ""},
- /* 26: */ {"content-encoding", ""},
- /* 27: */ {"content-language", ""},
- /* 28: */ {"content-length", ""},
- /* 29: */ {"content-location", ""},
- /* 30: */ {"content-range", ""},
- /* 31: */ {"content-type", ""},
- /* 32: */ {"cookie", ""},
- /* 33: */ {"date", ""},
- /* 34: */ {"etag", ""},
- /* 35: */ {"expect", ""},
- /* 36: */ {"expires", ""},
- /* 37: */ {"from", ""},
- /* 38: */ {"host", ""},
- /* 39: */ {"if-match", ""},
- /* 40: */ {"if-modified-since", ""},
- /* 41: */ {"if-none-match", ""},
- /* 42: */ {"if-range", ""},
- /* 43: */ {"if-unmodified-since", ""},
- /* 44: */ {"last-modified", ""},
- /* 45: */ {"link", ""},
- /* 46: */ {"location", ""},
- /* 47: */ {"max-forwards", ""},
- /* 48: */ {"proxy-authenticate", ""},
- /* 49: */ {"proxy-authorization", ""},
- /* 50: */ {"range", ""},
- /* 51: */ {"referer", ""},
- /* 52: */ {"refresh", ""},
- /* 53: */ {"retry-after", ""},
- /* 54: */ {"server", ""},
- /* 55: */ {"set-cookie", ""},
- /* 56: */ {"strict-transport-security", ""},
- /* 57: */ {"transfer-encoding", ""},
- /* 58: */ {"user-agent", ""},
- /* 59: */ {"vary", ""},
- /* 60: */ {"via", ""},
- /* 61: */ {"www-authenticate", ""},
+ /* 0: */
+ {NULL, NULL},
+ /* 1: */
+ {":authority", ""},
+ /* 2: */
+ {":method", "GET"},
+ /* 3: */
+ {":method", "POST"},
+ /* 4: */
+ {":path", "/"},
+ /* 5: */
+ {":path", "/index.html"},
+ /* 6: */
+ {":scheme", "http"},
+ /* 7: */
+ {":scheme", "https"},
+ /* 8: */
+ {":status", "200"},
+ /* 9: */
+ {":status", "204"},
+ /* 10: */
+ {":status", "206"},
+ /* 11: */
+ {":status", "304"},
+ /* 12: */
+ {":status", "400"},
+ /* 13: */
+ {":status", "404"},
+ /* 14: */
+ {":status", "500"},
+ /* 15: */
+ {"accept-charset", ""},
+ /* 16: */
+ {"accept-encoding", "gzip, deflate"},
+ /* 17: */
+ {"accept-language", ""},
+ /* 18: */
+ {"accept-ranges", ""},
+ /* 19: */
+ {"accept", ""},
+ /* 20: */
+ {"access-control-allow-origin", ""},
+ /* 21: */
+ {"age", ""},
+ /* 22: */
+ {"allow", ""},
+ /* 23: */
+ {"authorization", ""},
+ /* 24: */
+ {"cache-control", ""},
+ /* 25: */
+ {"content-disposition", ""},
+ /* 26: */
+ {"content-encoding", ""},
+ /* 27: */
+ {"content-language", ""},
+ /* 28: */
+ {"content-length", ""},
+ /* 29: */
+ {"content-location", ""},
+ /* 30: */
+ {"content-range", ""},
+ /* 31: */
+ {"content-type", ""},
+ /* 32: */
+ {"cookie", ""},
+ /* 33: */
+ {"date", ""},
+ /* 34: */
+ {"etag", ""},
+ /* 35: */
+ {"expect", ""},
+ /* 36: */
+ {"expires", ""},
+ /* 37: */
+ {"from", ""},
+ /* 38: */
+ {"host", ""},
+ /* 39: */
+ {"if-match", ""},
+ /* 40: */
+ {"if-modified-since", ""},
+ /* 41: */
+ {"if-none-match", ""},
+ /* 42: */
+ {"if-range", ""},
+ /* 43: */
+ {"if-unmodified-since", ""},
+ /* 44: */
+ {"last-modified", ""},
+ /* 45: */
+ {"link", ""},
+ /* 46: */
+ {"location", ""},
+ /* 47: */
+ {"max-forwards", ""},
+ /* 48: */
+ {"proxy-authenticate", ""},
+ /* 49: */
+ {"proxy-authorization", ""},
+ /* 50: */
+ {"range", ""},
+ /* 51: */
+ {"referer", ""},
+ /* 52: */
+ {"refresh", ""},
+ /* 53: */
+ {"retry-after", ""},
+ /* 54: */
+ {"server", ""},
+ /* 55: */
+ {"set-cookie", ""},
+ /* 56: */
+ {"strict-transport-security", ""},
+ /* 57: */
+ {"transfer-encoding", ""},
+ /* 58: */
+ {"user-agent", ""},
+ /* 59: */
+ {"vary", ""},
+ /* 60: */
+ {"via", ""},
+ /* 61: */
+ {"www-authenticate", ""},
};
void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl *tbl, grpc_mdctx *mdctx) {
diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h
index c8c1abb750..b35f8b5d88 100644
--- a/src/core/transport/chttp2/internal.h
+++ b/src/core/transport/chttp2/internal.h
@@ -155,7 +155,7 @@ typedef enum {
/* Outstanding ping request data */
typedef struct grpc_chttp2_outstanding_ping {
gpr_uint8 id[8];
- grpc_iomgr_closure *on_recv;
+ grpc_closure *on_recv;
struct grpc_chttp2_outstanding_ping *next;
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
@@ -163,9 +163,6 @@ typedef struct grpc_chttp2_outstanding_ping {
typedef struct {
/** data to write next write */
gpr_slice_buffer qbuf;
- /** queued callbacks */
- grpc_iomgr_closure *pending_closures_head;
- grpc_iomgr_closure *pending_closures_tail;
/** window available for us to send to peer */
gpr_int64 outgoing_window;
@@ -215,7 +212,7 @@ typedef struct {
/** is this a client? */
gpr_uint8 is_client;
/** callback for when writing is done */
- grpc_iomgr_closure done_cb;
+ grpc_closure done_cb;
} grpc_chttp2_transport_writing;
struct grpc_chttp2_transport_parsing {
@@ -269,7 +266,8 @@ struct grpc_chttp2_transport_parsing {
void *parser_data;
grpc_chttp2_stream_parsing *incoming_stream;
grpc_chttp2_parse_error (*parser)(
- void *parser_user_data, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser_user_data,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
/* received settings */
@@ -333,9 +331,9 @@ struct grpc_chttp2_transport {
grpc_chttp2_stream_map new_stream_map;
/** closure to execute writing */
- grpc_iomgr_closure writing_action;
+ grpc_closure writing_action;
/** closure to finish reading from the endpoint */
- grpc_iomgr_closure recv_data;
+ grpc_closure recv_data;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
@@ -360,8 +358,8 @@ typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
gpr_uint32 id;
- grpc_iomgr_closure *send_done_closure;
- grpc_iomgr_closure *recv_done_closure;
+ grpc_closure *send_done_closure;
+ grpc_closure *recv_done_closure;
/** window available for us to send to peer */
gpr_int64 outgoing_window;
@@ -470,18 +468,23 @@ struct grpc_chttp2_stream {
int grpc_chttp2_unlocking_check_writes(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_perform_writes(
- grpc_chttp2_transport_writing *transport_writing, grpc_endpoint *endpoint);
-void grpc_chttp2_terminate_writing(void *transport_writing, int success);
-void grpc_chttp2_cleanup_writing(grpc_chttp2_transport_global *global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
+ grpc_endpoint *endpoint);
+void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
+ void *transport_writing, int success);
+void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
-int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
+int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice);
-void grpc_chttp2_publish_reads(grpc_chttp2_transport_global *global,
+void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
/** Get a writable stream
@@ -568,19 +571,14 @@ int grpc_chttp2_list_pop_read_write_state_changed(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
-/** schedule a closure to run without the transport lock taken */
-void grpc_chttp2_schedule_closure(
- grpc_chttp2_transport_global *transport_global, grpc_iomgr_closure *closure,
- int success);
-
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
void grpc_chttp2_add_incoming_goaway(
- grpc_chttp2_transport_global *transport_global, gpr_uint32 goaway_error,
- gpr_slice goaway_text);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ gpr_uint32 goaway_error, gpr_slice goaway_text);
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
diff --git a/src/core/transport/chttp2/parsing.c b/src/core/transport/chttp2/parsing.c
index f26f446787..f7a0a10581 100644
--- a/src/core/transport/chttp2/parsing.c
+++ b/src/core/transport/chttp2/parsing.c
@@ -58,7 +58,8 @@ static int init_goaway_parser(grpc_chttp2_transport_parsing *transport_parsing);
static int init_skip_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing, int is_header);
-static int parse_frame_slice(grpc_chttp2_transport_parsing *transport_parsing,
+static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice, int is_last);
void grpc_chttp2_prepare_to_read(
@@ -91,7 +92,7 @@ void grpc_chttp2_prepare_to_read(
}
void grpc_chttp2_publish_reads(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
@@ -130,7 +131,7 @@ void grpc_chttp2_publish_reads(
/* move goaway to the global state if we received one (it will be
published later */
if (transport_parsing->goaway_received) {
- grpc_chttp2_add_incoming_goaway(transport_global,
+ grpc_chttp2_add_incoming_goaway(exec_ctx, transport_global,
(gpr_uint32)transport_parsing->goaway_error,
transport_parsing->goaway_text);
transport_parsing->goaway_text = gpr_empty_slice();
@@ -234,7 +235,8 @@ void grpc_chttp2_publish_reads(
}
}
-int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
+int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice) {
gpr_uint8 *beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *end = GPR_SLICE_END_PTR(slice);
@@ -364,7 +366,8 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
transport_parsing->incoming_stream_id;
}
if (transport_parsing->incoming_frame_size == 0) {
- if (!parse_frame_slice(transport_parsing, gpr_empty_slice(), 1)) {
+ if (!parse_frame_slice(exec_ctx, transport_parsing, gpr_empty_slice(),
+ 1)) {
return 0;
}
transport_parsing->incoming_stream = NULL;
@@ -381,7 +384,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
if ((gpr_uint32)(end - cur) == transport_parsing->incoming_frame_size) {
- if (!parse_frame_slice(transport_parsing,
+ if (!parse_frame_slice(exec_ctx, transport_parsing,
gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
1)) {
@@ -394,7 +397,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
transport_parsing->incoming_frame_size) {
size_t cur_offset = (size_t)(cur - beg);
if (!parse_frame_slice(
- transport_parsing,
+ exec_ctx, transport_parsing,
gpr_slice_sub_no_ref(
slice, cur_offset,
cur_offset + transport_parsing->incoming_frame_size),
@@ -405,7 +408,7 @@ int grpc_chttp2_perform_read(grpc_chttp2_transport_parsing *transport_parsing,
transport_parsing->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
- if (!parse_frame_slice(transport_parsing,
+ if (!parse_frame_slice(exec_ctx, transport_parsing,
gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
0)) {
@@ -469,7 +472,8 @@ static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
}
static grpc_chttp2_parse_error skip_parser(
- void *parser, grpc_chttp2_transport_parsing *transport_parsing,
+ grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
return GRPC_CHTTP2_PARSE_OK;
}
@@ -788,11 +792,12 @@ static int is_window_update_legal(gpr_int64 window_update, gpr_int64 window) {
}
*/
-static int parse_frame_slice(grpc_chttp2_transport_parsing *transport_parsing,
+static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice, int is_last) {
grpc_chttp2_stream_parsing *stream_parsing =
transport_parsing->incoming_stream;
- switch (transport_parsing->parser(transport_parsing->parser_data,
+ switch (transport_parsing->parser(exec_ctx, transport_parsing->parser_data,
transport_parsing, stream_parsing, slice,
is_last)) {
case GRPC_CHTTP2_PARSE_OK:
diff --git a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/stream_encoder.c
index 6a22532bc2..eb02ccdec3 100644
--- a/src/core/transport/chttp2/stream_encoder.c
+++ b/src/core/transport/chttp2/stream_encoder.c
@@ -377,7 +377,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
gpr_uint32 indices_key;
int should_add_elem;
- GPR_ASSERT (GPR_SLICE_LENGTH(elem->key->slice) > 0);
+ GPR_ASSERT(GPR_SLICE_LENGTH(elem->key->slice) > 0);
if (GPR_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
st->seen_regular_header = 1;
} else if (st->seen_regular_header != 0) { /* reserved header */
diff --git a/src/core/transport/chttp2/stream_map.c b/src/core/transport/chttp2/stream_map.c
index bd16153ed1..c983105abb 100644
--- a/src/core/transport/chttp2/stream_map.c
+++ b/src/core/transport/chttp2/stream_map.c
@@ -149,7 +149,8 @@ static void **find(grpc_chttp2_stream_map *map, gpr_uint32 key) {
min_idx = mid_idx + 1;
} else if (mid_key > key) {
max_idx = mid_idx;
- } else /* mid_key == key */ {
+ } else /* mid_key == key */
+ {
return &values[mid_idx];
}
}
diff --git a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c
index c015e82931..d1c9da6df0 100644
--- a/src/core/transport/chttp2/writing.c
+++ b/src/core/transport/chttp2/writing.c
@@ -163,7 +163,8 @@ int grpc_chttp2_unlocking_check_writes(
}
void grpc_chttp2_perform_writes(
- grpc_chttp2_transport_writing *transport_writing, grpc_endpoint *endpoint) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
+ grpc_endpoint *endpoint) {
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
@@ -172,17 +173,8 @@ void grpc_chttp2_perform_writes(
GPR_ASSERT(transport_writing->outbuf.count > 0);
GPR_ASSERT(endpoint);
- switch (grpc_endpoint_write(endpoint, &transport_writing->outbuf,
- &transport_writing->done_cb)) {
- case GRPC_ENDPOINT_DONE:
- grpc_chttp2_terminate_writing(transport_writing, 1);
- break;
- case GRPC_ENDPOINT_ERROR:
- grpc_chttp2_terminate_writing(transport_writing, 0);
- break;
- case GRPC_ENDPOINT_PENDING:
- break;
- }
+ grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
+ &transport_writing->done_cb);
}
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
@@ -219,7 +211,7 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
}
void grpc_chttp2_cleanup_writing(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
@@ -238,8 +230,7 @@ void grpc_chttp2_cleanup_writing(
stream_global->outgoing_sopb->nops == 0) {
GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_QUEUED_CLOSE);
stream_global->outgoing_sopb = NULL;
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->send_done_closure, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 1);
}
}
stream_global->writing_now = 0;
diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c
index deb2fedf0c..1679e4345d 100644
--- a/src/core/transport/chttp2_transport.c
+++ b/src/core/transport/chttp2_transport.c
@@ -78,26 +78,28 @@ int grpc_flowctl_trace = 0;
static const grpc_transport_vtable vtable;
static void lock(grpc_chttp2_transport *t);
-static void unlock(grpc_chttp2_transport *t);
+static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
-static void unlock_check_read_write_state(grpc_chttp2_transport *t);
+static void unlock_check_read_write_state(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
/* forward declarations of various callbacks that we'll build closures around */
-static void writing_action(void *t, int iomgr_success_ignored);
+static void writing_action(grpc_exec_ctx *exec_ctx, void *t,
+ int iomgr_success_ignored);
/** Set a transport level setting, and push it to our peer */
static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
gpr_uint32 value);
/** Endpoint callback to process incoming data */
-static void recv_data(void *tp, int success);
+static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success);
/** Start disconnection chain */
-static void drop_connection(grpc_chttp2_transport *t);
+static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
/** Perform a transport_op */
static void perform_stream_op_locked(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op);
/** Cancel a stream: coming from the transport API */
@@ -111,24 +113,27 @@ static void close_from_api(grpc_chttp2_transport_global *transport_global,
gpr_slice *optional_message);
/** Add endpoint from this transport to pollset */
-static void add_to_pollset_locked(grpc_chttp2_transport *t,
+static void add_to_pollset_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
grpc_pollset *pollset);
-static void add_to_pollset_set_locked(grpc_chttp2_transport *t,
+static void add_to_pollset_set_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
grpc_pollset_set *pollset_set);
/** Start new streams that have been created if we can */
static void maybe_start_some_streams(
- grpc_chttp2_transport_global *transport_global);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global);
static void connectivity_state_set(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_connectivity_state state, const char *reason);
/*
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/
-static void destruct_transport(grpc_chttp2_transport *t) {
+static void destruct_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
size_t i;
gpr_mu_lock(&t->mu);
@@ -157,7 +162,7 @@ static void destruct_transport(grpc_chttp2_transport *t) {
grpc_chttp2_stream_map_destroy(&t->parsing_stream_map);
grpc_chttp2_stream_map_destroy(&t->new_stream_map);
- grpc_connectivity_state_destroy(&t->channel_callback.state_tracker);
+ grpc_connectivity_state_destroy(exec_ctx, &t->channel_callback.state_tracker);
gpr_mu_unlock(&t->mu);
gpr_mu_destroy(&t->mu);
@@ -166,7 +171,7 @@ static void destruct_transport(grpc_chttp2_transport *t) {
and maybe they hold resources that need to be freed */
while (t->global.pings.next != &t->global.pings) {
grpc_chttp2_outstanding_ping *ping = t->global.pings.next;
- grpc_iomgr_add_delayed_callback(ping->on_recv, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 0);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@@ -180,13 +185,13 @@ static void destruct_transport(grpc_chttp2_transport *t) {
#ifdef REFCOUNTING_DEBUG
#define REF_TRANSPORT(t, r) ref_transport(t, r, __FILE__, __LINE__)
-#define UNREF_TRANSPORT(t, r) unref_transport(t, r, __FILE__, __LINE__)
-static void unref_transport(grpc_chttp2_transport *t, const char *reason,
- const char *file, int line) {
+#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t, r, __FILE__, __LINE__)
+static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ const char *reason, const char *file, int line) {
gpr_log(GPR_DEBUG, "chttp2:unref:%p %d->%d %s [%s:%d]", t, t->refs.count,
t->refs.count - 1, reason, file, line);
if (!gpr_unref(&t->refs)) return;
- destruct_transport(t);
+ destruct_transport(exec_ctx, t);
}
static void ref_transport(grpc_chttp2_transport *t, const char *reason,
@@ -197,16 +202,16 @@ static void ref_transport(grpc_chttp2_transport *t, const char *reason,
}
#else
#define REF_TRANSPORT(t, r) ref_transport(t)
-#define UNREF_TRANSPORT(t, r) unref_transport(t)
-static void unref_transport(grpc_chttp2_transport *t) {
+#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t)
+static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
if (!gpr_unref(&t->refs)) return;
- destruct_transport(t);
+ destruct_transport(exec_ctx, t);
}
static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
-static void init_transport(grpc_chttp2_transport *t,
+static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
grpc_endpoint *ep, grpc_mdctx *mdctx,
gpr_uint8 is_client) {
@@ -242,22 +247,23 @@ static void init_transport(grpc_chttp2_transport *t,
t->parsing.deframe_state =
is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->writing.is_client = is_client;
- grpc_connectivity_state_init(&t->channel_callback.state_tracker,
- GRPC_CHANNEL_READY, "transport");
+ grpc_connectivity_state_init(
+ &t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
+ is_client ? "client_transport" : "server_transport");
gpr_slice_buffer_init(&t->global.qbuf);
gpr_slice_buffer_init(&t->writing.outbuf);
grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx);
- grpc_iomgr_closure_init(&t->writing_action, writing_action, t);
+ grpc_closure_init(&t->writing_action, writing_action, t);
gpr_slice_buffer_init(&t->parsing.qbuf);
grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context);
- grpc_iomgr_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing,
- &t->writing);
- grpc_iomgr_closure_init(&t->recv_data, recv_data, t);
+ grpc_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing,
+ &t->writing);
+ grpc_closure_init(&t->recv_data, recv_data, t);
gpr_slice_buffer_init(&t->read_buffer);
if (is_client) {
@@ -328,15 +334,15 @@ static void init_transport(grpc_chttp2_transport *t,
}
}
-static void destroy_transport(grpc_transport *gt) {
+static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
lock(t);
t->destroying = 1;
- drop_connection(t);
- unlock(t);
+ drop_connection(exec_ctx, t);
+ unlock(exec_ctx, t);
- UNREF_TRANSPORT(t, "destroy");
+ UNREF_TRANSPORT(exec_ctx, t, "destroy");
}
/** block grpc_endpoint_shutdown being called until a paired
@@ -346,44 +352,48 @@ static void prevent_endpoint_shutdown(grpc_chttp2_transport *t) {
gpr_ref(&t->shutdown_ep_refs);
}
-static void allow_endpoint_shutdown_locked(grpc_chttp2_transport *t) {
+static void allow_endpoint_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
if (gpr_unref(&t->shutdown_ep_refs)) {
if (t->ep) {
- grpc_endpoint_shutdown(t->ep);
+ grpc_endpoint_shutdown(exec_ctx, t->ep);
}
}
}
-static void allow_endpoint_shutdown_unlocked(grpc_chttp2_transport *t) {
+static void allow_endpoint_shutdown_unlocked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
if (gpr_unref(&t->shutdown_ep_refs)) {
gpr_mu_lock(&t->mu);
if (t->ep) {
- grpc_endpoint_shutdown(t->ep);
+ grpc_endpoint_shutdown(exec_ctx, t->ep);
}
gpr_mu_unlock(&t->mu);
}
}
-static void destroy_endpoint(grpc_chttp2_transport *t) {
- grpc_endpoint_destroy(t->ep);
+static void destroy_endpoint(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_endpoint_destroy(exec_ctx, t->ep);
t->ep = NULL;
- UNREF_TRANSPORT(
- t, "disconnect"); /* safe because we'll still have the ref for write */
+ /* safe because we'll still have the ref for write */
+ UNREF_TRANSPORT(exec_ctx, t, "disconnect");
}
-static void close_transport_locked(grpc_chttp2_transport *t) {
+static void close_transport_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
if (!t->closed) {
t->closed = 1;
- connectivity_state_set(&t->global, GRPC_CHANNEL_FATAL_FAILURE,
+ connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_FATAL_FAILURE,
"close_transport");
if (t->ep) {
- allow_endpoint_shutdown_locked(t);
+ allow_endpoint_shutdown_locked(exec_ctx, t);
}
}
}
-static int init_stream(grpc_transport *gt, grpc_stream *gs,
- const void *server_data,
+static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, const void *server_data,
grpc_transport_stream_op *initial_op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
@@ -415,13 +425,15 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
s->global.in_stream_map = 1;
}
- if (initial_op) perform_stream_op_locked(&t->global, &s->global, initial_op);
- unlock(t);
+ if (initial_op)
+ perform_stream_op_locked(exec_ctx, &t->global, &s->global, initial_op);
+ unlock(exec_ctx, t);
return 0;
}
-static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
+static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
int i;
@@ -432,7 +444,7 @@ static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
s->global.id == 0);
GPR_ASSERT(!s->global.in_stream_map);
if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
- close_transport_locked(t);
+ close_transport_locked(exec_ctx, t);
}
if (!t->parsing_active && s->global.id) {
GPR_ASSERT(grpc_chttp2_stream_map_find(&t->parsing_stream_map,
@@ -462,7 +474,7 @@ static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
grpc_chttp2_incoming_metadata_live_op_buffer_end(
&s->global.outstanding_metadata);
- UNREF_TRANSPORT(t, "stream");
+ UNREF_TRANSPORT(exec_ctx, t, "stream");
}
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
@@ -497,29 +509,17 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
static void lock(grpc_chttp2_transport *t) { gpr_mu_lock(&t->mu); }
-static void unlock(grpc_chttp2_transport *t) {
- grpc_iomgr_closure *run_closures;
-
- unlock_check_read_write_state(t);
+static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+ unlock_check_read_write_state(exec_ctx, t);
if (!t->writing_active && !t->closed &&
grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
t->writing_active = 1;
REF_TRANSPORT(t, "writing");
- grpc_chttp2_schedule_closure(&t->global, &t->writing_action, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, &t->writing_action, 1);
prevent_endpoint_shutdown(t);
}
- run_closures = t->global.pending_closures_head;
- t->global.pending_closures_head = NULL;
- t->global.pending_closures_tail = NULL;
-
gpr_mu_unlock(&t->mu);
-
- while (run_closures) {
- grpc_iomgr_closure *next = run_closures->next;
- run_closures->cb(run_closures->cb_arg, run_closures->success);
- run_closures = next;
- }
}
/*
@@ -541,52 +541,54 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id,
}
}
-void grpc_chttp2_terminate_writing(void *transport_writing_ptr, int success) {
+void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
+ void *transport_writing_ptr, int success) {
grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr;
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
lock(t);
- allow_endpoint_shutdown_locked(t);
+ allow_endpoint_shutdown_locked(exec_ctx, t);
if (!success) {
- drop_connection(t);
+ drop_connection(exec_ctx, t);
}
/* cleanup writing related jazz */
- grpc_chttp2_cleanup_writing(&t->global, &t->writing);
+ grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing);
/* leave the writing flag up on shutdown to prevent further writes in unlock()
from starting */
t->writing_active = 0;
if (t->ep && !t->endpoint_reading) {
- destroy_endpoint(t);
+ destroy_endpoint(exec_ctx, t);
}
- unlock(t);
+ unlock(exec_ctx, t);
- UNREF_TRANSPORT(t, "writing");
+ UNREF_TRANSPORT(exec_ctx, t, "writing");
}
-static void writing_action(void *gt, int iomgr_success_ignored) {
+static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
+ int iomgr_success_ignored) {
grpc_chttp2_transport *t = gt;
- grpc_chttp2_perform_writes(&t->writing, t->ep);
+ grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
}
void grpc_chttp2_add_incoming_goaway(
- grpc_chttp2_transport_global *transport_global, gpr_uint32 goaway_error,
- gpr_slice goaway_text) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
+ gpr_uint32 goaway_error, gpr_slice goaway_text) {
char *msg = gpr_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg);
gpr_free(msg);
gpr_slice_unref(goaway_text);
transport_global->seen_goaway = 1;
- connectivity_state_set(transport_global, GRPC_CHANNEL_FATAL_FAILURE,
+ connectivity_state_set(exec_ctx, transport_global, GRPC_CHANNEL_FATAL_FAILURE,
"got_goaway");
}
static void maybe_start_some_streams(
- grpc_chttp2_transport_global *transport_global) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global) {
grpc_chttp2_stream_global *stream_global;
/* start streams where we have free grpc_chttp2_stream ids and free
* concurrency */
@@ -607,7 +609,8 @@ static void maybe_start_some_streams(
transport_global->next_stream_id += 2;
if (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID) {
- connectivity_state_set(transport_global, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ connectivity_state_set(exec_ctx, transport_global,
+ GRPC_CHANNEL_TRANSIENT_FAILURE,
"no_more_stream_ids");
}
@@ -637,7 +640,7 @@ static void maybe_start_some_streams(
}
static void perform_stream_op_locked(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op) {
if (op->cancel_with_status != GRPC_STATUS_OK) {
cancel_from_api(transport_global, stream_global, op->cancel_with_status);
@@ -665,14 +668,13 @@ static void perform_stream_op_locked(
transport_global->is_client ? "CLI" : "SVR", stream_global));
grpc_chttp2_list_add_waiting_for_concurrency(transport_global,
stream_global);
- maybe_start_some_streams(transport_global);
+ maybe_start_some_streams(exec_ctx, transport_global);
} else if (stream_global->outgoing_window > 0) {
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
} else {
grpc_sopb_reset(op->send_ops);
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->send_done_closure, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 0);
}
}
@@ -706,27 +708,24 @@ static void perform_stream_op_locked(
}
if (op->bind_pollset) {
- add_to_pollset_locked(TRANSPORT_FROM_GLOBAL(transport_global),
+ add_to_pollset_locked(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
op->bind_pollset);
}
- if (op->on_consumed) {
- grpc_chttp2_schedule_closure(transport_global, op->on_consumed, 1);
- }
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
}
-static void perform_stream_op(grpc_transport *gt, grpc_stream *gs,
- grpc_transport_stream_op *op) {
+static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_transport_stream_op *op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
lock(t);
- perform_stream_op_locked(&t->global, &s->global, op);
- unlock(t);
+ perform_stream_op_locked(exec_ctx, &t->global, &s->global, op);
+ unlock(exec_ctx, t);
}
-static void send_ping_locked(grpc_chttp2_transport *t,
- grpc_iomgr_closure *on_recv) {
+static void send_ping_locked(grpc_chttp2_transport *t, grpc_closure *on_recv) {
grpc_chttp2_outstanding_ping *p = gpr_malloc(sizeof(*p));
p->next = &t->global.pings;
p->prev = p->next->prev;
@@ -743,19 +742,18 @@ static void send_ping_locked(grpc_chttp2_transport *t,
gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id));
}
-static void perform_transport_op(grpc_transport *gt, grpc_transport_op *op) {
+static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_transport_op *op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
int close_transport = 0;
lock(t);
- if (op->on_consumed) {
- grpc_chttp2_schedule_closure(&t->global, op->on_consumed, 1);
- }
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
if (op->on_connectivity_state_change) {
grpc_connectivity_state_notify_on_state_change(
- &t->channel_callback.state_tracker, op->connectivity_state,
+ exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
}
@@ -775,11 +773,11 @@ static void perform_transport_op(grpc_transport *gt, grpc_transport_op *op) {
}
if (op->bind_pollset) {
- add_to_pollset_locked(t, op->bind_pollset);
+ add_to_pollset_locked(exec_ctx, t, op->bind_pollset);
}
if (op->bind_pollset_set) {
- add_to_pollset_set_locked(t, op->bind_pollset_set);
+ add_to_pollset_set_locked(exec_ctx, t, op->bind_pollset_set);
}
if (op->send_ping) {
@@ -787,15 +785,15 @@ static void perform_transport_op(grpc_transport *gt, grpc_transport_op *op) {
}
if (op->disconnect) {
- close_transport_locked(t);
+ close_transport_locked(exec_ctx, t);
}
- unlock(t);
+ unlock(exec_ctx, t);
if (close_transport) {
lock(t);
- close_transport_locked(t);
- unlock(t);
+ close_transport_locked(exec_ctx, t);
+ unlock(exec_ctx, t);
}
}
@@ -811,7 +809,8 @@ static grpc_stream_state compute_state(gpr_uint8 write_closed,
return GRPC_STREAM_OPEN;
}
-static void remove_stream(grpc_chttp2_transport *t, gpr_uint32 id) {
+static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ gpr_uint32 id) {
size_t new_stream_count;
grpc_chttp2_stream *s =
grpc_chttp2_stream_map_delete(&t->parsing_stream_map, id);
@@ -826,7 +825,7 @@ static void remove_stream(grpc_chttp2_transport *t, gpr_uint32 id) {
grpc_chttp2_parsing_become_skip_parser(&t->parsing);
}
if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
- close_transport_locked(t);
+ close_transport_locked(exec_ctx, t);
}
new_stream_count = grpc_chttp2_stream_map_size(&t->parsing_stream_map) +
@@ -834,11 +833,12 @@ static void remove_stream(grpc_chttp2_transport *t, gpr_uint32 id) {
GPR_ASSERT(new_stream_count <= GPR_UINT32_MAX);
if (new_stream_count != t->global.concurrent_stream_count) {
t->global.concurrent_stream_count = (gpr_uint32)new_stream_count;
- maybe_start_some_streams(&t->global);
+ maybe_start_some_streams(exec_ctx, &t->global);
}
}
-static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
+static void unlock_check_read_write_state(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_chttp2_transport_global *transport_global = &t->global;
grpc_chttp2_stream_global *stream_global;
grpc_stream_state state;
@@ -852,7 +852,7 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
GPR_ASSERT(stream_global->in_stream_map);
GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_OPEN);
GPR_ASSERT(stream_global->read_closed);
- remove_stream(t, stream_global->id);
+ remove_stream(exec_ctx, t, stream_global->id);
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
}
@@ -878,8 +878,7 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
if (stream_global->outgoing_sopb != NULL) {
grpc_sopb_reset(stream_global->outgoing_sopb);
stream_global->outgoing_sopb = NULL;
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->send_done_closure, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 1);
}
stream_global->read_closed = 1;
if (!stream_global->published_cancelled) {
@@ -901,7 +900,7 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
stream_global);
} else {
- remove_stream(t, stream_global->id);
+ remove_stream(exec_ctx, t, stream_global->id);
}
}
if (!stream_global->publish_sopb) {
@@ -929,8 +928,7 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
&stream_global->outstanding_metadata);
grpc_sopb_swap(stream_global->publish_sopb, &stream_global->incoming_sopb);
stream_global->published_state = *stream_global->publish_state = state;
- grpc_chttp2_schedule_closure(transport_global,
- stream_global->recv_done_closure, 1);
+ grpc_exec_ctx_enqueue(exec_ctx, stream_global->recv_done_closure, 1);
stream_global->recv_done_closure = NULL;
stream_global->publish_sopb = NULL;
stream_global->publish_state = NULL;
@@ -1065,8 +1063,8 @@ static void end_all_the_calls(grpc_chttp2_transport *t) {
grpc_chttp2_for_all_streams(&t->global, NULL, cancel_stream_cb);
}
-static void drop_connection(grpc_chttp2_transport *t) {
- close_transport_locked(t);
+static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+ close_transport_locked(exec_ctx, t);
end_all_the_calls(t);
}
@@ -1091,17 +1089,19 @@ static void update_global_window(void *args, gpr_uint32 id, void *stream) {
}
}
-static void read_error_locked(grpc_chttp2_transport *t) {
+static void read_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
t->endpoint_reading = 0;
if (!t->writing_active && t->ep) {
- destroy_endpoint(t);
+ destroy_endpoint(exec_ctx, t);
}
}
/* tcp read callback */
-static int recv_data_loop(grpc_chttp2_transport *t, int *success) {
+static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
size_t i;
int keep_reading = 0;
+ grpc_chttp2_transport *t = tp;
lock(t);
i = 0;
@@ -1114,12 +1114,13 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) {
grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
gpr_mu_unlock(&t->mu);
for (; i < t->read_buffer.count &&
- grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]);
+ grpc_chttp2_perform_read(exec_ctx, &t->parsing,
+ t->read_buffer.slices[i]);
i++)
;
gpr_mu_lock(&t->mu);
if (i != t->read_buffer.count) {
- drop_connection(t);
+ drop_connection(exec_ctx, t);
}
/* merge stream lists */
grpc_chttp2_stream_map_move_into(&t->new_stream_map,
@@ -1132,102 +1133,60 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) {
t->parsing.initial_window_update = 0;
}
/* handle higher level things */
- grpc_chttp2_publish_reads(&t->global, &t->parsing);
+ grpc_chttp2_publish_reads(exec_ctx, &t->global, &t->parsing);
t->parsing_active = 0;
}
- if (!*success || i != t->read_buffer.count) {
- drop_connection(t);
- read_error_locked(t);
+ if (!success || i != t->read_buffer.count) {
+ drop_connection(exec_ctx, t);
+ read_error_locked(exec_ctx, t);
} else if (!t->closed) {
keep_reading = 1;
REF_TRANSPORT(t, "keep_reading");
prevent_endpoint_shutdown(t);
}
gpr_slice_buffer_reset_and_unref(&t->read_buffer);
- unlock(t);
+ unlock(exec_ctx, t);
if (keep_reading) {
- int ret = -1;
- switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) {
- case GRPC_ENDPOINT_DONE:
- *success = 1;
- ret = 1;
- break;
- case GRPC_ENDPOINT_ERROR:
- *success = 0;
- ret = 1;
- break;
- case GRPC_ENDPOINT_PENDING:
- ret = 0;
- break;
- }
- allow_endpoint_shutdown_unlocked(t);
- UNREF_TRANSPORT(t, "keep_reading");
- return ret;
+ grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->recv_data);
+ allow_endpoint_shutdown_unlocked(exec_ctx, t);
+ UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
- UNREF_TRANSPORT(t, "recv_data");
- return 0;
+ UNREF_TRANSPORT(exec_ctx, t, "recv_data");
}
-
- gpr_log(GPR_ERROR, "should never reach here");
- abort();
-}
-
-static void recv_data(void *tp, int success) {
- grpc_chttp2_transport *t = tp;
-
- while (recv_data_loop(t, &success))
- ;
}
/*
* CALLBACK LOOP
*/
-static void schedule_closure_for_connectivity(void *a,
- grpc_iomgr_closure *closure) {
- grpc_chttp2_schedule_closure(a, closure, 1);
-}
-
static void connectivity_state_set(
- grpc_chttp2_transport_global *transport_global,
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_connectivity_state state, const char *reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
- grpc_connectivity_state_set_with_scheduler(
- &TRANSPORT_FROM_GLOBAL(transport_global)->channel_callback.state_tracker,
- state, schedule_closure_for_connectivity, transport_global, reason);
-}
-
-void grpc_chttp2_schedule_closure(
- grpc_chttp2_transport_global *transport_global, grpc_iomgr_closure *closure,
- int success) {
- closure->success = success;
- if (transport_global->pending_closures_tail == NULL) {
- transport_global->pending_closures_head =
- transport_global->pending_closures_tail = closure;
- } else {
- transport_global->pending_closures_tail->next = closure;
- transport_global->pending_closures_tail = closure;
- }
- closure->next = NULL;
+ grpc_connectivity_state_set(exec_ctx, &TRANSPORT_FROM_GLOBAL(transport_global)
+ ->channel_callback.state_tracker,
+ state, reason);
}
/*
* POLLSET STUFF
*/
-static void add_to_pollset_locked(grpc_chttp2_transport *t,
+static void add_to_pollset_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
grpc_pollset *pollset) {
if (t->ep) {
- grpc_endpoint_add_to_pollset(t->ep, pollset);
+ grpc_endpoint_add_to_pollset(exec_ctx, t->ep, pollset);
}
}
-static void add_to_pollset_set_locked(grpc_chttp2_transport *t,
+static void add_to_pollset_set_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
grpc_pollset_set *pollset_set) {
if (t->ep) {
- grpc_endpoint_add_to_pollset_set(t->ep, pollset_set);
+ grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, pollset_set);
}
}
@@ -1266,7 +1225,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
* INTEGRATION GLUE
*/
-static char *chttp2_get_peer(grpc_transport *t) {
+static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
}
@@ -1279,17 +1238,18 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
chttp2_get_peer};
grpc_transport *grpc_create_chttp2_transport(
- const grpc_channel_args *channel_args, grpc_endpoint *ep, grpc_mdctx *mdctx,
- int is_client) {
+ grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
+ grpc_endpoint *ep, grpc_mdctx *mdctx, int is_client) {
grpc_chttp2_transport *t = gpr_malloc(sizeof(grpc_chttp2_transport));
- init_transport(t, channel_args, ep, mdctx, is_client != 0);
+ init_transport(exec_ctx, t, channel_args, ep, mdctx, is_client != 0);
return &t->base;
}
-void grpc_chttp2_transport_start_reading(grpc_transport *transport,
+void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
gpr_slice *slices, size_t nslices) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */
gpr_slice_buffer_addn(&t->read_buffer, slices, nslices);
- recv_data(t, 1);
+ recv_data(exec_ctx, t, 1);
}
diff --git a/src/core/transport/chttp2_transport.h b/src/core/transport/chttp2_transport.h
index fa0d6e4151..fce2b680fd 100644
--- a/src/core/transport/chttp2_transport.h
+++ b/src/core/transport/chttp2_transport.h
@@ -41,10 +41,11 @@ extern int grpc_http_trace;
extern int grpc_flowctl_trace;
grpc_transport *grpc_create_chttp2_transport(
- const grpc_channel_args *channel_args, grpc_endpoint *ep,
- grpc_mdctx *metadata_context, int is_client);
+ grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
+ grpc_endpoint *ep, grpc_mdctx *metadata_context, int is_client);
-void grpc_chttp2_transport_start_reading(grpc_transport *transport,
+void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
gpr_slice *slices, size_t nslices);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_TRANSPORT_H */
diff --git a/src/core/transport/connectivity_state.c b/src/core/transport/connectivity_state.c
index 61d26f06f0..a53fecc198 100644
--- a/src/core/transport/connectivity_state.c
+++ b/src/core/transport/connectivity_state.c
@@ -32,6 +32,9 @@
*/
#include "src/core/transport/connectivity_state.h"
+
+#include <string.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -63,17 +66,20 @@ void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
tracker->name = gpr_strdup(name);
}
-void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker *tracker) {
+void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker) {
+ int success;
grpc_connectivity_state_watcher *w;
while ((w = tracker->watchers)) {
tracker->watchers = w->next;
if (GRPC_CHANNEL_FATAL_FAILURE != *w->current) {
*w->current = GRPC_CHANNEL_FATAL_FAILURE;
- grpc_iomgr_add_callback(w->notify);
+ success = 1;
} else {
- grpc_iomgr_add_delayed_callback(w->notify, 0);
+ success = 0;
}
+ grpc_exec_ctx_enqueue(exec_ctx, w->notify, success);
gpr_free(w);
}
gpr_free(tracker->name);
@@ -81,20 +87,24 @@ void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker *tracker) {
grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state_tracker *tracker) {
+ if (grpc_connectivity_state_trace) {
+ gpr_log(GPR_DEBUG, "CONWATCH: %s: get %s", tracker->name,
+ grpc_connectivity_state_name(tracker->current_state));
+ }
return tracker->current_state;
}
int grpc_connectivity_state_notify_on_state_change(
- grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current,
- grpc_iomgr_closure *notify) {
+ grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state *current, grpc_closure *notify) {
if (grpc_connectivity_state_trace) {
- gpr_log(GPR_DEBUG, "CONWATCH: %s: from %s [cur=%s]", tracker->name,
- grpc_connectivity_state_name(*current),
- grpc_connectivity_state_name(tracker->current_state));
+ gpr_log(GPR_DEBUG, "CONWATCH: %s: from %s [cur=%s] notify=%p",
+ tracker->name, grpc_connectivity_state_name(*current),
+ grpc_connectivity_state_name(tracker->current_state), notify);
}
if (tracker->current_state != *current) {
*current = tracker->current_state;
- grpc_iomgr_add_callback(notify);
+ grpc_exec_ctx_enqueue(exec_ctx, notify, 1);
} else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
w->current = current;
@@ -105,11 +115,10 @@ int grpc_connectivity_state_notify_on_state_change(
return tracker->current_state == GRPC_CHANNEL_IDLE;
}
-void grpc_connectivity_state_set_with_scheduler(
- grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state,
- void (*scheduler)(void *arg, grpc_iomgr_closure *closure), void *arg,
- const char *reason) {
- grpc_connectivity_state_watcher *new = NULL;
+void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state state,
+ const char *reason) {
grpc_connectivity_state_watcher *w;
if (grpc_connectivity_state_trace) {
gpr_log(GPR_DEBUG, "SET: %s: %s --> %s [%s]", tracker->name,
@@ -121,28 +130,10 @@ void grpc_connectivity_state_set_with_scheduler(
}
GPR_ASSERT(tracker->current_state != GRPC_CHANNEL_FATAL_FAILURE);
tracker->current_state = state;
- while ((w = tracker->watchers)) {
+ while ((w = tracker->watchers) != NULL) {
+ *w->current = tracker->current_state;
tracker->watchers = w->next;
-
- if (state != *w->current) {
- *w->current = state;
- scheduler(arg, w->notify);
- gpr_free(w);
- } else {
- w->next = new;
- new = w;
- }
+ grpc_exec_ctx_enqueue(exec_ctx, w->notify, 1);
+ gpr_free(w);
}
- tracker->watchers = new;
-}
-
-static void default_scheduler(void *ignored, grpc_iomgr_closure *closure) {
- grpc_iomgr_add_callback(closure);
-}
-
-void grpc_connectivity_state_set(grpc_connectivity_state_tracker *tracker,
- grpc_connectivity_state state,
- const char *reason) {
- grpc_connectivity_state_set_with_scheduler(tracker, state, default_scheduler,
- NULL, reason);
}
diff --git a/src/core/transport/connectivity_state.h b/src/core/transport/connectivity_state.h
index a3b0b80c98..8b6b0554cd 100644
--- a/src/core/transport/connectivity_state.h
+++ b/src/core/transport/connectivity_state.h
@@ -35,13 +35,13 @@
#define GRPC_INTERNAL_CORE_TRANSPORT_CONNECTIVITY_STATE_H
#include <grpc/grpc.h>
-#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/exec_ctx.h"
typedef struct grpc_connectivity_state_watcher {
/** we keep watchers in a linked list */
struct grpc_connectivity_state_watcher *next;
/** closure to notify on change */
- grpc_iomgr_closure *notify;
+ grpc_closure *notify;
/** the current state as believed by the watcher */
grpc_connectivity_state *current;
} grpc_connectivity_state_watcher;
@@ -60,22 +60,22 @@ extern int grpc_connectivity_state_trace;
void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state init_state,
const char *name);
-void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker *tracker);
+void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker);
-void grpc_connectivity_state_set(grpc_connectivity_state_tracker *tracker,
+/** Set connectivity state; not thread safe; access must be serialized with an
+ * external lock */
+void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state state,
const char *reason);
-void grpc_connectivity_state_set_with_scheduler(
- grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state,
- void (*scheduler)(void *arg, grpc_iomgr_closure *closure), void *arg,
- const char *reason);
grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state_tracker *tracker);
/** Return 1 if the channel should start connecting, 0 otherwise */
int grpc_connectivity_state_notify_on_state_change(
- grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current,
- grpc_iomgr_closure *notify);
+ grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
+ grpc_connectivity_state *current, grpc_closure *notify);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CONNECTIVITY_STATE_H */
diff --git a/src/core/transport/transport.c b/src/core/transport/transport.c
index c0d92cf93f..828d212cfe 100644
--- a/src/core/transport/transport.c
+++ b/src/core/transport/transport.c
@@ -40,48 +40,48 @@ size_t grpc_transport_stream_size(grpc_transport *transport) {
return transport->vtable->sizeof_stream;
}
-void grpc_transport_destroy(grpc_transport *transport) {
- transport->vtable->destroy(transport);
+void grpc_transport_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport) {
+ transport->vtable->destroy(exec_ctx, transport);
}
-int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
+int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
const void *server_data,
grpc_transport_stream_op *initial_op) {
- return transport->vtable->init_stream(transport, stream, server_data,
- initial_op);
+ return transport->vtable->init_stream(exec_ctx, transport, stream,
+ server_data, initial_op);
}
-void grpc_transport_perform_stream_op(grpc_transport *transport,
+void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_stream *stream,
grpc_transport_stream_op *op) {
- transport->vtable->perform_stream_op(transport, stream, op);
+ transport->vtable->perform_stream_op(exec_ctx, transport, stream, op);
}
-void grpc_transport_perform_op(grpc_transport *transport,
+void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_transport_op *op) {
- transport->vtable->perform_op(transport, op);
+ transport->vtable->perform_op(exec_ctx, transport, op);
}
-void grpc_transport_destroy_stream(grpc_transport *transport,
+void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_stream *stream) {
- transport->vtable->destroy_stream(transport, stream);
+ transport->vtable->destroy_stream(exec_ctx, transport, stream);
}
-char *grpc_transport_get_peer(grpc_transport *transport) {
- return transport->vtable->get_peer(transport);
+char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport) {
+ return transport->vtable->get_peer(exec_ctx, transport);
}
void grpc_transport_stream_op_finish_with_failure(
- grpc_transport_stream_op *op) {
- if (op->send_ops) {
- op->on_done_send->cb(op->on_done_send->cb_arg, 0);
- }
- if (op->recv_ops) {
- op->on_done_recv->cb(op->on_done_recv->cb_arg, 0);
- }
- if (op->on_consumed) {
- op->on_consumed->cb(op->on_consumed->cb_arg, 0);
- }
+ grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op) {
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_done_recv, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_done_send, 0);
+ grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 0);
}
void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
@@ -101,15 +101,15 @@ void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
typedef struct {
gpr_slice message;
- grpc_iomgr_closure *then_call;
- grpc_iomgr_closure closure;
+ grpc_closure *then_call;
+ grpc_closure closure;
} close_message_data;
-static void free_message(void *p, int iomgr_success) {
+static void free_message(grpc_exec_ctx *exec_ctx, void *p, int iomgr_success) {
close_message_data *cmd = p;
gpr_slice_unref(cmd->message);
if (cmd->then_call != NULL) {
- cmd->then_call->cb(cmd->then_call->cb_arg, iomgr_success);
+ cmd->then_call->cb(exec_ctx, cmd->then_call->cb_arg, iomgr_success);
}
gpr_free(cmd);
}
@@ -130,7 +130,7 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
cmd = gpr_malloc(sizeof(*cmd));
cmd->message = *optional_message;
cmd->then_call = op->on_consumed;
- grpc_iomgr_closure_init(&cmd->closure, free_message, cmd);
+ grpc_closure_init(&cmd->closure, free_message, cmd);
op->on_consumed = &cmd->closure;
op->optional_close_message = &cmd->message;
}
diff --git a/src/core/transport/transport.h b/src/core/transport/transport.h
index 6e1ec2f64c..d4cee03862 100644
--- a/src/core/transport/transport.h
+++ b/src/core/transport/transport.h
@@ -64,11 +64,11 @@ typedef enum grpc_stream_state {
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
typedef struct grpc_transport_stream_op {
- grpc_iomgr_closure *on_consumed;
+ grpc_closure *on_consumed;
grpc_stream_op_buffer *send_ops;
int is_last_send;
- grpc_iomgr_closure *on_done_send;
+ grpc_closure *on_done_send;
grpc_stream_op_buffer *recv_ops;
grpc_stream_state *recv_state;
@@ -76,7 +76,7 @@ typedef struct grpc_transport_stream_op {
These bytes will be eventually used to replenish per-stream flow control
windows. */
size_t max_recv_bytes;
- grpc_iomgr_closure *on_done_recv;
+ grpc_closure *on_done_recv;
grpc_pollset *bind_pollset;
@@ -95,9 +95,9 @@ typedef struct grpc_transport_stream_op {
/** Transport op: a set of operations to perform on a transport as a whole */
typedef struct grpc_transport_op {
/** called when processing of this op is done */
- grpc_iomgr_closure *on_consumed;
+ grpc_closure *on_consumed;
/** connectivity monitoring */
- grpc_iomgr_closure *on_connectivity_state_change;
+ grpc_closure *on_connectivity_state_change;
grpc_connectivity_state *connectivity_state;
/** should the transport be disconnected */
int disconnect;
@@ -118,7 +118,7 @@ typedef struct grpc_transport_op {
/** add this transport to a pollset_set */
grpc_pollset_set *bind_pollset_set;
/** send a ping, call this back if not NULL */
- grpc_iomgr_closure *send_ping;
+ grpc_closure *send_ping;
} grpc_transport_op;
/* Returns the amount of memory required to store a grpc_stream for this
@@ -134,7 +134,8 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
stream - a pointer to uninitialized memory to initialize
server_data - either NULL for a client initiated stream, or a pointer
supplied from the accept_stream callback function */
-int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
+int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport, grpc_stream *stream,
const void *server_data,
grpc_transport_stream_op *initial_op);
@@ -148,10 +149,12 @@ int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
transport - the transport on which to create this stream
stream - the grpc_stream to destroy (memory is still owned by the
caller, but any child memory must be cleaned up) */
-void grpc_transport_destroy_stream(grpc_transport *transport,
+void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_stream *stream);
-void grpc_transport_stream_op_finish_with_failure(grpc_transport_stream_op *op);
+void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
+ grpc_transport_stream_op *op);
void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
grpc_status_code status);
@@ -171,17 +174,19 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
stream - the stream on which to send the operations. This must be
non-NULL and previously initialized by the same transport.
op - a grpc_transport_stream_op specifying the op to perform */
-void grpc_transport_perform_stream_op(grpc_transport *transport,
+void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_stream *stream,
grpc_transport_stream_op *op);
-void grpc_transport_perform_op(grpc_transport *transport,
+void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport,
grpc_transport_op *op);
/* Send a ping on a transport
Calls cb with user data when a response is received. */
-void grpc_transport_ping(grpc_transport *transport, grpc_iomgr_closure *cb);
+void grpc_transport_ping(grpc_transport *transport, grpc_closure *cb);
/* Advise peer of pending connection termination. */
void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
@@ -191,9 +196,10 @@ void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */
-void grpc_transport_destroy(grpc_transport *transport);
+void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
/* Get the transports peer */
-char *grpc_transport_get_peer(grpc_transport *transport);
+char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
+ grpc_transport *transport);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_H */
diff --git a/src/core/transport/transport_impl.h b/src/core/transport/transport_impl.h
index d3bbdf6c27..900c6340ff 100644
--- a/src/core/transport/transport_impl.h
+++ b/src/core/transport/transport_impl.h
@@ -42,25 +42,27 @@ typedef struct grpc_transport_vtable {
size_t sizeof_stream; /* = sizeof(transport stream) */
/* implementation of grpc_transport_init_stream */
- int (*init_stream)(grpc_transport *self, grpc_stream *stream,
- const void *server_data,
+ int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, const void *server_data,
grpc_transport_stream_op *initial_op);
/* implementation of grpc_transport_perform_stream_op */
- void (*perform_stream_op)(grpc_transport *self, grpc_stream *stream,
- grpc_transport_stream_op *op);
+ void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, grpc_transport_stream_op *op);
/* implementation of grpc_transport_perform_op */
- void (*perform_op)(grpc_transport *self, grpc_transport_op *op);
+ void (*perform_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_transport_op *op);
/* implementation of grpc_transport_destroy_stream */
- void (*destroy_stream)(grpc_transport *self, grpc_stream *stream);
+ void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream);
/* implementation of grpc_transport_destroy */
- void (*destroy)(grpc_transport *self);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
/* implementation of grpc_transport_get_peer */
- char *(*get_peer)(grpc_transport *self);
+ char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
/* an instance of a grpc transport */
diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c
index b1a975155a..9ef4b163be 100644
--- a/src/core/tsi/fake_transport_security.c
+++ b/src/core/tsi/fake_transport_security.c
@@ -53,7 +53,7 @@
where the size field value is the size of the size field plus the size of
the data encoded in little endian on 4 bytes. */
typedef struct {
- unsigned char* data;
+ unsigned char *data;
size_t size;
size_t allocated_size;
size_t offset;
@@ -87,10 +87,10 @@ typedef struct {
/* --- Utils. ---*/
-static const char* tsi_fake_handshake_message_strings[] = {
+static const char *tsi_fake_handshake_message_strings[] = {
"CLIENT_INIT", "SERVER_INIT", "CLIENT_FINISHED", "SERVER_FINISHED"};
-static const char* tsi_fake_handshake_message_to_string(int msg) {
+static const char *tsi_fake_handshake_message_to_string(int msg) {
if (msg < 0 || msg >= TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
gpr_log(GPR_ERROR, "Invalid message %d", msg);
return "UNKNOWN";
@@ -99,7 +99,7 @@ static const char* tsi_fake_handshake_message_to_string(int msg) {
}
static tsi_result tsi_fake_handshake_message_from_string(
- const char* msg_string, tsi_fake_handshake_message* msg) {
+ const char *msg_string, tsi_fake_handshake_message *msg) {
tsi_fake_handshake_message i;
for (i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) {
if (strncmp(msg_string, tsi_fake_handshake_message_strings[i],
@@ -112,32 +112,32 @@ static tsi_result tsi_fake_handshake_message_from_string(
return TSI_DATA_CORRUPTED;
}
-static gpr_uint32 load32_little_endian(const unsigned char* buf) {
+static gpr_uint32 load32_little_endian(const unsigned char *buf) {
return ((gpr_uint32)(buf[0]) | (gpr_uint32)(buf[1] << 8) |
(gpr_uint32)(buf[2] << 16) | (gpr_uint32)(buf[3] << 24));
}
-static void store32_little_endian(gpr_uint32 value, unsigned char* buf) {
+static void store32_little_endian(gpr_uint32 value, unsigned char *buf) {
buf[3] = (unsigned char)(value >> 24) & 0xFF;
buf[2] = (unsigned char)(value >> 16) & 0xFF;
buf[1] = (unsigned char)(value >> 8) & 0xFF;
buf[0] = (unsigned char)(value)&0xFF;
}
-static void tsi_fake_frame_reset(tsi_fake_frame* frame, int needs_draining) {
+static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
frame->offset = 0;
frame->needs_draining = needs_draining;
if (!needs_draining) frame->size = 0;
}
/* Returns 1 if successful, 0 otherwise. */
-static int tsi_fake_frame_ensure_size(tsi_fake_frame* frame) {
+static int tsi_fake_frame_ensure_size(tsi_fake_frame *frame) {
if (frame->data == NULL) {
frame->allocated_size = frame->size;
frame->data = malloc(frame->allocated_size);
if (frame->data == NULL) return 0;
} else if (frame->size > frame->allocated_size) {
- unsigned char* new_data = realloc(frame->data, frame->size);
+ unsigned char *new_data = realloc(frame->data, frame->size);
if (new_data == NULL) {
free(frame->data);
frame->data = NULL;
@@ -150,12 +150,12 @@ static int tsi_fake_frame_ensure_size(tsi_fake_frame* frame) {
}
/* This method should not be called if frame->needs_framing is not 0. */
-static tsi_result fill_frame_from_bytes(const unsigned char* incoming_bytes,
- size_t* incoming_bytes_size,
- tsi_fake_frame* frame) {
+static tsi_result fill_frame_from_bytes(const unsigned char *incoming_bytes,
+ size_t *incoming_bytes_size,
+ tsi_fake_frame *frame) {
size_t available_size = *incoming_bytes_size;
size_t to_read_size = 0;
- const unsigned char* bytes_cursor = incoming_bytes;
+ const unsigned char *bytes_cursor = incoming_bytes;
if (frame->needs_draining) return TSI_INTERNAL_ERROR;
if (frame->data == NULL) {
@@ -198,9 +198,9 @@ static tsi_result fill_frame_from_bytes(const unsigned char* incoming_bytes,
}
/* This method should not be called if frame->needs_framing is 0. */
-static tsi_result drain_frame_to_bytes(unsigned char* outgoing_bytes,
- size_t* outgoing_bytes_size,
- tsi_fake_frame* frame) {
+static tsi_result drain_frame_to_bytes(unsigned char *outgoing_bytes,
+ size_t *outgoing_bytes_size,
+ tsi_fake_frame *frame) {
size_t to_write_size = frame->size - frame->offset;
if (!frame->needs_draining) return TSI_INTERNAL_ERROR;
if (*outgoing_bytes_size < to_write_size) {
@@ -214,8 +214,8 @@ static tsi_result drain_frame_to_bytes(unsigned char* outgoing_bytes,
return TSI_OK;
}
-static tsi_result bytes_to_frame(unsigned char* bytes, size_t bytes_size,
- tsi_fake_frame* frame) {
+static tsi_result bytes_to_frame(unsigned char *bytes, size_t bytes_size,
+ tsi_fake_frame *frame) {
frame->offset = 0;
frame->size = bytes_size + TSI_FAKE_FRAME_HEADER_SIZE;
if (!tsi_fake_frame_ensure_size(frame)) return TSI_OUT_OF_RESOURCES;
@@ -225,24 +225,24 @@ static tsi_result bytes_to_frame(unsigned char* bytes, size_t bytes_size,
return TSI_OK;
}
-static void tsi_fake_frame_destruct(tsi_fake_frame* frame) {
+static void tsi_fake_frame_destruct(tsi_fake_frame *frame) {
if (frame->data != NULL) free(frame->data);
}
/* --- tsi_frame_protector methods implementation. ---*/
-static tsi_result fake_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size) {
+static tsi_result fake_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
unsigned char frame_header[TSI_FAKE_FRAME_HEADER_SIZE];
- tsi_fake_frame* frame = &impl->protect_frame;
+ tsi_fake_frame *frame = &impl->protect_frame;
size_t saved_output_size = *protected_output_frames_size;
size_t drained_size = 0;
- size_t* num_bytes_written = protected_output_frames_size;
+ size_t *num_bytes_written = protected_output_frames_size;
*num_bytes_written = 0;
/* Try to drain first. */
@@ -293,11 +293,11 @@ static tsi_result fake_protector_protect(tsi_frame_protector* self,
}
static tsi_result fake_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size) {
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
- tsi_fake_frame* frame = &impl->protect_frame;
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
+ tsi_fake_frame *frame = &impl->protect_frame;
if (!frame->needs_draining) {
/* Create a short frame. */
frame->size = frame->offset;
@@ -314,15 +314,15 @@ static tsi_result fake_protector_protect_flush(
}
static tsi_result fake_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
- tsi_fake_frame* frame = &impl->unprotect_frame;
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
+ tsi_fake_frame *frame = &impl->unprotect_frame;
size_t saved_output_size = *unprotected_bytes_size;
size_t drained_size = 0;
- size_t* num_bytes_written = unprotected_bytes_size;
+ size_t *num_bytes_written = unprotected_bytes_size;
*num_bytes_written = 0;
/* Try to drain first. */
@@ -362,8 +362,8 @@ static tsi_result fake_protector_unprotect(
return result;
}
-static void fake_protector_destroy(tsi_frame_protector* self) {
- tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
+static void fake_protector_destroy(tsi_frame_protector *self) {
+ tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
tsi_fake_frame_destruct(&impl->protect_frame);
tsi_fake_frame_destruct(&impl->unprotect_frame);
free(self);
@@ -377,8 +377,8 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
- tsi_handshaker* self, unsigned char* bytes, size_t* bytes_size) {
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+ tsi_handshaker *self, unsigned char *bytes, size_t *bytes_size) {
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
tsi_result result = TSI_OK;
if (impl->needs_incoming_message || impl->result == TSI_OK) {
*bytes_size = 0;
@@ -387,9 +387,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
if (!impl->outgoing.needs_draining) {
tsi_fake_handshake_message next_message_to_send =
impl->next_message_to_send + 2;
- const char* msg_string =
+ const char *msg_string =
tsi_fake_handshake_message_to_string(impl->next_message_to_send);
- result = bytes_to_frame((unsigned char*)msg_string, strlen(msg_string),
+ result = bytes_to_frame((unsigned char *)msg_string, strlen(msg_string),
&impl->outgoing);
if (result != TSI_OK) return result;
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
@@ -418,9 +418,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
}
static tsi_result fake_handshaker_process_bytes_from_peer(
- tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
+ tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
tsi_result result = TSI_OK;
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
tsi_fake_handshake_message expected_msg = impl->next_message_to_send - 1;
tsi_fake_handshake_message received_msg;
@@ -433,7 +433,7 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
/* We now have a complete frame. */
result = tsi_fake_handshake_message_from_string(
- (const char*)impl->incoming.data + TSI_FAKE_FRAME_HEADER_SIZE,
+ (const char *)impl->incoming.data + TSI_FAKE_FRAME_HEADER_SIZE,
&received_msg);
if (result != TSI_OK) {
impl->result = result;
@@ -460,13 +460,13 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
return TSI_OK;
}
-static tsi_result fake_handshaker_get_result(tsi_handshaker* self) {
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+static tsi_result fake_handshaker_get_result(tsi_handshaker *self) {
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
return impl->result;
}
-static tsi_result fake_handshaker_extract_peer(tsi_handshaker* self,
- tsi_peer* peer) {
+static tsi_result fake_handshaker_extract_peer(tsi_handshaker *self,
+ tsi_peer *peer) {
tsi_result result = tsi_construct_peer(1, peer);
if (result != TSI_OK) return result;
result = tsi_construct_string_peer_property_from_cstring(
@@ -477,15 +477,15 @@ static tsi_result fake_handshaker_extract_peer(tsi_handshaker* self,
}
static tsi_result fake_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_protected_frame_size,
- tsi_frame_protector** protector) {
+ tsi_handshaker *self, size_t *max_protected_frame_size,
+ tsi_frame_protector **protector) {
*protector = tsi_create_fake_protector(max_protected_frame_size);
if (*protector == NULL) return TSI_OUT_OF_RESOURCES;
return TSI_OK;
}
-static void fake_handshaker_destroy(tsi_handshaker* self) {
- tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
+static void fake_handshaker_destroy(tsi_handshaker *self) {
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
tsi_fake_frame_destruct(&impl->incoming);
tsi_fake_frame_destruct(&impl->outgoing);
free(self);
@@ -500,8 +500,8 @@ static const tsi_handshaker_vtable handshaker_vtable = {
fake_handshaker_destroy,
};
-tsi_handshaker* tsi_create_fake_handshaker(int is_client) {
- tsi_fake_handshaker* impl = calloc(1, sizeof(tsi_fake_handshaker));
+tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
+ tsi_fake_handshaker *impl = calloc(1, sizeof(tsi_fake_handshaker));
impl->base.vtable = &handshaker_vtable;
impl->is_client = is_client;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
@@ -515,9 +515,9 @@ tsi_handshaker* tsi_create_fake_handshaker(int is_client) {
return &impl->base;
}
-tsi_frame_protector* tsi_create_fake_protector(
- size_t* max_protected_frame_size) {
- tsi_fake_frame_protector* impl = calloc(1, sizeof(tsi_fake_frame_protector));
+tsi_frame_protector *tsi_create_fake_protector(
+ size_t *max_protected_frame_size) {
+ tsi_fake_frame_protector *impl = calloc(1, sizeof(tsi_fake_frame_protector));
if (impl == NULL) return NULL;
impl->max_frame_size = (max_protected_frame_size == NULL)
? TSI_FAKE_DEFAULT_FRAME_SIZE
diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h
index 1fa11349fb..fe295aa536 100644
--- a/src/core/tsi/fake_transport_security.h
+++ b/src/core/tsi/fake_transport_security.h
@@ -48,11 +48,11 @@ extern "C" {
No cryptography is performed in these objects. They just simulate handshake
messages going back and forth for the handshaker and do some framing on
cleartext data for the protector. */
-tsi_handshaker* tsi_create_fake_handshaker(int is_client);
+tsi_handshaker *tsi_create_fake_handshaker(int is_client);
/* Creates a protector directly without going through the handshake phase. */
-tsi_frame_protector* tsi_create_fake_protector(
- size_t* max_protected_frame_size);
+tsi_frame_protector *tsi_create_fake_protector(
+ size_t *max_protected_frame_size);
#ifdef __cplusplus
}
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c
index 99ce7ecadf..ad6b2d7684 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.c
@@ -68,16 +68,16 @@
/* --- Structure definitions. ---*/
struct tsi_ssl_handshaker_factory {
- tsi_result (*create_handshaker)(tsi_ssl_handshaker_factory* self,
- const char* server_name_indication,
- tsi_handshaker** handshaker);
- void (*destroy)(tsi_ssl_handshaker_factory* self);
+ tsi_result (*create_handshaker)(tsi_ssl_handshaker_factory *self,
+ const char *server_name_indication,
+ tsi_handshaker **handshaker);
+ void (*destroy)(tsi_ssl_handshaker_factory *self);
};
typedef struct {
tsi_ssl_handshaker_factory base;
- SSL_CTX* ssl_context;
- unsigned char* alpn_protocol_list;
+ SSL_CTX *ssl_context;
+ unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
} tsi_ssl_client_handshaker_factory;
@@ -87,27 +87,27 @@ typedef struct {
/* Several contexts to support SNI.
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
- SSL_CTX** ssl_contexts;
- tsi_peer* ssl_context_x509_subject_names;
+ SSL_CTX **ssl_contexts;
+ tsi_peer *ssl_context_x509_subject_names;
size_t ssl_context_count;
- unsigned char* alpn_protocol_list;
+ unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
} tsi_ssl_server_handshaker_factory;
typedef struct {
tsi_handshaker base;
- SSL* ssl;
- BIO* into_ssl;
- BIO* from_ssl;
+ SSL *ssl;
+ BIO *into_ssl;
+ BIO *from_ssl;
tsi_result result;
} tsi_ssl_handshaker;
typedef struct {
tsi_frame_protector base;
- SSL* ssl;
- BIO* into_ssl;
- BIO* from_ssl;
- unsigned char* buffer;
+ SSL *ssl;
+ BIO *into_ssl;
+ BIO *from_ssl;
+ unsigned char *buffer;
size_t buffer_size;
size_t buffer_offset;
} tsi_ssl_frame_protector;
@@ -115,9 +115,9 @@ typedef struct {
/* --- Library Initialization. ---*/
static gpr_once init_openssl_once = GPR_ONCE_INIT;
-static gpr_mu* openssl_mutexes = NULL;
+static gpr_mu *openssl_mutexes = NULL;
-static void openssl_locking_cb(int mode, int type, const char* file, int line) {
+static void openssl_locking_cb(int mode, int type, const char *file, int line) {
if (mode & CRYPTO_LOCK) {
gpr_mu_lock(&openssl_mutexes[type]);
} else {
@@ -148,7 +148,7 @@ static void init_openssl(void) {
/* --- Ssl utils. ---*/
-static const char* ssl_error_string(int error) {
+static const char *ssl_error_string(int error) {
switch (error) {
case SSL_ERROR_NONE:
return "SSL_ERROR_NONE";
@@ -174,8 +174,8 @@ static const char* ssl_error_string(int error) {
}
/* TODO(jboeuf): Remove when we are past the debugging phase with this code. */
-static void ssl_log_where_info(const SSL* ssl, int where, int flag,
- const char* msg) {
+static void ssl_log_where_info(const SSL *ssl, int where, int flag,
+ const char *msg) {
if ((where & flag) && tsi_tracing_enabled) {
gpr_log(GPR_INFO, "%20.20s - %30.30s - %5.10s", msg,
SSL_state_string_long(ssl), SSL_state_string(ssl));
@@ -183,7 +183,7 @@ static void ssl_log_where_info(const SSL* ssl, int where, int flag,
}
/* Used for debugging. TODO(jboeuf): Remove when code is mature enough. */
-static void ssl_info_callback(const SSL* ssl, int where, int ret) {
+static void ssl_info_callback(const SSL *ssl, int where, int ret) {
if (ret == 0) {
gpr_log(GPR_ERROR, "ssl_info_callback: error occured.\n");
return;
@@ -197,7 +197,7 @@ static void ssl_info_callback(const SSL* ssl, int where, int ret) {
/* Returns 1 if name looks like an IP address, 0 otherwise.
This is a very rough heuristic as it does not handle IPV6 or things like:
0300.0250.00.01, 0xC0.0Xa8.0x0.0x1, 000030052000001, 0xc0.052000001 */
-static int looks_like_ip_address(const char* name) {
+static int looks_like_ip_address(const char *name) {
size_t i;
size_t dot_count = 0;
size_t num_size = 0;
@@ -218,12 +218,12 @@ static int looks_like_ip_address(const char* name) {
}
/* Gets the subject CN from an X509 cert. */
-static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
- size_t* utf8_size) {
+static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8,
+ size_t *utf8_size) {
int common_name_index = -1;
- X509_NAME_ENTRY* common_name_entry = NULL;
- ASN1_STRING* common_name_asn1 = NULL;
- X509_NAME* subject_name = X509_get_subject_name(cert);
+ X509_NAME_ENTRY *common_name_entry = NULL;
+ ASN1_STRING *common_name_asn1 = NULL;
+ X509_NAME *subject_name = X509_get_subject_name(cert);
int utf8_returned_size = 0;
if (subject_name == NULL) {
gpr_log(GPR_ERROR, "Could not get subject name from certificate.");
@@ -258,8 +258,8 @@ static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
/* Gets the subject CN of an X509 cert as a tsi_peer_property. */
static tsi_result peer_property_from_x509_common_name(
- X509* cert, tsi_peer_property* property) {
- unsigned char* common_name;
+ X509 *cert, tsi_peer_property *property) {
+ unsigned char *common_name;
size_t common_name_size;
tsi_result result =
ssl_get_x509_common_name(cert, &common_name, &common_name_size);
@@ -273,7 +273,7 @@ static tsi_result peer_property_from_x509_common_name(
}
result = tsi_construct_string_peer_property(
TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY,
- common_name == NULL ? "" : (const char*)common_name, common_name_size,
+ common_name == NULL ? "" : (const char *)common_name, common_name_size,
property);
OPENSSL_free(common_name);
return result;
@@ -281,7 +281,7 @@ static tsi_result peer_property_from_x509_common_name(
/* Gets the subject SANs from an X509 cert as a tsi_peer_property. */
static tsi_result add_subject_alt_names_properties_to_peer(
- tsi_peer* peer, GENERAL_NAMES* subject_alt_names,
+ tsi_peer *peer, GENERAL_NAMES *subject_alt_names,
size_t subject_alt_name_count) {
size_t i;
tsi_result result = TSI_OK;
@@ -290,11 +290,11 @@ static tsi_result add_subject_alt_names_properties_to_peer(
peer->property_count -= subject_alt_name_count;
for (i = 0; i < subject_alt_name_count; i++) {
- GENERAL_NAME* subject_alt_name =
+ GENERAL_NAME *subject_alt_name =
sk_GENERAL_NAME_value(subject_alt_names, (int)i);
/* Filter out the non-dns entries names. */
if (subject_alt_name->type == GEN_DNS) {
- unsigned char* dns_name = NULL;
+ unsigned char *dns_name = NULL;
int dns_name_size =
ASN1_STRING_to_UTF8(&dns_name, subject_alt_name->d.dNSName);
if (dns_name_size < 0) {
@@ -304,7 +304,7 @@ static tsi_result add_subject_alt_names_properties_to_peer(
}
result = tsi_construct_string_peer_property(
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY,
- (const char*)dns_name, (size_t)dns_name_size,
+ (const char *)dns_name, (size_t)dns_name_size,
&peer->properties[peer->property_count++]);
OPENSSL_free(dns_name);
if (result != TSI_OK) break;
@@ -314,10 +314,10 @@ static tsi_result add_subject_alt_names_properties_to_peer(
}
/* Gets information about the peer's X509 cert as a tsi_peer object. */
-static tsi_result peer_from_x509(X509* cert, int include_certificate_type,
- tsi_peer* peer) {
+static tsi_result peer_from_x509(X509 *cert, int include_certificate_type,
+ tsi_peer *peer) {
/* TODO(jboeuf): Maybe add more properties. */
- GENERAL_NAMES* subject_alt_names =
+ GENERAL_NAMES *subject_alt_names =
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
int subject_alt_name_count =
(subject_alt_names != NULL) ? sk_GENERAL_NAME_num(subject_alt_names) : 0;
@@ -364,8 +364,8 @@ static void log_ssl_error_stack(void) {
}
/* Performs an SSL_read and handle errors. */
-static tsi_result do_ssl_read(SSL* ssl, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+static tsi_result do_ssl_read(SSL *ssl, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
int read_from_ssl;
GPR_ASSERT(*unprotected_bytes_size <= INT_MAX);
read_from_ssl =
@@ -401,7 +401,7 @@ static tsi_result do_ssl_read(SSL* ssl, unsigned char* unprotected_bytes,
}
/* Performs an SSL_write and handle errors. */
-static tsi_result do_ssl_write(SSL* ssl, unsigned char* unprotected_bytes,
+static tsi_result do_ssl_write(SSL *ssl, unsigned char *unprotected_bytes,
size_t unprotected_bytes_size) {
int ssl_write_result;
GPR_ASSERT(unprotected_bytes_size <= INT_MAX);
@@ -424,13 +424,13 @@ static tsi_result do_ssl_write(SSL* ssl, unsigned char* unprotected_bytes,
/* Loads an in-memory PEM certificate chain into the SSL context. */
static tsi_result ssl_ctx_use_certificate_chain(
- SSL_CTX* context, const unsigned char* pem_cert_chain,
+ SSL_CTX *context, const unsigned char *pem_cert_chain,
size_t pem_cert_chain_size) {
tsi_result result = TSI_OK;
- X509* certificate = NULL;
- BIO* pem;
+ X509 *certificate = NULL;
+ BIO *pem;
GPR_ASSERT(pem_cert_chain_size <= INT_MAX);
- pem = BIO_new_mem_buf((void*)pem_cert_chain, (int)pem_cert_chain_size);
+ pem = BIO_new_mem_buf((void *)pem_cert_chain, (int)pem_cert_chain_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
@@ -444,7 +444,7 @@ static tsi_result ssl_ctx_use_certificate_chain(
break;
}
while (1) {
- X509* certificate_authority = PEM_read_bio_X509(pem, NULL, NULL, "");
+ X509 *certificate_authority = PEM_read_bio_X509(pem, NULL, NULL, "");
if (certificate_authority == NULL) {
ERR_clear_error();
break; /* Done reading. */
@@ -456,7 +456,7 @@ static tsi_result ssl_ctx_use_certificate_chain(
}
/* We don't need to free certificate_authority as its ownership has been
transfered to the context. That is not the case for certificate though.
- */
+ */
}
} while (0);
@@ -466,14 +466,14 @@ static tsi_result ssl_ctx_use_certificate_chain(
}
/* Loads an in-memory PEM private key into the SSL context. */
-static tsi_result ssl_ctx_use_private_key(SSL_CTX* context,
- const unsigned char* pem_key,
+static tsi_result ssl_ctx_use_private_key(SSL_CTX *context,
+ const unsigned char *pem_key,
size_t pem_key_size) {
tsi_result result = TSI_OK;
- EVP_PKEY* private_key = NULL;
- BIO* pem;
+ EVP_PKEY *private_key = NULL;
+ BIO *pem;
GPR_ASSERT(pem_key_size <= INT_MAX);
- pem = BIO_new_mem_buf((void*)pem_key, (int)pem_key_size);
+ pem = BIO_new_mem_buf((void *)pem_key, (int)pem_key_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, "");
@@ -494,16 +494,16 @@ static tsi_result ssl_ctx_use_private_key(SSL_CTX* context,
/* Loads in-memory PEM verification certs into the SSL context and optionally
returns the verification cert names (root_names can be NULL). */
static tsi_result ssl_ctx_load_verification_certs(
- SSL_CTX* context, const unsigned char* pem_roots, size_t pem_roots_size,
+ SSL_CTX *context, const unsigned char *pem_roots, size_t pem_roots_size,
STACK_OF(X509_NAME) * *root_names) {
tsi_result result = TSI_OK;
size_t num_roots = 0;
- X509* root = NULL;
- X509_NAME* root_name = NULL;
- BIO* pem;
- X509_STORE* root_store;
+ X509 *root = NULL;
+ X509_NAME *root_name = NULL;
+ BIO *pem;
+ X509_STORE *root_store;
GPR_ASSERT(pem_roots_size <= INT_MAX);
- pem = BIO_new_mem_buf((void*)pem_roots, (int)pem_roots_size);
+ pem = BIO_new_mem_buf((void *)pem_roots, (int)pem_roots_size);
root_store = SSL_CTX_get_cert_store(context);
if (root_store == NULL) return TSI_INVALID_ARGUMENT;
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
@@ -562,9 +562,9 @@ static tsi_result ssl_ctx_load_verification_certs(
/* Populates the SSL context with a private key and a cert chain, and sets the
cipher list and the ephemeral ECDH key. */
static tsi_result populate_ssl_context(
- SSL_CTX* context, const unsigned char* pem_private_key,
- size_t pem_private_key_size, const unsigned char* pem_certificate_chain,
- size_t pem_certificate_chain_size, const char* cipher_list) {
+ SSL_CTX *context, const unsigned char *pem_private_key,
+ size_t pem_private_key_size, const unsigned char *pem_certificate_chain,
+ size_t pem_certificate_chain_size, const char *cipher_list) {
tsi_result result = TSI_OK;
if (pem_certificate_chain != NULL) {
result = ssl_ctx_use_certificate_chain(context, pem_certificate_chain,
@@ -587,7 +587,7 @@ static tsi_result populate_ssl_context(
return TSI_INVALID_ARGUMENT;
}
{
- EC_KEY* ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_KEY *ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
if (!SSL_CTX_set_tmp_ecdh(context, ecdh)) {
gpr_log(GPR_ERROR, "Could not set ephemeral ECDH key.");
EC_KEY_free(ecdh);
@@ -601,12 +601,12 @@ static tsi_result populate_ssl_context(
/* Extracts the CN and the SANs from an X509 cert as a peer object. */
static tsi_result extract_x509_subject_names_from_pem_cert(
- const unsigned char* pem_cert, size_t pem_cert_size, tsi_peer* peer) {
+ const unsigned char *pem_cert, size_t pem_cert_size, tsi_peer *peer) {
tsi_result result = TSI_OK;
- X509* cert = NULL;
- BIO* pem;
+ X509 *cert = NULL;
+ BIO *pem;
GPR_ASSERT(pem_cert_size <= INT_MAX);
- pem = BIO_new_mem_buf((void*)pem_cert, (int)pem_cert_size);
+ pem = BIO_new_mem_buf((void *)pem_cert, (int)pem_cert_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
cert = PEM_read_bio_X509(pem, NULL, NULL, "");
@@ -623,11 +623,11 @@ static tsi_result extract_x509_subject_names_from_pem_cert(
/* Builds the alpn protocol name list according to rfc 7301. */
static tsi_result build_alpn_protocol_name_list(
- const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- unsigned char** protocol_name_list, size_t* protocol_name_list_length) {
+ const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ unsigned char **protocol_name_list, size_t *protocol_name_list_length) {
uint16_t i;
- unsigned char* current;
+ unsigned char *current;
*protocol_name_list = NULL;
*protocol_name_list_length = 0;
if (num_alpn_protocols == 0) return TSI_INVALID_ARGUMENT;
@@ -657,12 +657,12 @@ static tsi_result build_alpn_protocol_name_list(
/* --- tsi_frame_protector methods implementation. ---*/
-static tsi_result ssl_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size) {
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+static tsi_result ssl_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size) {
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
int read_from_ssl;
size_t available;
tsi_result result = TSI_OK;
@@ -713,10 +713,10 @@ static tsi_result ssl_protector_protect(tsi_frame_protector* self,
}
static tsi_result ssl_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size) {
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size) {
tsi_result result = TSI_OK;
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
int read_from_ssl = 0;
int pending;
@@ -746,14 +746,14 @@ static tsi_result ssl_protector_protect_flush(
}
static tsi_result ssl_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
tsi_result result = TSI_OK;
int written_into_ssl = 0;
size_t output_bytes_size = *unprotected_bytes_size;
size_t output_bytes_offset = 0;
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
/* First, try to read remaining data from ssl. */
result = do_ssl_read(impl->ssl, unprotected_bytes, unprotected_bytes_size);
@@ -787,8 +787,8 @@ static tsi_result ssl_protector_unprotect(
return result;
}
-static void ssl_protector_destroy(tsi_frame_protector* self) {
- tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
+static void ssl_protector_destroy(tsi_frame_protector *self) {
+ tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
if (impl->buffer != NULL) free(impl->buffer);
if (impl->ssl != NULL) SSL_free(impl->ssl);
free(self);
@@ -801,10 +801,10 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
/* --- tsi_handshaker methods implementation. ---*/
-static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
int bytes_read_from_ssl = 0;
if (bytes == NULL || bytes_size == NULL || *bytes_size == 0 ||
*bytes_size > INT_MAX) {
@@ -825,8 +825,8 @@ static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
return BIO_pending(impl->from_ssl) == 0 ? TSI_OK : TSI_INCOMPLETE_DATA;
}
-static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+static tsi_result ssl_handshaker_get_result(tsi_handshaker *self) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
if ((impl->result == TSI_HANDSHAKE_IN_PROGRESS) &&
SSL_is_init_finished(impl->ssl)) {
impl->result = TSI_OK;
@@ -835,8 +835,8 @@ static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) {
}
static tsi_result ssl_handshaker_process_bytes_from_peer(
- tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+ tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
int bytes_written_into_ssl_size = 0;
if (bytes == NULL || bytes_size == 0 || *bytes_size > INT_MAX) {
return TSI_INVALID_ARGUMENT;
@@ -880,13 +880,13 @@ static tsi_result ssl_handshaker_process_bytes_from_peer(
}
}
-static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
- tsi_peer* peer) {
+static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
+ tsi_peer *peer) {
tsi_result result = TSI_OK;
- const unsigned char* alpn_selected = NULL;
+ const unsigned char *alpn_selected = NULL;
unsigned int alpn_selected_len;
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
- X509* peer_cert = SSL_get_peer_certificate(impl->ssl);
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+ X509 *peer_cert = SSL_get_peer_certificate(impl->ssl);
if (peer_cert != NULL) {
result = peer_from_x509(peer_cert, 1, peer);
X509_free(peer_cert);
@@ -902,14 +902,14 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
}
if (alpn_selected != NULL) {
size_t i;
- tsi_peer_property* new_properties =
+ tsi_peer_property *new_properties =
calloc(1, sizeof(tsi_peer_property) * (peer->property_count + 1));
if (new_properties == NULL) return TSI_OUT_OF_RESOURCES;
for (i = 0; i < peer->property_count; i++) {
new_properties[i] = peer->properties[i];
}
result = tsi_construct_string_peer_property(
- TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char*)alpn_selected,
+ TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char *)alpn_selected,
alpn_selected_len, &new_properties[peer->property_count]);
if (result != TSI_OK) {
free(new_properties);
@@ -923,12 +923,12 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
}
static tsi_result ssl_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_output_protected_frame_size,
- tsi_frame_protector** protector) {
+ tsi_handshaker *self, size_t *max_output_protected_frame_size,
+ tsi_frame_protector **protector) {
size_t actual_max_output_protected_frame_size =
TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND;
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
- tsi_ssl_frame_protector* protector_impl =
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+ tsi_ssl_frame_protector *protector_impl =
calloc(1, sizeof(tsi_ssl_frame_protector));
if (protector_impl == NULL) {
return TSI_OUT_OF_RESOURCES;
@@ -968,8 +968,8 @@ static tsi_result ssl_handshaker_create_frame_protector(
return TSI_OK;
}
-static void ssl_handshaker_destroy(tsi_handshaker* self) {
- tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+static void ssl_handshaker_destroy(tsi_handshaker *self) {
+ tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
free(impl);
}
@@ -986,24 +986,24 @@ static const tsi_handshaker_vtable handshaker_vtable = {
/* --- tsi_ssl_handshaker_factory common methods. --- */
tsi_result tsi_ssl_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker) {
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker) {
if (self == NULL || handshaker == NULL) return TSI_INVALID_ARGUMENT;
return self->create_handshaker(self, server_name_indication, handshaker);
}
-void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory* self) {
+void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory *self) {
if (self == NULL) return;
self->destroy(self);
}
-static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
- const char* server_name_indication,
- tsi_handshaker** handshaker) {
- SSL* ssl = SSL_new(ctx);
- BIO* into_ssl = NULL;
- BIO* from_ssl = NULL;
- tsi_ssl_handshaker* impl = NULL;
+static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
+ const char *server_name_indication,
+ tsi_handshaker **handshaker) {
+ SSL *ssl = SSL_new(ctx);
+ BIO *into_ssl = NULL;
+ BIO *from_ssl = NULL;
+ tsi_ssl_handshaker *impl = NULL;
*handshaker = NULL;
if (ctx == NULL) {
gpr_log(GPR_ERROR, "SSL Context is null. Should never happen.");
@@ -1062,16 +1062,16 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
return TSI_OK;
}
-static int select_protocol_list(const unsigned char** out,
- unsigned char* outlen,
- const unsigned char* client_list,
+static int select_protocol_list(const unsigned char **out,
+ unsigned char *outlen,
+ const unsigned char *client_list,
size_t client_list_len,
- const unsigned char* server_list,
+ const unsigned char *server_list,
size_t server_list_len) {
- const unsigned char* client_current = client_list;
+ const unsigned char *client_current = client_list;
while ((unsigned int)(client_current - client_list) < client_list_len) {
unsigned char client_current_len = *(client_current++);
- const unsigned char* server_current = server_list;
+ const unsigned char *server_current = server_list;
while ((server_current >= server_list) &&
(gpr_uintptr)(server_current - server_list) < server_list_len) {
unsigned char server_current_len = *(server_current++);
@@ -1091,31 +1091,31 @@ static int select_protocol_list(const unsigned char** out,
/* --- tsi_ssl__client_handshaker_factory methods implementation. --- */
static tsi_result ssl_client_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker) {
- tsi_ssl_client_handshaker_factory* impl =
- (tsi_ssl_client_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker) {
+ tsi_ssl_client_handshaker_factory *impl =
+ (tsi_ssl_client_handshaker_factory *)self;
return create_tsi_ssl_handshaker(impl->ssl_context, 1, server_name_indication,
handshaker);
}
static void ssl_client_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory* self) {
- tsi_ssl_client_handshaker_factory* impl =
- (tsi_ssl_client_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self) {
+ tsi_ssl_client_handshaker_factory *impl =
+ (tsi_ssl_client_handshaker_factory *)self;
if (impl->ssl_context != NULL) SSL_CTX_free(impl->ssl_context);
if (impl->alpn_protocol_list != NULL) free(impl->alpn_protocol_list);
free(impl);
}
-static int client_handshaker_factory_npn_callback(SSL* ssl, unsigned char** out,
- unsigned char* outlen,
- const unsigned char* in,
+static int client_handshaker_factory_npn_callback(SSL *ssl, unsigned char **out,
+ unsigned char *outlen,
+ const unsigned char *in,
unsigned int inlen,
- void* arg) {
- tsi_ssl_client_handshaker_factory* factory =
- (tsi_ssl_client_handshaker_factory*)arg;
- return select_protocol_list((const unsigned char**)out, outlen,
+ void *arg) {
+ tsi_ssl_client_handshaker_factory *factory =
+ (tsi_ssl_client_handshaker_factory *)arg;
+ return select_protocol_list((const unsigned char **)out, outlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length, in, inlen);
}
@@ -1123,10 +1123,10 @@ static int client_handshaker_factory_npn_callback(SSL* ssl, unsigned char** out,
/* --- tsi_ssl_server_handshaker_factory methods implementation. --- */
static tsi_result ssl_server_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker) {
- tsi_ssl_server_handshaker_factory* impl =
- (tsi_ssl_server_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker) {
+ tsi_ssl_server_handshaker_factory *impl =
+ (tsi_ssl_server_handshaker_factory *)self;
if (impl->ssl_context_count == 0 || server_name_indication != NULL) {
return TSI_INVALID_ARGUMENT;
}
@@ -1136,9 +1136,9 @@ static tsi_result ssl_server_handshaker_factory_create_handshaker(
}
static void ssl_server_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory* self) {
- tsi_ssl_server_handshaker_factory* impl =
- (tsi_ssl_server_handshaker_factory*)self;
+ tsi_ssl_handshaker_factory *self) {
+ tsi_ssl_server_handshaker_factory *impl =
+ (tsi_ssl_server_handshaker_factory *)self;
size_t i;
for (i = 0; i < impl->ssl_context_count; i++) {
if (impl->ssl_contexts[i] != NULL) {
@@ -1154,10 +1154,10 @@ static void ssl_server_handshaker_factory_destroy(
free(impl);
}
-static int does_entry_match_name(const char* entry, size_t entry_length,
- const char* name) {
- const char* dot;
- const char* name_subdomain = NULL;
+static int does_entry_match_name(const char *entry, size_t entry_length,
+ const char *name) {
+ const char *dot;
+ const char *name_subdomain = NULL;
size_t name_length = strlen(name);
size_t name_subdomain_length;
if (entry_length == 0) return 0;
@@ -1202,12 +1202,12 @@ static int does_entry_match_name(const char* entry, size_t entry_length,
strncmp(entry, name_subdomain, entry_length) == 0);
}
-static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap,
- void* arg) {
- tsi_ssl_server_handshaker_factory* impl =
- (tsi_ssl_server_handshaker_factory*)arg;
+static int ssl_server_handshaker_factory_servername_callback(SSL *ssl, int *ap,
+ void *arg) {
+ tsi_ssl_server_handshaker_factory *impl =
+ (tsi_ssl_server_handshaker_factory *)arg;
size_t i = 0;
- const char* servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
+ const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
if (servername == NULL || strlen(servername) == 0) {
return SSL_TLSEXT_ERR_NOACK;
}
@@ -1225,10 +1225,10 @@ static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap,
#if TSI_OPENSSL_ALPN_SUPPORT
static int server_handshaker_factory_alpn_callback(
- SSL* ssl, const unsigned char** out, unsigned char* outlen,
- const unsigned char* in, unsigned int inlen, void* arg) {
- tsi_ssl_server_handshaker_factory* factory =
- (tsi_ssl_server_handshaker_factory*)arg;
+ SSL *ssl, const unsigned char **out, unsigned char *outlen,
+ const unsigned char *in, unsigned int inlen, void *arg) {
+ tsi_ssl_server_handshaker_factory *factory =
+ (tsi_ssl_server_handshaker_factory *)arg;
return select_protocol_list(out, outlen, in, inlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length);
@@ -1236,9 +1236,9 @@ static int server_handshaker_factory_alpn_callback(
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
static int server_handshaker_factory_npn_advertised_callback(
- SSL* ssl, const unsigned char** out, unsigned int* outlen, void* arg) {
- tsi_ssl_server_handshaker_factory* factory =
- (tsi_ssl_server_handshaker_factory*)arg;
+ SSL *ssl, const unsigned char **out, unsigned int *outlen, void *arg) {
+ tsi_ssl_server_handshaker_factory *factory =
+ (tsi_ssl_server_handshaker_factory *)arg;
*out = factory->alpn_protocol_list;
GPR_ASSERT(factory->alpn_protocol_list_length <= UINT_MAX);
*outlen = (unsigned int)factory->alpn_protocol_list_length;
@@ -1248,14 +1248,14 @@ static int server_handshaker_factory_npn_advertised_callback(
/* --- tsi_ssl_handshaker_factory constructors. --- */
tsi_result tsi_create_ssl_client_handshaker_factory(
- const unsigned char* pem_private_key, size_t pem_private_key_size,
- const unsigned char* pem_cert_chain, size_t pem_cert_chain_size,
- const unsigned char* pem_root_certs, size_t pem_root_certs_size,
- const char* cipher_list, const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory) {
- SSL_CTX* ssl_context = NULL;
- tsi_ssl_client_handshaker_factory* impl = NULL;
+ const unsigned char *pem_private_key, size_t pem_private_key_size,
+ const unsigned char *pem_cert_chain, size_t pem_cert_chain_size,
+ const unsigned char *pem_root_certs, size_t pem_root_certs_size,
+ const char *cipher_list, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory) {
+ SSL_CTX *ssl_context = NULL;
+ tsi_ssl_client_handshaker_factory *impl = NULL;
tsi_result result = TSI_OK;
gpr_once_init(&init_openssl_once, init_openssl);
@@ -1327,15 +1327,15 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
tsi_result tsi_create_ssl_server_handshaker_factory(
- const unsigned char** pem_private_keys,
- const size_t* pem_private_keys_sizes, const unsigned char** pem_cert_chains,
- const size_t* pem_cert_chains_sizes, size_t key_cert_pair_count,
- const unsigned char* pem_client_root_certs,
+ const unsigned char **pem_private_keys,
+ const size_t *pem_private_keys_sizes, const unsigned char **pem_cert_chains,
+ const size_t *pem_cert_chains_sizes, size_t key_cert_pair_count,
+ const unsigned char *pem_client_root_certs,
size_t pem_client_root_certs_size, int force_client_auth,
- const char* cipher_list, const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory) {
- tsi_ssl_server_handshaker_factory* impl = NULL;
+ const char *cipher_list, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory) {
+ tsi_ssl_server_handshaker_factory *impl = NULL;
tsi_result result = TSI_OK;
size_t i = 0;
@@ -1353,7 +1353,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
impl->base.create_handshaker =
ssl_server_handshaker_factory_create_handshaker;
impl->base.destroy = ssl_server_handshaker_factory_destroy;
- impl->ssl_contexts = calloc(key_cert_pair_count, sizeof(SSL_CTX*));
+ impl->ssl_contexts = calloc(key_cert_pair_count, sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
calloc(key_cert_pair_count, sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
@@ -1388,7 +1388,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
if (pem_client_root_certs != NULL) {
int flags = SSL_VERIFY_PEER;
- STACK_OF(X509_NAME)* root_names = NULL;
+ STACK_OF(X509_NAME) *root_names = NULL;
result = ssl_ctx_load_verification_certs(
impl->ssl_contexts[i], pem_client_root_certs,
pem_client_root_certs_size, &root_names);
@@ -1431,17 +1431,17 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
/* --- tsi_ssl utils. --- */
-int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name) {
+int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
size_t i = 0;
size_t san_count = 0;
- const tsi_peer_property* cn_property = NULL;
+ const tsi_peer_property *cn_property = NULL;
/* For now reject what looks like an IP address. */
if (looks_like_ip_address(name)) return 0;
/* Check the SAN first. */
for (i = 0; i < peer->property_count; i++) {
- const tsi_peer_property* property = &peer->properties[i];
+ const tsi_peer_property *property = &peer->properties[i];
if (property->name == NULL) continue;
if (strcmp(property->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index cdf4f294be..51c0003a85 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -85,12 +85,12 @@ typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
- This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case
where a parameter is invalid. */
tsi_result tsi_create_ssl_client_handshaker_factory(
- const unsigned char* pem_private_key, size_t pem_private_key_size,
- const unsigned char* pem_cert_chain, size_t pem_cert_chain_size,
- const unsigned char* pem_root_certs, size_t pem_root_certs_size,
- const char* cipher_suites, const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory);
+ const unsigned char *pem_private_key, size_t pem_private_key_size,
+ const unsigned char *pem_cert_chain, size_t pem_cert_chain_size,
+ const unsigned char *pem_root_certs, size_t pem_root_certs_size,
+ const char *cipher_suites, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory);
/* Creates a server handshaker factory.
- version indicates which version of the specification to use.
@@ -131,14 +131,14 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
- This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case
where a parameter is invalid. */
tsi_result tsi_create_ssl_server_handshaker_factory(
- const unsigned char** pem_private_keys,
- const size_t* pem_private_keys_sizes, const unsigned char** pem_cert_chains,
- const size_t* pem_cert_chains_sizes, size_t key_cert_pair_count,
- const unsigned char* pem_client_root_certs,
+ const unsigned char **pem_private_keys,
+ const size_t *pem_private_keys_sizes, const unsigned char **pem_cert_chains,
+ const size_t *pem_cert_chains_sizes, size_t key_cert_pair_count,
+ const unsigned char *pem_client_root_certs,
size_t pem_client_root_certs_size, int force_client_auth,
- const char* cipher_suites, const unsigned char** alpn_protocols,
- const unsigned char* alpn_protocols_lengths, uint16_t num_alpn_protocols,
- tsi_ssl_handshaker_factory** factory);
+ const char *cipher_suites, const unsigned char **alpn_protocols,
+ const unsigned char *alpn_protocols_lengths, uint16_t num_alpn_protocols,
+ tsi_ssl_handshaker_factory **factory);
/* Creates a handshaker.
- self is the factory from which the handshaker will be created.
@@ -151,12 +151,12 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
- This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case
where a parameter is invalid. */
tsi_result tsi_ssl_handshaker_factory_create_handshaker(
- tsi_ssl_handshaker_factory* self, const char* server_name_indication,
- tsi_handshaker** handshaker);
+ tsi_ssl_handshaker_factory *self, const char *server_name_indication,
+ tsi_handshaker **handshaker);
/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
while handshakers created with this factory are still in use. */
-void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory* self);
+void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory *self);
/* Util that checks that an ssl peer matches a specific name.
Still TODO(jboeuf):
@@ -164,7 +164,7 @@ void tsi_ssl_handshaker_factory_destroy(tsi_ssl_handshaker_factory* self);
- handle %encoded chars.
- handle public suffix wildchar more strictly (e.g. *.co.uk)
- handle IP addresses in SAN. */
-int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name);
+int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
#ifdef __cplusplus
}
diff --git a/src/core/tsi/transport_security.c b/src/core/tsi/transport_security.c
index ec02a478ba..c39e584496 100644
--- a/src/core/tsi/transport_security.c
+++ b/src/core/tsi/transport_security.c
@@ -42,8 +42,8 @@ int tsi_tracing_enabled = 0;
/* --- Utils. --- */
-char* tsi_strdup(const char* src) {
- char* dst;
+char *tsi_strdup(const char *src) {
+ char *dst;
size_t len;
if (!src) return NULL;
len = strlen(src) + 1;
@@ -55,7 +55,7 @@ char* tsi_strdup(const char* src) {
/* --- tsi_result common implementation. --- */
-const char* tsi_result_to_string(tsi_result result) {
+const char *tsi_result_to_string(tsi_result result) {
switch (result) {
case TSI_OK:
return "TSI_OK";
@@ -92,11 +92,11 @@ const char* tsi_result_to_string(tsi_result result) {
Calls specific implementation after state/input validation. */
-tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size) {
+tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size) {
if (self == NULL || unprotected_bytes == NULL ||
unprotected_bytes_size == NULL || protected_output_frames == NULL ||
protected_output_frames_size == NULL) {
@@ -108,8 +108,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
}
tsi_result tsi_frame_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size) {
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size) {
if (self == NULL || protected_output_frames == NULL ||
protected_output_frames == NULL || still_pending_size == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -120,9 +120,9 @@ tsi_result tsi_frame_protector_protect_flush(
}
tsi_result tsi_frame_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size) {
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size) {
if (self == NULL || protected_frames_bytes == NULL ||
protected_frames_bytes_size == NULL || unprotected_bytes == NULL ||
unprotected_bytes_size == NULL) {
@@ -133,7 +133,7 @@ tsi_result tsi_frame_protector_unprotect(
unprotected_bytes_size);
}
-void tsi_frame_protector_destroy(tsi_frame_protector* self) {
+void tsi_frame_protector_destroy(tsi_frame_protector *self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
@@ -142,29 +142,29 @@ void tsi_frame_protector_destroy(tsi_frame_protector* self) {
Calls specific implementation after state/input validation. */
-tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size) {
+tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size) {
if (self == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->get_bytes_to_send_to_peer(self, bytes, bytes_size);
}
-tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self,
- const unsigned char* bytes,
- size_t* bytes_size) {
+tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
+ const unsigned char *bytes,
+ size_t *bytes_size) {
if (self == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->process_bytes_from_peer(self, bytes, bytes_size);
}
-tsi_result tsi_handshaker_get_result(tsi_handshaker* self) {
+tsi_result tsi_handshaker_get_result(tsi_handshaker *self) {
if (self == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
return self->vtable->get_result(self);
}
-tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) {
+tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer) {
if (self == NULL || peer == NULL) return TSI_INVALID_ARGUMENT;
memset(peer, 0, sizeof(tsi_peer));
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
@@ -175,8 +175,8 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) {
}
tsi_result tsi_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_protected_frame_size,
- tsi_frame_protector** protector) {
+ tsi_handshaker *self, size_t *max_protected_frame_size,
+ tsi_frame_protector **protector) {
tsi_result result;
if (self == NULL || protector == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
@@ -191,7 +191,7 @@ tsi_result tsi_handshaker_create_frame_protector(
return result;
}
-void tsi_handshaker_destroy(tsi_handshaker* self) {
+void tsi_handshaker_destroy(tsi_handshaker *self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
@@ -204,7 +204,7 @@ tsi_peer_property tsi_init_peer_property(void) {
return property;
}
-static void tsi_peer_destroy_list_property(tsi_peer_property* children,
+static void tsi_peer_destroy_list_property(tsi_peer_property *children,
size_t child_count) {
size_t i;
for (i = 0; i < child_count; i++) {
@@ -213,7 +213,7 @@ static void tsi_peer_destroy_list_property(tsi_peer_property* children,
free(children);
}
-void tsi_peer_property_destruct(tsi_peer_property* property) {
+void tsi_peer_property_destruct(tsi_peer_property *property) {
if (property->name != NULL) {
free(property->name);
}
@@ -223,7 +223,7 @@ void tsi_peer_property_destruct(tsi_peer_property* property) {
*property = tsi_init_peer_property(); /* Reset everything to 0. */
}
-void tsi_peer_destruct(tsi_peer* self) {
+void tsi_peer_destruct(tsi_peer *self) {
if (self == NULL) return;
if (self->properties != NULL) {
tsi_peer_destroy_list_property(self->properties, self->property_count);
@@ -233,7 +233,7 @@ void tsi_peer_destruct(tsi_peer* self) {
}
tsi_result tsi_construct_allocated_string_peer_property(
- const char* name, size_t value_length, tsi_peer_property* property) {
+ const char *name, size_t value_length, tsi_peer_property *property) {
*property = tsi_init_peer_property();
if (name != NULL) {
property->name = tsi_strdup(name);
@@ -251,15 +251,15 @@ tsi_result tsi_construct_allocated_string_peer_property(
}
tsi_result tsi_construct_string_peer_property_from_cstring(
- const char* name, const char* value, tsi_peer_property* property) {
+ const char *name, const char *value, tsi_peer_property *property) {
return tsi_construct_string_peer_property(name, value, strlen(value),
property);
}
-tsi_result tsi_construct_string_peer_property(const char* name,
- const char* value,
+tsi_result tsi_construct_string_peer_property(const char *name,
+ const char *value,
size_t value_length,
- tsi_peer_property* property) {
+ tsi_peer_property *property) {
tsi_result result = tsi_construct_allocated_string_peer_property(
name, value_length, property);
if (result != TSI_OK) return result;
@@ -269,7 +269,7 @@ tsi_result tsi_construct_string_peer_property(const char* name,
return TSI_OK;
}
-tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer) {
+tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer) {
memset(peer, 0, sizeof(tsi_peer));
if (property_count > 0) {
peer->properties = calloc(property_count, sizeof(tsi_peer_property));
diff --git a/src/core/tsi/transport_security.h b/src/core/tsi/transport_security.h
index 34283f2f9c..4077737473 100644
--- a/src/core/tsi/transport_security.h
+++ b/src/core/tsi/transport_security.h
@@ -45,64 +45,64 @@ extern int tsi_tracing_enabled;
/* Base for tsi_frame_protector implementations.
See transport_security_interface.h for documentation. */
typedef struct {
- tsi_result (*protect)(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size);
- tsi_result (*protect_flush)(tsi_frame_protector* self,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size,
- size_t* still_pending_size);
- tsi_result (*unprotect)(tsi_frame_protector* self,
- const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size,
- unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size);
- void (*destroy)(tsi_frame_protector* self);
+ tsi_result (*protect)(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size);
+ tsi_result (*protect_flush)(tsi_frame_protector *self,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size,
+ size_t *still_pending_size);
+ tsi_result (*unprotect)(tsi_frame_protector *self,
+ const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size,
+ unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size);
+ void (*destroy)(tsi_frame_protector *self);
} tsi_frame_protector_vtable;
struct tsi_frame_protector {
- const tsi_frame_protector_vtable* vtable;
+ const tsi_frame_protector_vtable *vtable;
};
/* Base for tsi_handshaker implementations.
See transport_security_interface.h for documentation. */
typedef struct {
- tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size);
- tsi_result (*process_bytes_from_peer)(tsi_handshaker* self,
- const unsigned char* bytes,
- size_t* bytes_size);
- tsi_result (*get_result)(tsi_handshaker* self);
- tsi_result (*extract_peer)(tsi_handshaker* self, tsi_peer* peer);
- tsi_result (*create_frame_protector)(tsi_handshaker* self,
- size_t* max_protected_frame_size,
- tsi_frame_protector** protector);
- void (*destroy)(tsi_handshaker* self);
+ tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size);
+ tsi_result (*process_bytes_from_peer)(tsi_handshaker *self,
+ const unsigned char *bytes,
+ size_t *bytes_size);
+ tsi_result (*get_result)(tsi_handshaker *self);
+ tsi_result (*extract_peer)(tsi_handshaker *self, tsi_peer *peer);
+ tsi_result (*create_frame_protector)(tsi_handshaker *self,
+ size_t *max_protected_frame_size,
+ tsi_frame_protector **protector);
+ void (*destroy)(tsi_handshaker *self);
} tsi_handshaker_vtable;
struct tsi_handshaker {
- const tsi_handshaker_vtable* vtable;
+ const tsi_handshaker_vtable *vtable;
int frame_protector_created;
};
/* Peer and property construction/destruction functions. */
-tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer);
+tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer);
tsi_peer_property tsi_init_peer_property(void);
-void tsi_peer_property_destruct(tsi_peer_property* property);
-tsi_result tsi_construct_string_peer_property(const char* name,
- const char* value,
+void tsi_peer_property_destruct(tsi_peer_property *property);
+tsi_result tsi_construct_string_peer_property(const char *name,
+ const char *value,
size_t value_length,
- tsi_peer_property* property);
+ tsi_peer_property *property);
tsi_result tsi_construct_allocated_string_peer_property(
- const char* name, size_t value_length, tsi_peer_property* property);
+ const char *name, size_t value_length, tsi_peer_property *property);
tsi_result tsi_construct_string_peer_property_from_cstring(
- const char* name, const char* value, tsi_peer_property* property);
+ const char *name, const char *value, tsi_peer_property *property);
/* Utils. */
-char* tsi_strdup(const char* src); /* Sadly, no strdup in C89. */
+char *tsi_strdup(const char *src); /* Sadly, no strdup in C89. */
#ifdef __cplusplus
}
diff --git a/src/core/tsi/transport_security_interface.h b/src/core/tsi/transport_security_interface.h
index 03a51683a2..69ee17ae91 100644
--- a/src/core/tsi/transport_security_interface.h
+++ b/src/core/tsi/transport_security_interface.h
@@ -59,7 +59,7 @@ typedef enum {
TSI_OUT_OF_RESOURCES = 12
} tsi_result;
-const char* tsi_result_to_string(tsi_result result);
+const char *tsi_result_to_string(tsi_result result);
/* --- tsi tracing --- */
@@ -126,11 +126,11 @@ typedef struct tsi_frame_protector tsi_frame_protector;
if (result != TSI_OK) HandleError(result);
------------------------------------------------------------------------ */
-tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
- const unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size,
- unsigned char* protected_output_frames,
- size_t* protected_output_frames_size);
+tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
+ const unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size,
+ unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size);
/* Indicates that we need to flush the bytes buffered in the protector and get
the resulting frame.
@@ -141,8 +141,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
- still_pending_bytes is an output parameter indicating the number of bytes
that still need to be flushed from the protector.*/
tsi_result tsi_frame_protector_protect_flush(
- tsi_frame_protector* self, unsigned char* protected_output_frames,
- size_t* protected_output_frames_size, size_t* still_pending_size);
+ tsi_frame_protector *self, unsigned char *protected_output_frames,
+ size_t *protected_output_frames_size, size_t *still_pending_size);
/* Outputs unprotected bytes.
- protected_frames_bytes is an input only parameter and points to the
@@ -167,12 +167,12 @@ tsi_result tsi_frame_protector_protect_flush(
needs to be read before new protected data can be processed in which case
protected_frames_size will be set to 0. */
tsi_result tsi_frame_protector_unprotect(
- tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
- size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
- size_t* unprotected_bytes_size);
+ tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
+ size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
+ size_t *unprotected_bytes_size);
/* Destroys the tsi_frame_protector object. */
-void tsi_frame_protector_destroy(tsi_frame_protector* self);
+void tsi_frame_protector_destroy(tsi_frame_protector *self);
/* --- tsi_peer objects ---
@@ -184,20 +184,20 @@ void tsi_frame_protector_destroy(tsi_frame_protector* self);
/* Property values may contain NULL characters just like C++ strings.
The length field gives the length of the string. */
typedef struct tsi_peer_property {
- char* name;
+ char *name;
struct {
- char* data;
+ char *data;
size_t length;
} value;
} tsi_peer_property;
typedef struct {
- tsi_peer_property* properties;
+ tsi_peer_property *properties;
size_t property_count;
} tsi_peer;
/* Destructs the tsi_peer object. */
-void tsi_peer_destruct(tsi_peer* self);
+void tsi_peer_destruct(tsi_peer *self);
/* --- tsi_handshaker objects ----
@@ -279,9 +279,9 @@ typedef struct tsi_handshaker tsi_handshaker;
needs to be called again to get all the bytes to send to the peer (there
was more data to write than the specified bytes_size). In case of a fatal
error in the handshake, another specific error code is returned. */
-tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
- unsigned char* bytes,
- size_t* bytes_size);
+tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
+ unsigned char *bytes,
+ size_t *bytes_size);
/* Processes bytes received from the peer.
- bytes is the buffer containing the data.
@@ -292,16 +292,16 @@ tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
needs to be called again to complete the data needed for processing. In
case of a fatal error in the handshake, another specific error code is
returned. */
-tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self,
- const unsigned char* bytes,
- size_t* bytes_size);
+tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
+ const unsigned char *bytes,
+ size_t *bytes_size);
/* Gets the result of the handshaker.
Returns TSI_OK if the hanshake completed successfully and there has been no
errors. Returns TSI_HANDSHAKE_IN_PROGRESS if the handshaker is not done yet
but no error has been encountered so far. Otherwise the handshaker failed
with the returned error. */
-tsi_result tsi_handshaker_get_result(tsi_handshaker* self);
+tsi_result tsi_handshaker_get_result(tsi_handshaker *self);
/* Returns 1 if the handshake is in progress, 0 otherwise. */
#define tsi_handshaker_is_in_progress(h) \
@@ -311,7 +311,7 @@ tsi_result tsi_handshaker_get_result(tsi_handshaker* self);
tsi_handshaker_is_in_progress returns 1, it returns TSI_OK otherwise
assuming the handshaker is not in a fatal error state.
The caller is responsible for destructing the peer. */
-tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer);
+tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer);
/* This method creates a tsi_frame_protector object after the handshake phase
is done. After this method has been called successfully, the only method
@@ -330,12 +330,12 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer);
the handshaker is not in a fatal error state.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_create_frame_protector(
- tsi_handshaker* self, size_t* max_output_protected_frame_size,
- tsi_frame_protector** protector);
+ tsi_handshaker *self, size_t *max_output_protected_frame_size,
+ tsi_frame_protector **protector);
/* This method releases the tsi_handshaker object. After this method is called,
no other method can be called on the object. */
-void tsi_handshaker_destroy(tsi_handshaker* self);
+void tsi_handshaker_destroy(tsi_handshaker *self);
#ifdef __cplusplus
}
diff --git a/src/cpp/proto/proto_utils.cc b/src/cpp/proto/proto_utils.cc
index be84c222a0..f47acc8f8d 100644
--- a/src/cpp/proto/proto_utils.cc
+++ b/src/cpp/proto/proto_utils.cc
@@ -36,6 +36,7 @@
#include <grpc/grpc.h>
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
+#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/port_platform.h>
@@ -111,7 +112,8 @@ class GrpcBufferReader GRPC_FINAL
if (backup_count_ > 0) {
*data = GPR_SLICE_START_PTR(slice_) + GPR_SLICE_LENGTH(slice_) -
backup_count_;
- *size = backup_count_;
+ GPR_ASSERT(backup_count_ <= INT_MAX);
+ *size = (int)backup_count_;
backup_count_ = 0;
return true;
}
diff --git a/src/cpp/util/time.cc b/src/cpp/util/time.cc
index b3401eb26b..6157a37745 100644
--- a/src/cpp/util/time.cc
+++ b/src/cpp/util/time.cc
@@ -57,8 +57,8 @@ void Timepoint2Timespec(const system_clock::time_point& from,
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
- to->tv_sec = secs.count();
- to->tv_nsec = nsecs.count();
+ to->tv_sec = (time_t)secs.count();
+ to->tv_nsec = (int)nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
@@ -73,8 +73,8 @@ void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
- to->tv_sec = secs.count();
- to->tv_nsec = nsecs.count();
+ to->tv_sec = (time_t)secs.count();
+ to->tv_nsec = (int)nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}