-rw-r--r--  include/grpc++/resource_quota.h | 3
-rw-r--r--  include/grpc++/server_builder.h | 2
-rw-r--r--  src/core/ext/census/grpc_filter.c | 3
-rw-r--r--  src/core/ext/client_channel/channel_connectivity.c | 3
-rw-r--r--  src/core/ext/client_channel/client_channel.c | 50
-rw-r--r--  src/core/ext/client_channel/http_connect_handshaker.c | 8
-rw-r--r--  src/core/ext/client_channel/subchannel.c | 13
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c | 41
-rw-r--r--  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c | 32
-rw-r--r--  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h | 32
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c | 28
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c | 22
-rw-r--r--  src/core/ext/load_reporting/load_reporting_filter.c | 3
-rw-r--r--  src/core/ext/resolver/dns/native/dns_resolver.c | 13
-rw-r--r--  src/core/ext/resolver/sockaddr/sockaddr_resolver.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c | 10
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c | 3
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 189
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c | 9
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 4
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 43
-rw-r--r--  src/core/lib/channel/channel_stack.c | 9
-rw-r--r--  src/core/lib/channel/compress_filter.c | 6
-rw-r--r--  src/core/lib/channel/deadline_filter.c | 11
-rw-r--r--  src/core/lib/channel/handshaker.c | 8
-rw-r--r--  src/core/lib/channel/http_client_filter.c | 15
-rw-r--r--  src/core/lib/channel/http_server_filter.c | 9
-rw-r--r--  src/core/lib/channel/message_size_filter.c | 5
-rw-r--r--  src/core/lib/http/httpcli.c | 18
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c | 2
-rw-r--r--  src/core/lib/iomgr/closure.c | 38
-rw-r--r--  src/core/lib/iomgr/closure.h | 39
-rw-r--r--  src/core/lib/iomgr/combiner.c | 110
-rw-r--r--  src/core/lib/iomgr/combiner.h | 14
-rw-r--r--  src/core/lib/iomgr/ev_epoll_linux.c | 37
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 26
-rw-r--r--  src/core/lib/iomgr/ev_posix.c | 5
-rw-r--r--  src/core/lib/iomgr/ev_posix.h | 3
-rw-r--r--  src/core/lib/iomgr/exec_ctx.c | 134
-rw-r--r--  src/core/lib/iomgr/exec_ctx.h | 31
-rw-r--r--  src/core/lib/iomgr/executor.c | 32
-rw-r--r--  src/core/lib/iomgr/executor.h | 4
-rw-r--r--  src/core/lib/iomgr/pollset_uv.c | 2
-rw-r--r--  src/core/lib/iomgr/pollset_windows.c | 5
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.c | 10
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.c | 6
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.c | 7
-rw-r--r--  src/core/lib/iomgr/resource_quota.c | 101
-rw-r--r--  src/core/lib/iomgr/socket_windows.c | 4
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.c | 15
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c | 2
-rw-r--r--  src/core/lib/iomgr/tcp_client_windows.c | 6
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 21
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c | 16
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.c | 4
-rw-r--r--  src/core/lib/iomgr/tcp_server_windows.c | 11
-rw-r--r--  src/core/lib/iomgr/tcp_uv.c | 12
-rw-r--r--  src/core/lib/iomgr/tcp_windows.c | 27
-rw-r--r--  src/core/lib/iomgr/timer_generic.c | 14
-rw-r--r--  src/core/lib/iomgr/timer_uv.c | 9
-rw-r--r--  src/core/lib/iomgr/udp_server.c | 10
-rw-r--r--  src/core/lib/iomgr/workqueue.h | 11
-rw-r--r--  src/core/lib/iomgr/workqueue_uv.c | 5
-rw-r--r--  src/core/lib/iomgr/workqueue_windows.c | 5
-rw-r--r--  src/core/lib/security/credentials/fake/fake_credentials.c | 7
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c | 6
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.c | 8
-rw-r--r--  src/core/lib/security/credentials/oauth2/oauth2_credentials.c | 16
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.c | 9
-rw-r--r--  src/core/lib/security/transport/security_connector.c | 10
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c | 18
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c | 18
-rw-r--r--  src/core/lib/surface/call.c | 29
-rw-r--r--  src/core/lib/surface/channel_ping.c | 2
-rw-r--r--  src/core/lib/surface/completion_queue.c | 3
-rw-r--r--  src/core/lib/surface/lame_client.c | 10
-rw-r--r--  src/core/lib/surface/server.c | 53
-rw-r--r--  src/core/lib/transport/connectivity_state.c | 14
-rw-r--r--  src/core/lib/transport/transport.c | 27
-rw-r--r--  src/ruby/lib/grpc/generic/bidi_call.rb | 1
-rw-r--r--  src/ruby/lib/grpc/generic/service.rb | 2
-rw-r--r--  src/ruby/spec/generic/rpc_server_pool_spec.rb | 12
-rw-r--r--  test/core/bad_client/bad_client.c | 6
-rw-r--r--  test/core/client_channel/resolvers/dns_resolver_connectivity_test.c | 10
-rw-r--r--  test/core/client_channel/resolvers/sockaddr_resolver_test.c | 4
-rw-r--r--  test/core/client_channel/set_initial_connect_string_test.c | 2
-rw-r--r--  test/core/end2end/bad_server_response_test.c | 4
-rw-r--r--  test/core/end2end/fake_resolver.c | 4
-rw-r--r--  test/core/end2end/fixtures/http_proxy.c | 26
-rw-r--r--  test/core/end2end/fuzzers/api_fuzzer.c | 14
-rw-r--r--  test/core/end2end/tests/filter_causes_close.c | 7
-rw-r--r--  test/core/http/httpcli_test.c | 18
-rw-r--r--  test/core/http/httpscli_test.c | 18
-rw-r--r--  test/core/internal_api_canaries/iomgr.c | 9
-rw-r--r--  test/core/iomgr/combiner_test.c | 27
-rw-r--r--  test/core/iomgr/endpoint_pair_test.c | 3
-rw-r--r--  test/core/iomgr/endpoint_tests.c | 14
-rw-r--r--  test/core/iomgr/ev_epoll_linux_test.c | 3
-rw-r--r--  test/core/iomgr/fd_posix_test.c | 23
-rw-r--r--  test/core/iomgr/resolve_address_test.c | 45
-rw-r--r--  test/core/iomgr/resource_quota_test.c | 9
-rw-r--r--  test/core/iomgr/tcp_client_posix_test.c | 7
-rw-r--r--  test/core/iomgr/tcp_posix_test.c | 15
-rw-r--r--  test/core/iomgr/tcp_server_posix_test.c | 5
-rw-r--r--  test/core/iomgr/udp_server_test.c | 3
-rw-r--r--  test/core/security/credentials_test.c | 12
-rw-r--r--  test/core/security/jwt_verifier_test.c | 10
-rw-r--r--  test/core/security/oauth2_utils.c | 3
-rw-r--r--  test/core/security/secure_endpoint_test.c | 5
-rw-r--r--  test/core/surface/concurrent_connectivity_test.c | 6
-rw-r--r--  test/core/surface/lame_client_test.c | 6
-rw-r--r--  test/core/transport/connectivity_state_test.c | 9
-rw-r--r--  test/core/util/mock_endpoint.c | 10
-rw-r--r--  test/core/util/passthru_endpoint.c | 15
-rw-r--r--  test/core/util/port_server_client.c | 22
-rw-r--r--  test/core/util/test_tcp_server.c | 6
-rwxr-xr-x  tools/codegen/core/gen_nano_proto.sh | 47
-rwxr-xr-x  tools/distrib/check_copyright.py | 3
118 files changed, 1024 insertions(+), 1025 deletions(-)
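
The mechanical change running through every hunk below is a migration of the closure API: the scheduling decision moves out of each call site (grpc_exec_ctx_sched(..., NULL), grpc_combiner_execute(...)) and into the closure itself, bound once when the closure is created. A minimal before/after sketch of the calling convention, inferred only from the signatures visible in this diff; demo_cb and demo_schedule are illustrative names, not part of the patch:

    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    static void demo_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      /* callback body: runs wherever the closure's bound scheduler decides */
    }

    static void demo_schedule(grpc_exec_ctx *exec_ctx, grpc_closure *c,
                              void *arg) {
      /* Old API (removed by this patch): scheduling chosen at every call site:
       *   grpc_closure_init(c, demo_cb, arg);
       *   grpc_exec_ctx_sched(exec_ctx, c, GRPC_ERROR_NONE, NULL);
       */
      /* New API: a grpc_closure_scheduler is bound at init time, and callers
         simply hand the closure to grpc_closure_sched. */
      grpc_closure_init(c, demo_cb, arg, grpc_schedule_on_exec_ctx);
      grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE);
    }

Besides grpc_schedule_on_exec_ctx, the hunks below bind closures to grpc_combiner_scheduler(combiner, covered_by_poller) and grpc_combiner_finally_scheduler(...), and batch enqueues switch from grpc_exec_ctx_enqueue_list to grpc_closure_list_sched.
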
diff --git a/include/grpc++/resource_quota.h b/include/grpc++/resource_quota.h
index 75e04d4e2f..68a514658d 100644
--- a/include/grpc++/resource_quota.h
+++ b/include/grpc++/resource_quota.h
@@ -37,6 +37,7 @@
struct grpc_resource_quota;
#include <grpc++/impl/codegen/config.h>
+#include <grpc++/impl/codegen/grpc_library.h>
namespace grpc {
@@ -44,7 +45,7 @@ namespace grpc {
/// A ResourceQuota can be attached to a server (via ServerBuilder), or a client
/// channel (via ChannelArguments). gRPC will attempt to keep memory used by
/// all attached entities below the ResourceQuota bound.
-class ResourceQuota final {
+class ResourceQuota final : private GrpcLibraryCodegen {
public:
explicit ResourceQuota(const grpc::string& name);
ResourceQuota();
diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h
index 9252c6a63a..2ac2f0a1ef 100644
--- a/include/grpc++/server_builder.h
+++ b/include/grpc++/server_builder.h
@@ -187,7 +187,7 @@ class ServerBuilder {
struct SyncServerSettings {
SyncServerSettings()
- : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)),
+ : num_cqs(1),
min_pollers(1),
max_pollers(INT_MAX),
cq_timeout_msec(1000) {}
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 3e8acc85e1..8e4d4732b8 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -154,7 +154,8 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
- grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
+ grpc_closure_init(&d->finish_recv, server_on_done_recv, elem,
+ grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/client_channel/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c
index 9797e66564..b10f444b63 100644
--- a/src/core/ext/client_channel/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -198,7 +198,8 @@ void grpc_channel_watch_connectivity_state(
grpc_cq_begin_op(cq, tag);
gpr_mu_init(&w->mu);
- grpc_closure_init(&w->on_complete, watch_complete, w);
+ grpc_closure_init(&w->on_complete, watch_complete, w,
+ grpc_schedule_on_exec_ctx);
w->phase = WAITING;
w->state = last_observed_state;
w->cq = cq;
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index 9d46338428..a762b8917e 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -249,7 +249,8 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
- grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
+ grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w,
+ grpc_schedule_on_exec_ctx);
w->state = current_state;
w->lb_policy = lb_policy;
grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
@@ -361,14 +362,12 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
chand->method_params_table = method_params_table;
if (lb_policy != NULL) {
- grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
- NULL);
+ grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
} else if (chand->resolver == NULL /* disconnected */) {
grpc_closure_list_fail_all(
&chand->waiting_for_config_closures,
GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
- grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
- NULL);
+ grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
@@ -425,7 +424,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
- grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -444,9 +443,8 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
- grpc_exec_ctx_sched(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("Ping with no load balancing"),
- NULL);
+ grpc_closure_sched(exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE("Ping with no load balancing"));
} else {
grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
@@ -465,8 +463,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
if (!chand->started_resolving) {
grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
GRPC_ERROR_REF(op->disconnect_with_error));
- grpc_exec_ctx_enqueue_list(exec_ctx,
- &chand->waiting_for_config_closures, NULL);
+ grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
@@ -511,7 +508,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&chand->mu);
chand->owning_stack = args->channel_stack;
grpc_closure_init(&chand->on_resolver_result_changed,
- on_resolver_result_changed, chand);
+ on_resolver_result_changed, chand,
+ grpc_schedule_on_exec_ctx);
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
@@ -678,8 +676,9 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
calld->waiting_ops_count = 0;
calld->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
- grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(
+ exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
@@ -761,14 +760,14 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
+ grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
} else {
call_data *calld = cpa->elem->call_data;
gpr_mu_lock(&calld->mu);
if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags, cpa->connected_subchannel,
cpa->on_ready, GRPC_ERROR_NONE)) {
- grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&calld->mu);
}
@@ -800,9 +799,9 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
}
}
gpr_mu_unlock(&chand->mu);
@@ -853,12 +852,12 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
cpa->connected_subchannel = connected_subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
- grpc_closure_init(&cpa->closure, continue_picking, cpa);
+ grpc_closure_init(&cpa->closure, continue_picking, cpa,
+ grpc_schedule_on_exec_ctx);
grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
GRPC_ERROR_NONE);
} else {
- grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
- NULL);
+ grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
}
gpr_mu_unlock(&chand->mu);
@@ -943,7 +942,8 @@ retry:
calld->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
- grpc_closure_init(&calld->next_step, subchannel_ready, elem);
+ grpc_closure_init(&calld->next_step, subchannel_ready, elem,
+ grpc_schedule_on_exec_ctx);
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
@@ -1089,7 +1089,8 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
// get the service config data once the resolver returns.
// Take a reference to the call stack to be owned by the callback.
GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
- grpc_closure_init(&calld->read_service_config, read_service_config, elem);
+ grpc_closure_init(&calld->read_service_config, read_service_config, elem,
+ grpc_schedule_on_exec_ctx);
grpc_closure_list_append(&chand->waiting_for_config_closures,
&calld->read_service_config, GRPC_ERROR_NONE);
gpr_mu_unlock(&chand->mu);
@@ -1202,7 +1203,8 @@ void grpc_client_channel_watch_connectivity_state(
w->pollset = pollset;
w->on_complete = on_complete;
grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
- grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
+ grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+ grpc_schedule_on_exec_ctx);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
"external_connectivity_watcher");
gpr_mu_lock(&chand->mu);
diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index 76c78ee853..a0fc95e7bb 100644
--- a/src/core/ext/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -131,7 +131,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
handshaker->shutdown = true;
}
// Invoke callback.
- grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
+ grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
}
// Callback invoked when finished writing HTTP CONNECT request.
@@ -229,7 +229,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
goto done;
}
// Success. Invoke handshake-done callback.
- grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
+ grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
done:
// Set shutdown to true so that subsequent calls to
// http_connect_handshaker_shutdown() do nothing.
@@ -313,9 +313,9 @@ grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server) {
handshaker->proxy_server = gpr_strdup(proxy_server);
grpc_slice_buffer_init(&handshaker->write_buffer);
grpc_closure_init(&handshaker->request_done_closure, on_write_done,
- handshaker);
+ handshaker, grpc_schedule_on_exec_ctx);
grpc_closure_init(&handshaker->response_read_closure, on_read_done,
- handshaker);
+ handshaker, grpc_schedule_on_exec_ctx);
grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
&handshaker->http_response);
return &handshaker->base;
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index f294e69392..87f0ef298a 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -293,8 +293,9 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
- grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c),
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
}
@@ -330,7 +331,8 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
c->args = grpc_channel_args_copy(args->args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
- grpc_closure_init(&c->connected, subchannel_connected, c);
+ grpc_closure_init(&c->connected, subchannel_connected, c,
+ grpc_schedule_on_exec_ctx);
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel");
int initial_backoff_ms =
@@ -505,7 +507,8 @@ void grpc_subchannel_notify_on_state_change(
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
- grpc_closure_init(&w->closure, on_external_state_watcher_done, w);
+ grpc_closure_init(&w->closure, on_external_state_watcher_done, w,
+ grpc_schedule_on_exec_ctx);
if (interested_parties != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
interested_parties);
@@ -626,7 +629,7 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx,
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed,
- sw_subchannel);
+ sw_subchannel, grpc_schedule_on_exec_ctx);
if (c->disconnected) {
gpr_free(sw_subchannel);
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index bed5e6c901..bb0ee9d95c 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -180,8 +180,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
wrapped_rr_closure_arg *wc_arg = arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
- NULL);
+ grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
if (wc_arg->rr_policy != NULL) {
/* if *target is NULL, no pick has been made by the RR policy (eg, all
@@ -248,7 +247,8 @@ static void add_pending_pick(pending_pick **root,
pick_args->lb_token_mdelem_storage;
pp->wrapped_on_complete_arg.free_when_done = pp;
grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
- wrapped_rr_closure, &pp->wrapped_on_complete_arg);
+ wrapped_rr_closure, &pp->wrapped_on_complete_arg,
+ grpc_schedule_on_exec_ctx);
*root = pp;
}
@@ -268,7 +268,8 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
- wrapped_rr_closure, &pping->wrapped_notify_arg);
+ wrapped_rr_closure, &pping->wrapped_notify_arg,
+ grpc_schedule_on_exec_ctx);
*root = pping;
}
@@ -667,7 +668,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
gpr_malloc(sizeof(rr_connectivity_data));
memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
- rr_connectivity);
+ rr_connectivity, grpc_schedule_on_exec_ctx);
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = new_rr_state;
@@ -908,15 +909,15 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_NONE);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_ERROR_NONE);
pping = next;
}
}
@@ -932,9 +933,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
} else {
pp->next = glb_policy->pending_picks;
glb_policy->pending_picks = pp;
@@ -957,9 +958,9 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
} else {
pp->next = glb_policy->pending_picks;
glb_policy->pending_picks = pp;
@@ -994,11 +995,10 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, on_complete,
GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
- "won't work without it. Failing"),
- NULL);
+ "won't work without it. Failing"));
return 0;
}
@@ -1017,7 +1017,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
- grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
+ grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
+ grpc_schedule_on_exec_ctx);
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
wc_arg->wrapped_closure = on_complete;
@@ -1117,9 +1118,11 @@ static void lb_call_init_locked(glb_lb_policy *glb_policy) {
glb_policy->lb_call_status_details_capacity = 0;
grpc_closure_init(&glb_policy->lb_on_server_status_received,
- lb_on_server_status_received, glb_policy);
+ lb_on_server_status_received, glb_policy,
+ grpc_schedule_on_exec_ctx);
grpc_closure_init(&glb_policy->lb_on_response_received,
- lb_on_response_received, glb_policy);
+ lb_on_response_received, glb_policy,
+ grpc_schedule_on_exec_ctx);
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
index afecb716fb..e352e0396f 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
@@ -1,35 +1,3 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.3.7-dev */
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index e36d0966f8..725aa7e386 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -1,35 +1,3 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
/* Automatically generated nanopb header */
/* Generated by nanopb-0.3.7-dev */
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index b9cfe6b5c0..821becff69 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -120,7 +120,7 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
pp = next;
}
@@ -138,9 +138,9 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -165,9 +165,9 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -306,14 +306,15 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
- grpc_exec_ctx_sched(exec_ctx,
- grpc_closure_create(destroy_subchannels, p),
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx,
+ grpc_closure_create(destroy_subchannels, p,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
@@ -366,8 +367,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@@ -419,8 +419,7 @@ static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (selected) {
grpc_connected_subchannel_ping(exec_ctx, selected, closure);
} else {
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"),
- NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
}
}
@@ -485,7 +484,8 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
p->num_subchannels = subchannel_idx;
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
- grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
+ grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p,
+ grpc_schedule_on_exec_ctx);
gpr_mu_init(&p->mu);
return &p->base;
}
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index f0305473d2..47f20a1904 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -321,8 +321,8 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE("Channel Shutdown"), NULL);
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE("Channel Shutdown"));
gpr_free(pp);
}
grpc_connectivity_state_set(
@@ -348,9 +348,9 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -376,9 +376,9 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -581,7 +581,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
}
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
update_lb_connectivity_status(exec_ctx, sd, error);
@@ -634,7 +634,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
}
@@ -684,8 +684,8 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
gpr_mu_unlock(&p->mu);
- grpc_exec_ctx_sched(exec_ctx, closure,
- GRPC_ERROR_CREATE("Round Robin not connected"), NULL);
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE("Round Robin not connected"));
}
}
@@ -749,7 +749,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
}
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
- rr_connectivity_changed, sd);
+ rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx);
}
}
if (subchannel_idx == 0) {
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 18bb826948..1403eb0d33 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -114,7 +114,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
memset(calld, 0, sizeof(call_data));
calld->id = (intptr_t)args->call_stack;
- grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem);
+ grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem,
+ grpc_schedule_on_exec_ctx);
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index 2675fa931f..efbb2be4e1 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -112,8 +112,8 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_exec_ctx_sched(exec_ctx, r->next_completion,
- GRPC_ERROR_CREATE("Resolver Shutdown"), NULL);
+ grpc_closure_sched(exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE("Resolver Shutdown"));
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@@ -219,9 +219,10 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!r->resolving);
r->resolving = true;
r->addresses = NULL;
- grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
- r->interested_parties,
- grpc_closure_create(dns_on_resolved, r), &r->addresses);
+ grpc_resolve_address(
+ exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
+ grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx),
+ &r->addresses);
}
static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -231,7 +232,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
*r->target_result = r->resolved_result == NULL
? NULL
: grpc_channel_args_copy(r->resolved_result);
- grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index 88808c674f..55ea502c9e 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -89,7 +89,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@@ -123,7 +123,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
*r->target_result =
grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
- grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
}
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index 114bb07222..c807342ebc 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -141,7 +141,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_closure *notify = c->notify;
c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+ grpc_closure_sched(exec_ctx, notify, error);
grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
c->handshake_mgr = NULL;
gpr_mu_unlock(&c->mu);
@@ -180,7 +180,7 @@ static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
memset(c->result, 0, sizeof(*c->result));
grpc_closure *notify = c->notify;
c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+ grpc_closure_sched(exec_ctx, notify, error);
gpr_mu_unlock(&c->mu);
chttp2_connector_unref(exec_ctx, arg);
} else {
@@ -203,7 +203,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
memset(c->result, 0, sizeof(*c->result));
grpc_closure *notify = c->notify;
c->notify = NULL;
- grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+ grpc_closure_sched(exec_ctx, notify, error);
if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint);
gpr_mu_unlock(&c->mu);
chttp2_connector_unref(exec_ctx, arg);
@@ -211,7 +211,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GPR_ASSERT(c->endpoint != NULL);
if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
- c);
+ c, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&c->initial_string_buffer);
grpc_slice_buffer_add(&c->initial_string_buffer,
c->args.initial_connect_string);
@@ -237,7 +237,7 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
c->result = result;
GPR_ASSERT(c->endpoint == NULL);
chttp2_connector_ref(con); // Ref taken for callback.
- grpc_closure_init(&c->connected, connected, c);
+ grpc_closure_init(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
GPR_ASSERT(!c->connecting);
c->connecting = true;
grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index f0857714fc..9af62d372e 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -281,7 +281,8 @@ grpc_error *grpc_chttp2_server_add_port(
state = gpr_malloc(sizeof(*state));
memset(state, 0, sizeof(*state));
grpc_closure_init(&state->tcp_server_shutdown_complete,
- tcp_server_shutdown_complete, state);
+ tcp_server_shutdown_complete, state,
+ grpc_schedule_on_exec_ctx);
err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,
args, &tcp_server);
if (err != GRPC_ERROR_NONE) {
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 6bc054866b..488c3b93cc 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -73,20 +73,14 @@ static const grpc_transport_vtable vtable;
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void write_action_end(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
-static void read_action_begin(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error);
-static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
- grpc_error *error);
/** Set a transport level setting, and push it to our peer */
static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_setting_id id, uint32_t value);
@@ -112,12 +106,8 @@ static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored);
-static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
-static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
@@ -166,8 +156,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
and maybe they hold resources that need to be freed */
while (t->pings.next != &t->pings) {
grpc_chttp2_outstanding_ping *ping = t->pings.next;
- grpc_exec_ctx_sched(exec_ctx, ping->on_recv,
- GRPC_ERROR_CREATE("Transport closed"), NULL);
+ grpc_closure_sched(exec_ctx, ping->on_recv,
+ GRPC_ERROR_CREATE("Transport closed"));
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@@ -246,18 +236,15 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
- grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked,
- t);
- grpc_closure_init(&t->write_action, write_action, t);
- grpc_closure_init(&t->write_action_end, write_action_end, t);
- grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
- grpc_closure_init(&t->read_action_begin, read_action_begin, t);
- grpc_closure_init(&t->read_action_locked, read_action_locked, t);
- grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
- grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
- grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+ grpc_closure_init(&t->write_action, write_action, t,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&t->read_action_locked, read_action_locked, t,
+ grpc_combiner_scheduler(t->combiner, false));
+ grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
+ grpc_combiner_scheduler(t->combiner, false));
grpc_closure_init(&t->destructive_reclaimer_locked,
- destructive_reclaimer_locked, t);
+ destructive_reclaimer_locked, t,
+ grpc_combiner_scheduler(t->combiner, false));
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(&t->hpack_parser);
@@ -395,9 +382,10 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_combiner_execute(exec_ctx, t->combiner,
- grpc_closure_create(destroy_transport_locked, t),
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(exec_ctx, grpc_closure_create(
+ destroy_transport_locked, t,
+ grpc_combiner_scheduler(t->combiner, false)),
+ GRPC_ERROR_NONE);
}
static void close_transport_locked(grpc_exec_ctx *exec_ctx,
@@ -471,8 +459,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- grpc_closure_init(&s->complete_fetch, complete_fetch, s);
- grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s);
+ grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s,
+ grpc_schedule_on_exec_ctx);
GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
@@ -547,9 +535,10 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
s->destroy_stream_arg = and_free_memory;
- grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s);
- grpc_combiner_execute(exec_ctx, t->combiner, &s->destroy_stream,
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(
+ exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
+ grpc_combiner_scheduler(t->combiner, false)),
+ GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0);
}
@@ -600,7 +589,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
write_state_name(st), reason));
t->write_state = st;
if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
- grpc_exec_ctx_enqueue_list(exec_ctx, &t->run_after_write, NULL);
+ grpc_closure_list_sched(exec_ctx, &t->run_after_write);
if (t->close_transport_on_writes_finished != NULL) {
grpc_error *err = t->close_transport_on_writes_finished;
t->close_transport_on_writes_finished = NULL;
@@ -618,9 +607,12 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->combiner,
- &t->write_action_begin_locked,
- GRPC_ERROR_NONE, covered_by_poller);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(
+ &t->write_action_begin_locked, write_action_begin_locked, t,
+ grpc_combiner_finally_scheduler(t->combiner, covered_by_poller)),
+ GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING:
set_write_state(
@@ -662,7 +654,7 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"begin writing");
- grpc_exec_ctx_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
} else {
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
@@ -674,19 +666,13 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
grpc_chttp2_transport *t = gt;
GPR_TIMER_BEGIN("write_action", 0);
- grpc_endpoint_write(exec_ctx, t->ep, &t->outbuf, &t->write_action_end);
+ grpc_endpoint_write(
+ exec_ctx, t->ep, &t->outbuf,
+ grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t,
+ grpc_combiner_scheduler(t->combiner, false)));
GPR_TIMER_END("write_action", 0);
}
-static void write_action_end(grpc_exec_ctx *exec_ctx, void *gt,
- grpc_error *error) {
- grpc_chttp2_transport *t = gt;
- GPR_TIMER_BEGIN("write_action_end", 0);
- grpc_combiner_execute(exec_ctx, t->combiner, &t->write_action_end_locked,
- GRPC_ERROR_REF(error), false);
- GPR_TIMER_END("write_action_end", 0);
-}
-
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
@@ -716,18 +702,24 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [!covered]");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->combiner,
- &t->write_action_begin_locked,
- GRPC_ERROR_NONE, false);
+ grpc_closure_run(
+ exec_ctx,
+ grpc_closure_init(
+ &t->write_action_begin_locked, write_action_begin_locked, t,
+ grpc_combiner_finally_scheduler(t->combiner, false)),
+ GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [covered]");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->combiner,
- &t->write_action_begin_locked,
- GRPC_ERROR_NONE, true);
+ grpc_closure_run(
+ exec_ctx,
+ grpc_closure_init(&t->write_action_begin_locked,
+ write_action_begin_locked, t,
+ grpc_combiner_finally_scheduler(t->combiner, true)),
+ GRPC_ERROR_NONE);
break;
}
@@ -965,15 +957,6 @@ static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
}
}
-static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
- grpc_error *error) {
- grpc_chttp2_stream *s = gs;
- grpc_chttp2_transport *t = s->t;
- grpc_combiner_execute(exec_ctx, t->combiner, &s->complete_fetch_locked,
- GRPC_ERROR_REF(error),
- s->complete_fetch_covered_by_poller);
-}
-
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
@@ -1009,7 +992,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_closure *on_complete = op->on_complete;
if (on_complete == NULL) {
- on_complete = grpc_closure_create(do_nothing, NULL);
+ on_complete =
+ grpc_closure_create(do_nothing, NULL, grpc_schedule_on_exec_ctx);
}
/* use final_data as a barrier until enqueue time; the initial counter is
@@ -1212,13 +1196,15 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_free(str);
}
- grpc_closure_init(&op->transport_private.closure, perform_stream_op_locked,
- op);
op->transport_private.args[0] = gt;
op->transport_private.args[1] = gs;
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
- grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
- GRPC_ERROR_NONE, op->covered_by_poller);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(
+ &op->transport_private.closure, perform_stream_op_locked, op,
+ grpc_combiner_scheduler(t->combiner, op->covered_by_poller)),
+ GRPC_ERROR_NONE);
GPR_TIMER_END("perform_stream_op", 0);
}
@@ -1247,7 +1233,7 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_outstanding_ping *ping;
for (ping = t->pings.next; ping != &t->pings; ping = ping->next) {
if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
- grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@@ -1321,11 +1307,12 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
char *msg = grpc_transport_op_string(op);
gpr_free(msg);
op->transport_private.args[0] = gt;
- grpc_closure_init(&op->transport_private.closure, perform_transport_op_locked,
- op);
GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
- grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(
+ exec_ctx, grpc_closure_init(&op->transport_private.closure,
+ perform_transport_op_locked, op,
+ grpc_combiner_scheduler(t->combiner, false)),
+ GRPC_ERROR_NONE);
}
/*******************************************************************************
@@ -1801,19 +1788,6 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
* INPUT PROCESSING - PARSING
*/
-static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- /* Control flow:
- reading_action_locked ->
- (parse_unlocked -> post_parse_locked)? ->
- post_reading_action_locked */
- GPR_TIMER_BEGIN("reading_action", 0);
- grpc_chttp2_transport *t = tp;
- grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked,
- GRPC_ERROR_REF(error), false);
- GPR_TIMER_END("reading_action", 0);
-}
-
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_http_parser parser;
@@ -1913,7 +1887,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_slice_buffer_reset_and_unref(&t->read_buffer);
if (keep_reading) {
- grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);
+ grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
+ &t->read_action_locked);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
@@ -2050,10 +2025,12 @@ static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
bs->next_action.slice = slice;
bs->next_action.max_size_hint = max_size_hint;
bs->next_action.on_complete = on_complete;
- grpc_closure_init(&bs->next_action.closure, incoming_byte_stream_next_locked,
- bs);
- grpc_combiner_execute(exec_ctx, bs->transport->combiner,
- &bs->next_action.closure, GRPC_ERROR_NONE, false);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(
+ &bs->next_action.closure, incoming_byte_stream_next_locked, bs,
+ grpc_combiner_scheduler(bs->transport->combiner, false)),
+ GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_next", 0);
return 0;
}
@@ -2075,10 +2052,12 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream;
- grpc_closure_init(&bs->destroy_action, incoming_byte_stream_destroy_locked,
- bs);
- grpc_combiner_execute(exec_ctx, bs->transport->combiner, &bs->destroy_action,
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(
+ &bs->destroy_action, incoming_byte_stream_destroy_locked, bs,
+ grpc_combiner_scheduler(bs->transport->combiner, false)),
+ GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}
@@ -2086,7 +2065,7 @@ static void incoming_byte_stream_publish_error(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error) {
GPR_ASSERT(error != GRPC_ERROR_NONE);
- grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
+ grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error));
bs->on_next = NULL;
GRPC_ERROR_UNREF(bs->error);
bs->error = error;
@@ -2103,7 +2082,7 @@ void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
if (bs->on_next != NULL) {
*bs->next = slice;
- grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE);
bs->on_next = NULL;
} else {
grpc_slice_buffer_add(&bs->slices, slice);
@@ -2171,7 +2150,7 @@ static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
grpc_resource_user_post_reclaimer(exec_ctx,
grpc_endpoint_get_resource_user(t->ep),
- false, &t->benign_reclaimer);
+ false, &t->benign_reclaimer_locked);
}
}
@@ -2182,24 +2161,10 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
grpc_resource_user_post_reclaimer(exec_ctx,
grpc_endpoint_get_resource_user(t->ep),
- true, &t->destructive_reclaimer);
+ true, &t->destructive_reclaimer_locked);
}
}
-static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = arg;
- grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
- GRPC_ERROR_REF(error), false);
-}
-
-static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = arg;
- grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked,
- GRPC_ERROR_REF(error), false);
-}
-
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
@@ -2380,5 +2345,5 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer);
}
- read_action_begin(exec_ctx, t, GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
}
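
The recurring rewrite in chttp2_transport.c combines two API changes: grpc_closure_init now returns its closure, and the target combiner is bound once at init time through a grpc_closure_scheduler instead of being named at every call site. A minimal sketch of the idiom, using hypothetical names (st, my_cb, my_combiner):

    /* Bind the closure to a combiner once, then schedule it like any other
       closure; `false` means it is not covered by a poller. */
    grpc_closure_sched(
        exec_ctx,
        grpc_closure_init(&st->closure, my_cb, st,
                          grpc_combiner_scheduler(my_combiner, false)),
        GRPC_ERROR_NONE);
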
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 6a9200b8db..64ce07b2f7 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -1634,10 +1634,11 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
however -- it might be that we receive a RST_STREAM following this
and can avoid the extra write */
GRPC_CHTTP2_STREAM_REF(s, "final_rst");
- grpc_combiner_execute_finally(
- exec_ctx, t->combiner,
- grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE,
- false);
+ grpc_closure_sched(
+ exec_ctx, grpc_closure_create(force_client_rst_stream, s,
+ grpc_combiner_finally_scheduler(
+ t->combiner, false)),
+ GRPC_ERROR_NONE);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
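
grpc_combiner_finally_scheduler is the replacement for grpc_combiner_execute_finally: closures bound to it run under the lock just before the combiner unlocks. A sketch under the same assumptions (cb and arg are hypothetical):

    /* Heap-allocated closure queued into the combiner's final list; it runs
       just prior to unlock, not covered by a poller. */
    grpc_closure_sched(
        exec_ctx,
        grpc_closure_create(cb, arg,
                            grpc_combiner_finally_scheduler(t->combiner, false)),
        GRPC_ERROR_NONE);
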
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index b727965d43..a52acbacdb 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -212,10 +212,8 @@ struct grpc_chttp2_transport {
grpc_closure write_action_begin_locked;
grpc_closure write_action;
- grpc_closure write_action_end;
grpc_closure write_action_end_locked;
- grpc_closure read_action_begin;
grpc_closure read_action_locked;
/** incoming read bytes */
@@ -336,10 +334,8 @@ struct grpc_chttp2_transport {
/** have we scheduled a destructive cleanup? */
bool destructive_reclaimer_registered;
/** benign cleanup closure */
- grpc_closure benign_reclaimer;
grpc_closure benign_reclaimer_locked;
/** destructive cleanup closure */
- grpc_closure destructive_reclaimer;
grpc_closure destructive_reclaimer_locked;
};
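
With schedulers bound at init time, each trampoline pair above collapses: the non-locked closure whose only job was to re-enqueue its `_locked` twin into the combiner disappears. A sketch of how the surviving field is presumably initialized elsewhere in this patch (not a quote from the commit):

    /* One closure, already bound to the transport's combiner. */
    grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
                      grpc_combiner_scheduler(t->combiner, false));
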
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index afc59f4b12..df0a769f6c 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -849,17 +849,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
OP_RECV_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
- grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
- GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_CANCELLED);
} else if (stream_state->state_callback_received[OP_FAILED]) {
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, stream_op->recv_initial_metadata_ready,
- make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+ make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
} else {
grpc_chttp2_incoming_metadata_buffer_publish(
&oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata);
- grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
}
stream_state->state_op_done[OP_RECV_INITIAL_METADATA] = true;
result = ACTION_TAKEN_NO_CALLBACK;
@@ -910,22 +910,22 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
- grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
- GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+ GRPC_ERROR_CANCELLED);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->state_callback_received[OP_FAILED]) {
CRONET_LOG(GPR_DEBUG, "Stream failed.");
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, stream_op->recv_message_ready,
- make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+ make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.read_stream_closed == true) {
/* No more data will be received */
CRONET_LOG(GPR_DEBUG, "read stream closed");
- grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+ GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
@@ -958,8 +958,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
&stream_state->rs.read_slice_buffer, 0);
*((grpc_byte_buffer **)stream_op->recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
- grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+ GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
@@ -993,8 +993,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
&stream_state->rs.read_slice_buffer, 0);
*((grpc_byte_buffer **)stream_op->recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
- grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+ GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
/* Do an extra read to trigger on_succeeded() callback in case connection
@@ -1055,18 +1055,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
OP_ON_COMPLETE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
- grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete,
- GRPC_ERROR_REF(stream_state->cancel_error), NULL);
+ grpc_closure_sched(exec_ctx, stream_op->on_complete,
+ GRPC_ERROR_REF(stream_state->cancel_error));
} else if (stream_state->state_callback_received[OP_FAILED]) {
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, stream_op->on_complete,
- make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+ make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
} else {
/* All actions in this stream_op are complete. Call the on_complete
* callback
*/
- grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
}
oas->state.state_op_done[OP_ON_COMPLETE] = true;
oas->done = true;
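
Every cronet call site above is the same mechanical rewrite: grpc_exec_ctx_sched loses its trailing offload-target argument (always NULL here) and becomes grpc_closure_sched, which dispatches through the closure's own scheduler:

    /* Old API (offload target dropped):
         grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
       New API: */
    grpc_closure_sched(exec_ctx, closure, error);
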
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 1d0b7d4f31..cddd84fcca 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -297,7 +297,8 @@ void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
memset(op, 0, sizeof(*op));
op->cancel_error = GRPC_ERROR_CANCELLED;
- op->on_complete = grpc_closure_create(destroy_op, op);
+ op->on_complete =
+ grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
elem->filter->start_transport_stream_op(exec_ctx, elem, op);
}
@@ -307,7 +308,8 @@ void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
grpc_slice *optional_message) {
grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
memset(op, 0, sizeof(*op));
- op->on_complete = grpc_closure_create(destroy_op, op);
+ op->on_complete =
+ grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
grpc_transport_stream_op_add_cancellation_with_message(op, status,
optional_message);
elem->filter->start_transport_stream_op(exec_ctx, elem, op);
@@ -319,7 +321,8 @@ void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx,
grpc_slice *optional_message) {
grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
memset(op, 0, sizeof(*op));
- op->on_complete = grpc_closure_create(destroy_op, op);
+ op->on_complete =
+ grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
grpc_transport_stream_op_add_close(op, status, optional_message);
elem->filter->start_transport_stream_op(exec_ctx, elem, op);
}
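
Because grpc_closure_create now takes the scheduler as well, a heap-allocated closure can be built and scheduled in a single expression; grpc_schedule_on_exec_ctx preserves the old default of running at the next exec_ctx flush. A sketch reusing the destroy_op callback from the hunks above:

    grpc_closure_sched(
        exec_ctx, grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx),
        GRPC_ERROR_NONE);
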
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 0e336dc330..35455d4908 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -269,8 +269,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
grpc_slice_buffer_init(&calld->slices);
calld->has_compression_algorithm = 0;
- grpc_closure_init(&calld->got_slice, got_slice, elem);
- grpc_closure_init(&calld->send_done, send_done, elem);
+ grpc_closure_init(&calld->got_slice, got_slice, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->send_done, send_done, elem,
+ grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 470ccfea57..902ebf1955 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -123,7 +123,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
grpc_transport_stream_op* op) {
deadline_state->next_on_complete = op->on_complete;
- grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state);
+ grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
+ grpc_schedule_on_exec_ctx);
op->on_complete = &deadline_state->on_complete;
}
@@ -172,8 +173,9 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
- grpc_closure_init(&state->closure, start_timer_after_init, state);
- grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_init(&state->closure, start_timer_after_init, state,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
}
}
@@ -290,7 +292,8 @@ static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
calld->recv_initial_metadata = op->recv_initial_metadata;
grpc_closure_init(&calld->recv_initial_metadata_ready,
- recv_initial_metadata_ready, elem);
+ recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
}
// Make sure we know when the call is complete, so that we can cancel
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 23edc826ca..ff827527b3 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -165,7 +165,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
- grpc_exec_ctx_sched(exec_ctx, &mgr->on_handshake_done, error, NULL);
+ grpc_closure_sched(exec_ctx, &mgr->on_handshake_done, error);
mgr->shutdown = true;
} else {
grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
@@ -218,8 +218,10 @@ void grpc_handshake_manager_do_handshake(
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;
- grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr);
- grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args);
+ grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args,
+ grpc_schedule_on_exec_ctx);
// Start deadline timer, which owns a ref.
gpr_ref(&mgr->refs);
grpc_timer_init(exec_ctx, &mgr->deadline_timer,
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 1a2d08dda5..c37cab3939 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -352,12 +352,17 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
calld->send_message_blocked = false;
grpc_slice_buffer_init(&calld->slices);
grpc_closure_init(&calld->hc_on_recv_initial_metadata,
- hc_on_recv_initial_metadata, elem);
+ hc_on_recv_initial_metadata, elem,
+ grpc_schedule_on_exec_ctx);
grpc_closure_init(&calld->hc_on_recv_trailing_metadata,
- hc_on_recv_trailing_metadata, elem);
- grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem);
- grpc_closure_init(&calld->got_slice, got_slice, elem);
- grpc_closure_init(&calld->send_done, send_done, elem);
+ hc_on_recv_trailing_metadata, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->got_slice, got_slice, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->send_done, send_done, elem,
+ grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index a5134ee21b..a6d720530a 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -334,9 +334,12 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
/* initialize members */
memset(calld, 0, sizeof(*calld));
- grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
- grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem);
- grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem);
+ grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&calld->read_slice_buffer);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index f05c789010..b5e882de52 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -124,7 +124,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
gpr_free(message_string);
}
// Invoke the next callback.
- grpc_exec_ctx_sched(exec_ctx, calld->next_recv_message_ready, error, NULL);
+ grpc_closure_sched(exec_ctx, calld->next_recv_message_ready, error);
}
// Start transport stream op.
@@ -160,7 +160,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
- grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
+ grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 1035f31109..581a74b73f 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -103,7 +103,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
grpc_error *error) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
req->context->pollset_set);
- grpc_exec_ctx_sched(exec_ctx, req->on_done, error, NULL);
+ grpc_closure_sched(exec_ctx, req->on_done, error);
grpc_http_parser_destroy(&req->parser);
if (req->addresses != NULL) {
grpc_resolved_addresses_destroy(req->addresses);
@@ -224,7 +224,8 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
return;
}
addr = &req->addresses->addrs[req->next_address++];
- grpc_closure_init(&req->connected, on_connected, req);
+ grpc_closure_init(&req->connected, on_connected, req,
+ grpc_schedule_on_exec_ctx);
grpc_arg arg;
arg.key = GRPC_ARG_RESOURCE_QUOTA;
arg.type = GRPC_ARG_POINTER;
@@ -266,8 +267,9 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
- grpc_closure_init(&req->on_read, on_read, req);
- grpc_closure_init(&req->done_write, done_write, req);
+ grpc_closure_init(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&req->done_write, done_write, req,
+ grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&req->incoming);
grpc_slice_buffer_init(&req->outgoing);
grpc_iomgr_register_object(&req->iomgr_obj, name);
@@ -277,9 +279,11 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(pollent);
grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent,
req->context->pollset_set);
- grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port,
- req->context->pollset_set,
- grpc_closure_create(on_resolved, req), &req->addresses);
+ grpc_resolve_address(
+ exec_ctx, request->host, req->handshaker->default_port,
+ req->context->pollset_set,
+ grpc_closure_create(on_resolved, req, grpc_schedule_on_exec_ctx),
+ &req->addresses);
}
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 14cdb1dab3..6b197c2e35 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -96,7 +96,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
}
- grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+ grpc_closure_sched(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index c6ddc76732..da0ec878a3 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -37,10 +37,13 @@
#include "src/core/lib/profiling/timers.h"
-void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg) {
+grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg,
+ grpc_closure_scheduler *scheduler) {
closure->cb = cb;
closure->cb_arg = cb_arg;
+ closure->scheduler = scheduler;
+ return closure;
}
void grpc_closure_list_init(grpc_closure_list *closure_list) {
@@ -105,11 +108,12 @@ static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
cb(exec_ctx, cb_arg, error);
}
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
+ grpc_closure_scheduler *scheduler) {
wrapped_closure *wc = gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
- grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
+ grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler);
return &wc->wrapper;
}
@@ -117,8 +121,30 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
- c->cb(exec_ctx, c->cb_arg, error);
+ c->scheduler->vtable->run(exec_ctx, c, error);
+ } else {
+ GRPC_ERROR_UNREF(error);
}
- GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_closure_run", 0);
}
+
+void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("grpc_closure_sched", 0);
+ if (c != NULL) {
+ c->scheduler->vtable->sched(exec_ctx, c, error);
+ } else {
+ GRPC_ERROR_UNREF(error);
+ }
+ GPR_TIMER_END("grpc_closure_sched", 0);
+}
+
+void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+ grpc_closure *c = list->head;
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
+ c = next;
+ }
+ list->head = list->tail = NULL;
+}
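
The two entry points differ only in which vtable slot they invoke: grpc_closure_run uses the scheduler's run (typically inline), grpc_closure_sched uses sched (typically deferred), and both now drop the error reference themselves when handed a NULL closure. A sketch with a hypothetical on_done callback:

    static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      /* do not unref error here: the scheduler's run/sched path owns it */
    }

    void example(grpc_exec_ctx *exec_ctx) {
      grpc_closure c;
      grpc_closure_init(&c, on_done, NULL, grpc_schedule_on_exec_ctx);
      grpc_closure_run(exec_ctx, &c, GRPC_ERROR_NONE);     /* runs via vtable->run */
      grpc_closure_sched(exec_ctx, NULL, GRPC_ERROR_NONE); /* safe no-op */
    }
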
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 2b4b271eaa..1b5d9b20a0 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -59,6 +59,22 @@ typedef struct grpc_closure_list {
typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
+typedef struct grpc_closure_scheduler grpc_closure_scheduler;
+
+typedef struct grpc_closure_scheduler_vtable {
+ /* NOTE: for all these functions, closure->scheduler == the scheduler that was
+ used to find this vtable */
+ void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+ void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+} grpc_closure_scheduler_vtable;
+
+/** Abstract type that can schedule closures for execution */
+struct grpc_closure_scheduler {
+ const grpc_closure_scheduler_vtable *vtable;
+};
+
/** A closure over a grpc_iomgr_cb_func. */
struct grpc_closure {
/** Once queued, next indicates the next queued closure; before then, scratch
@@ -75,6 +91,10 @@ struct grpc_closure {
/** Arguments to be passed to "cb". */
void *cb_arg;
+  /** Scheduler to schedule against; must not be NULL. Pass
+      grpc_schedule_on_exec_ctx to run on the current execution context. */
+ grpc_closure_scheduler *scheduler;
+
/** Once queued, the result of the closure. Before then: scratch space */
union {
grpc_error *error;
@@ -82,12 +102,14 @@ struct grpc_closure {
} error_data;
};
-/** Initializes \a closure with \a cb and \a cb_arg. */
-void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg);
+/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
+grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg,
+ grpc_closure_scheduler *scheduler);
/* Create a heap-allocated closure: try to avoid this except for very rare events */
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
+ grpc_closure_scheduler *scheduler);
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
@@ -115,4 +137,13 @@ bool grpc_closure_list_empty(grpc_closure_list list);
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
+/** Schedule a closure to be run. Need not be called from a safe point. */
+void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+
+/** Schedule all closures in a list to be run. Does not need to be run from a
+ * safe point. */
+void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *closure_list);
+
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
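
The vtable makes schedulers pluggable. A trivial one that runs closures inline for both entry points would look like this (a sketch, not part of the patch; it mirrors exec_ctx_run in exec_ctx.c below):

    static void inline_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                           grpc_error *error) {
      closure->cb(exec_ctx, closure->cb_arg, error);
      GRPC_ERROR_UNREF(error);
    }

    static const grpc_closure_scheduler_vtable inline_vtable = {inline_run,
                                                                inline_run};
    static grpc_closure_scheduler inline_scheduler = {&inline_vtable};
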
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index cfc67020ae..c26a73b2b7 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -56,6 +56,10 @@ int grpc_combiner_trace = 0;
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
+ grpc_closure_scheduler uncovered_scheduler;
+ grpc_closure_scheduler covered_scheduler;
+ grpc_closure_scheduler uncovered_finally_scheduler;
+ grpc_closure_scheduler covered_finally_scheduler;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned (STATE_UNORPHANED)
@@ -70,6 +74,26 @@ struct grpc_combiner {
grpc_closure offload;
};
+static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_error *error);
+static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_error *error);
+static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure,
+ grpc_error *error);
+static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure,
+ grpc_error *error);
+
+static const grpc_closure_scheduler_vtable scheduler_uncovered = {
+ combiner_exec_uncovered, combiner_exec_uncovered};
+static const grpc_closure_scheduler_vtable scheduler_covered = {
+ combiner_exec_covered, combiner_exec_covered};
+static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
+ combiner_finally_exec_uncovered, combiner_finally_exec_uncovered};
+static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
+ combiner_finally_exec_covered, combiner_finally_exec_covered};
+
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
typedef struct {
@@ -102,11 +126,16 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
lock->final_list_covered_by_poller = false;
+ lock->uncovered_scheduler.vtable = &scheduler_uncovered;
+ lock->covered_scheduler.vtable = &scheduler_covered;
+ lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
+ lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- grpc_closure_init(&lock->offload, offload, lock);
+ grpc_closure_init(&lock->offload, offload, lock,
+ grpc_workqueue_scheduler(lock->optional_workqueue));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -148,9 +177,9 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *cl, grpc_error *error,
- bool covered_by_poller) {
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
+ grpc_closure *cl, grpc_error *error,
+ bool covered_by_poller) {
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(
@@ -171,6 +200,24 @@ void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
GPR_TIMER_END("combiner.execute", 0);
}
+#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
+ ((grpc_combiner *)(((char *)((closure)->scheduler)) - \
+ offsetof(grpc_combiner, scheduler_name)))
+
+static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+ grpc_error *error) {
+ combiner_exec(exec_ctx,
+ COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
+ error, false);
+}
+
+static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+ grpc_error *error) {
+ combiner_exec(exec_ctx,
+ COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
+ error, true);
+}
+
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@@ -188,8 +235,7 @@ static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
lock->optional_workqueue));
- grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
- GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
@@ -312,23 +358,22 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
- grpc_error *error) {
- grpc_combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
- GRPC_ERROR_REF(error), false);
-}
+ grpc_error *error);
-void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *closure, grpc_error *error,
- bool covered_by_poller) {
+static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
+ grpc_combiner *lock, grpc_closure *closure,
+ grpc_error *error,
+ bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
closure, exec_ctx->active_combiner, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
- grpc_combiner_execute(exec_ctx, lock,
- grpc_closure_create(enqueue_finally, closure), error,
- false);
+ grpc_closure_sched(
+ exec_ctx, grpc_closure_create(enqueue_finally, closure,
+ grpc_combiner_scheduler(lock, false)),
+ error);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
@@ -342,3 +387,36 @@ void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
+
+static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
+ grpc_error *error) {
+ combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
+ grpc_closure *cl,
+ grpc_error *error) {
+ combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
+ cl, uncovered_finally_scheduler),
+ cl, error, false);
+}
+
+static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
+ grpc_closure *cl, grpc_error *error) {
+ combiner_execute_finally(
+ exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
+ cl, error, true);
+}
+
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
+ bool covered_by_poller) {
+ return covered_by_poller ? &combiner->covered_scheduler
+ : &combiner->uncovered_scheduler;
+}
+
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(
+ grpc_combiner *combiner, bool covered_by_poller) {
+ return covered_by_poller ? &combiner->covered_finally_scheduler
+ : &combiner->uncovered_finally_scheduler;
+}
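
COMBINER_FROM_CLOSURE_SCHEDULER is the classic container-of idiom: each combiner embeds four scheduler structs, so the scheduler pointer alone encodes both the combiner and the covered/finally flavour, and subtracting the member's offset (offsetof, from <stddef.h>) recovers the lock. Expanded by hand for one flavour, illustratively:

    grpc_combiner *lock =
        (grpc_combiner *)((char *)cl->scheduler -
                          offsetof(grpc_combiner, covered_scheduler));
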
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index d04eeed83a..81dff85d40 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -50,14 +50,12 @@
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
// Destroy the lock
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
-// Execute \a action within the lock.
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *closure, grpc_error *error,
- bool covered_by_poller);
-// Execute \a action within the lock just prior to unlocking.
-void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *closure, grpc_error *error,
- bool covered_by_poller);
+// Fetch a scheduler to schedule closures against
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
+ bool covered_by_poller);
+// Scheduler to execute \a action within the lock just prior to unlocking.
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
+ bool covered_by_poller);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index 1b15e0eb4f..045001f6d8 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -202,6 +202,8 @@ static void fd_global_shutdown(void);
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
+ grpc_closure_scheduler workqueue_scheduler;
+
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
the refcount.
@@ -305,6 +307,8 @@ static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -317,6 +321,9 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
+static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
+ workqueue_enqueue, workqueue_enqueue};
+
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
@@ -529,6 +536,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
+ pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@@ -800,10 +808,10 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue, grpc_closure *closure,
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
+ grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
/* take a ref to the workqueue: otherwise it can happen that whatever events
* this kicks off end up destroying the workqueue before this function
* completes */
@@ -820,6 +828,12 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("workqueue.enqueue", 0);
}
+static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
+ polling_island *pi = (polling_island *)workqueue;
+ return workqueue == NULL ? grpc_schedule_on_exec_ctx
+ : &pi->workqueue_scheduler;
+}
+
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -1030,8 +1044,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
fd->po.pi = NULL;
}
- grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error),
- NULL);
+ grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->po.mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1057,16 +1070,14 @@ static grpc_error *fd_shutdown_error(bool shutdown) {
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
if (fd->shutdown) {
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
- NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
- grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
- NULL);
+ grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
@@ -1088,7 +1099,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
return 0;
} else {
/* waiting ==> queue closure */
- grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+ grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
*st = CLOSURE_NOT_READY;
return 1;
}
@@ -1359,7 +1370,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
/* Release the ref and set pollset->po.pi to NULL */
pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->po.mu lock must be held by the caller before calling this */
@@ -1410,7 +1421,9 @@ static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
workqueue_maybe_wakeup(pi);
}
grpc_closure *c = (grpc_closure *)n;
- grpc_closure_run(exec_ctx, c, c->error_data.error);
+ grpc_error *error = c->error_data.error;
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
return true;
} else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not available to be popped
@@ -1959,7 +1972,7 @@ static const grpc_event_engine_vtable vtable = {
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
- .workqueue_enqueue = workqueue_enqueue,
+ .workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};
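
The cast inside workqueue_enqueue works because workqueue_scheduler is the first member of polling_island: a pointer to the embedded scheduler and a pointer to the island share the same address, so (grpc_workqueue *)closure->scheduler recovers the island. A sketch of the invariant, as a hypothetical helper:

    static bool scheduler_aliases_island(polling_island *pi) {
      /* holds because workqueue_scheduler is the first field */
      return (void *)&pi->workqueue_scheduler == (void *)pi;
    }
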
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 21b28e5554..5bc5621443 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -397,7 +397,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
if (!fd->released) {
close(fd->fd);
}
- grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
}
static int fd_wrapped_fd(grpc_fd *fd) {
@@ -457,16 +457,14 @@ static grpc_error *fd_shutdown_error(bool shutdown) {
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
if (fd->shutdown) {
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
- NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
- grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
- NULL);
+ grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
maybe_wake_one_watcher_locked(fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
@@ -489,7 +487,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
return 0;
} else {
/* waiting ==> queue closure */
- grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+ grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
*st = CLOSURE_NOT_READY;
return 1;
}
@@ -852,7 +850,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
}
pollset->fd_count = 0;
- grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
static void work_combine_error(grpc_error **composite, grpc_error *error) {
@@ -901,7 +899,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (!pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
- grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
goto done;
}
/* If we're shutting down then we don't execute any extended work */
@@ -1081,7 +1079,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
- grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
@@ -1100,7 +1098,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutdown_done = closure;
pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
- grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
}
if (!pollset->called_shutdown && !pollset_has_workers(pollset)) {
pollset->called_shutdown = 1;
@@ -1288,10 +1286,8 @@ static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue, grpc_closure *closure,
- grpc_error *error) {
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
+ return grpc_schedule_on_exec_ctx;
}
/*******************************************************************************
@@ -1534,7 +1530,7 @@ static const grpc_event_engine_vtable vtable = {
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
- .workqueue_enqueue = workqueue_enqueue,
+ .workqueue_scheduler = workqueue_scheduler,
.shutdown_engine = shutdown_engine,
};
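
grpc_closure_list_sched replaces grpc_exec_ctx_enqueue_list in the idle_jobs drains above; per the closure.c implementation earlier in this patch, each element is handed to its own scheduler and the list is left empty:

    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
    GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
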
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index ab139895fd..2975d619e1 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -275,9 +275,8 @@ void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
}
#endif
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error) {
- g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+ return g_event_engine->workqueue_scheduler(workqueue);
}
#endif // GRPC_POSIX_SOCKET
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index cb5832539d..1068a4bad5 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -106,8 +106,7 @@ typedef struct grpc_event_engine_vtable {
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
- void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error);
+ grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 604713e578..6aa788f8e5 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -57,7 +57,6 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
return true;
}
-#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
@@ -67,8 +66,10 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
did_something = true;
- grpc_closure_run(exec_ctx, c, c->error_data.error);
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
c = next;
}
} else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
@@ -76,30 +77,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
}
}
GPR_ASSERT(exec_ctx->active_combiner == NULL);
- if (exec_ctx->stealing_from_workqueue != NULL) {
- if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
- grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
- exec_ctx->stolen_closure,
- exec_ctx->stolen_closure->error_data.error);
- GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
- "exec_ctx_sched");
- exec_ctx->stealing_from_workqueue = NULL;
- exec_ctx->stolen_closure = NULL;
- } else {
- grpc_closure *c = exec_ctx->stolen_closure;
- GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
- "exec_ctx_sched");
- exec_ctx->stealing_from_workqueue = NULL;
- exec_ctx->stolen_closure = NULL;
- grpc_error *error = c->error_data.error;
- GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
- grpc_exec_ctx_flush(exec_ctx);
- did_something = true;
- }
- }
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
return did_something;
}
@@ -109,104 +86,21 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
grpc_exec_ctx_flush(exec_ctx);
}
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error,
- grpc_workqueue *offload_target_or_null) {
- GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
- if (offload_target_or_null == NULL) {
- grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- } else if (exec_ctx->stealing_from_workqueue == NULL) {
- exec_ctx->stealing_from_workqueue = offload_target_or_null;
- closure->error_data.error = error;
- exec_ctx->stolen_closure = closure;
- } else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
- grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
- GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
- } else { /* stealing_from_workqueue == offload_target_or_null */
- grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
- exec_ctx->stolen_closure,
- exec_ctx->stolen_closure->error_data.error);
- closure->error_data.error = error;
- exec_ctx->stolen_closure = closure;
- GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
- }
- GPR_TIMER_END("grpc_exec_ctx_sched", 0);
+static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ closure->cb(exec_ctx, closure->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
}
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
- grpc_closure_list *list,
- grpc_workqueue *offload_target_or_null) {
- grpc_closure_list_move(list, &exec_ctx->closure_list);
+static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}
void grpc_exec_ctx_global_init(void) {}
void grpc_exec_ctx_global_shutdown(void) {}
-#else
-static gpr_mu g_mu;
-static gpr_cv g_cv;
-static int g_threads = 0;
-
-static void run_closure(void *arg) {
- grpc_closure *closure = arg;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- closure->cb(&exec_ctx, closure->cb_arg, (closure->final_data & 1) != 0);
- grpc_exec_ctx_finish(&exec_ctx);
- gpr_mu_lock(&g_mu);
- if (--g_threads == 0) {
- gpr_cv_signal(&g_cv);
- }
- gpr_mu_unlock(&g_mu);
-}
-
-static void start_closure(grpc_closure *closure) {
- gpr_thd_id id;
- gpr_mu_lock(&g_mu);
- g_threads++;
- gpr_mu_unlock(&g_mu);
- gpr_thd_new(&id, run_closure, closure, NULL);
-}
-
-bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return false; }
-
-void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {}
-void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- bool success,
- grpc_workqueue *offload_target_or_null) {
- GPR_ASSERT(offload_target_or_null == NULL);
- if (closure == NULL) return;
- closure->final_data = success;
- start_closure(closure);
-}
-
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
- grpc_closure_list *list,
- grpc_workqueue *offload_target_or_null) {
- GPR_ASSERT(offload_target_or_null == NULL);
- if (list == NULL) return;
- grpc_closure *p = list->head;
- while (p) {
- grpc_closure *start = p;
- p = grpc_closure_next(start);
- start_closure(start);
- }
- grpc_closure_list r = GRPC_CLOSURE_LIST_INIT;
- *list = r;
-}
-
-void grpc_exec_ctx_global_init(void) {
- gpr_mu_init(&g_mu);
- gpr_cv_init(&g_cv);
-}
-
-void grpc_exec_ctx_global_shutdown(void) {
- gpr_mu_lock(&g_mu);
- while (g_threads != 0) {
- gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
- }
- gpr_mu_unlock(&g_mu);
-
- gpr_mu_destroy(&g_mu);
- gpr_cv_destroy(&g_cv);
-}
-#endif
+static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
+ exec_ctx_run, exec_ctx_sched};
+static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
+grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
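
grpc_schedule_on_exec_ctx is the default scheduler: its run slot calls the callback inline, its sched slot appends to exec_ctx->closure_list for the next flush. A sketch of the lifecycle, assuming a hypothetical on_done callback:

    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_closure done;
    grpc_closure_init(&done, on_done, NULL, grpc_schedule_on_exec_ctx);
    grpc_closure_sched(&exec_ctx, &done, GRPC_ERROR_NONE); /* queued */
    grpc_exec_ctx_finish(&exec_ctx); /* flushes the list; on_done runs here */
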
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 7e50cb9825..e566f1b3e8 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -66,17 +66,6 @@ typedef struct grpc_combiner grpc_combiner;
#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
grpc_closure_list closure_list;
- /** The workqueue we're stealing work from.
- As items are queued to the execution context, we try to steal one
- workqueue item and execute it inline (assuming the exec_ctx is not
- finished) - doing so does not invalidate the workqueue's contract, and
- provides a small latency win in cases where we get a hit */
- grpc_workqueue *stealing_from_workqueue;
- /** The workqueue item that was stolen from the workqueue above. When new
- items are scheduled to be offloaded to that workqueue, we need to update
- this like a 1-deep fifo to maintain the invariant that workqueue items
- queued by one thread are started in order */
- grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
/** last active combiner in the active combiner list */
@@ -89,10 +78,7 @@ struct grpc_exec_ctx {
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
- { \
- GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
- finish_check \
- }
+ { GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check }
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@@ -108,6 +94,8 @@ struct grpc_exec_ctx {
#define GRPC_EXEC_CTX_INIT \
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
+extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
+
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.
* Returns true if work was performed, false otherwise. */
@@ -115,14 +103,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
/** Finish any pending work for a grpc_exec_ctx. Must be called before
* the instance is destroyed, or work may be lost. */
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
-/** Add a closure to be executed in the future.
- If \a offload_target_or_null is NULL, the closure will be executed at the
- next exec_ctx.{finish,flush} point.
- If \a offload_target_or_null is non-NULL, the closure will be scheduled
- against the workqueue, and a reference to the workqueue will be consumed. */
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error,
- grpc_workqueue *offload_target_or_null);
/** Returns true if we'd like to leave this execution context as soon as
possible: useful for deciding whether to do something more or not depending
on outside context */
@@ -131,11 +111,6 @@ bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx);
bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
/** A finish check that is always ready to finish */
bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
-/** Add a list of closures to be executed at the next flush/finish point.
- * Leaves \a list empty. */
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
- grpc_closure_list *list,
- grpc_workqueue *offload_target_or_null);
void grpc_exec_ctx_global_init(void);
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 8d7535d6fe..1342a28d8d 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -77,10 +77,18 @@ static void closure_exec_thread_func(void *ignored) {
gpr_mu_unlock(&g_executor.mu);
break;
} else {
- grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+ grpc_closure *c = g_executor.closures.head;
+ grpc_closure_list_init(&g_executor.closures);
+ gpr_mu_unlock(&g_executor.mu);
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ c->cb(&exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ }
+ grpc_exec_ctx_flush(&exec_ctx);
}
- gpr_mu_unlock(&g_executor.mu);
- grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -112,7 +120,8 @@ static void maybe_spawn_locked() {
g_executor.pending_join = 1;
}
-void grpc_executor_push(grpc_closure *closure, grpc_error *error) {
+static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
gpr_mu_lock(&g_executor.mu);
if (g_executor.shutting_down == 0) {
grpc_closure_list_append(&g_executor.closures, closure, error);
@@ -133,7 +142,15 @@ void grpc_executor_shutdown() {
* list below because we aren't accepting new work */
/* Execute pending callbacks, some may be performing cleanups */
- grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+ grpc_closure *c = g_executor.closures.head;
+ grpc_closure_list_init(&g_executor.closures);
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ c->cb(&exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ }
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
if (pending_join) {
@@ -141,3 +158,8 @@ void grpc_executor_shutdown() {
}
gpr_mu_destroy(&g_executor.mu);
}
+
+static const grpc_closure_scheduler_vtable executor_vtable = {executor_push,
+ executor_push};
+static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
+grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
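
grpc_executor_push is gone; work is handed to the executor thread by binding a closure to grpc_executor_scheduler and scheduling it normally, exactly as the resolver files below do:

    grpc_closure_init(&r->request_closure, do_request_thread, r,
                      grpc_executor_scheduler);
    grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
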
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index da9dcd07d0..53f3b6d441 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -43,9 +43,7 @@
* non-blocking solution available. */
void grpc_executor_init();
-/** Enqueue \a closure for its eventual execution of \a f(arg) on a separate
- * thread */
-void grpc_executor_push(grpc_closure *closure, grpc_error *error);
+extern grpc_closure_scheduler *grpc_executor_scheduler;
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown();
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index 3a74b842b6..ed3edeee94 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -83,7 +83,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
// Drain any pending UV callbacks without blocking
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 5540303e49..2a45e708df 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -109,7 +109,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
pollset->on_shutdown = closure;
}
@@ -167,8 +167,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
if (pollset->shutting_down && pollset->on_shutdown != NULL) {
- grpc_exec_ctx_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
pollset->on_shutdown = NULL;
}
goto done;
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 821932e562..50e470d149 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -163,10 +163,9 @@ typedef struct {
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
request *r = rp;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, r->on_done,
- grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out),
- NULL);
+ grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r);
@@ -185,12 +184,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
request *r = gpr_malloc(sizeof(request));
- grpc_closure_init(&r->request_closure, do_request_thread, r);
+ grpc_closure_init(&r->request_closure, do_request_thread, r,
+ grpc_executor_scheduler);
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
r->addrs_out = addrs;
- grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
void (*grpc_resolve_address)(
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index 3269c4f09f..9b5f3209f0 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -98,7 +98,7 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error *error;
error = handle_addrinfo_result(status, res, r->addresses);
- grpc_exec_ctx_sched(&exec_ctx, r->on_done, error, NULL);
+ grpc_closure_sched(&exec_ctx, r->on_done, error);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(r->hints);
@@ -193,7 +193,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
int s;
err = try_split_host_port(name, default_port, &host, &port);
if (err != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+ grpc_closure_sched(exec_ctx, on_done, err);
return;
}
r = gpr_malloc(sizeof(request));
@@ -217,7 +217,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
*addrs = NULL;
err = GRPC_ERROR_CREATE("getaddrinfo failed");
err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
- grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+ grpc_closure_sched(exec_ctx, on_done, err);
gpr_free(r);
gpr_free(req);
gpr_free(hints);
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index fada5ecbe8..2439ce3cb7 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -154,7 +154,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
} else {
GRPC_ERROR_REF(error);
}
- grpc_exec_ctx_sched(exec_ctx, r->on_done, error, NULL);
+ grpc_closure_sched(exec_ctx, r->on_done, error);
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r);
@@ -173,12 +173,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_closure *on_done,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
- grpc_closure_init(&r->request_closure, do_request_thread, r);
+ grpc_closure_init(&r->request_closure, do_request_thread, r,
+ grpc_executor_scheduler);
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
r->addresses = addresses;
- grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
void (*grpc_resolve_address)(
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index 213d29600c..8db539edfb 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -265,9 +265,8 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx,
if (resource_quota->step_scheduled) return;
resource_quota->step_scheduled = true;
grpc_resource_quota_internal_ref(resource_quota);
- grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
- &resource_quota->rq_step_closure,
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure,
+ GRPC_ERROR_NONE);
}
/* returns true if all allocations are completed */
@@ -294,7 +293,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
}
if (resource_user->free_pool >= 0) {
resource_user->allocating = false;
- grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
+ grpc_closure_list_sched(exec_ctx, &resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
} else {
rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -439,7 +438,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
resource_user->new_reclaimers[destructive] = NULL;
GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED);
return false;
}
resource_user->reclaimers[destructive] = closure;
@@ -480,10 +479,10 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
grpc_resource_user *resource_user = ru;
- grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
- GRPC_ERROR_CANCELLED, NULL);
- grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
- GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED);
+ grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED);
resource_user->reclaimers[0] = NULL;
resource_user->reclaimers[1] = NULL;
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
@@ -496,10 +495,10 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
}
- grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
- GRPC_ERROR_CANCELLED, NULL);
- grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
- GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED);
+ grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED);
if (resource_user->free_pool != 0) {
resource_user->resource_quota->free_pool += resource_user->free_pool;
rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -571,9 +570,12 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
(intptr_t)resource_quota);
}
- grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota);
+ grpc_closure_init(
+ &resource_quota->rq_step_closure, rq_step, resource_quota,
+ grpc_combiner_finally_scheduler(resource_quota->combiner, true));
grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
- rq_reclamation_done, resource_quota);
+ rq_reclamation_done, resource_quota,
+ grpc_combiner_scheduler(resource_quota->combiner, false));
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_quota->roots[i] = NULL;
}
@@ -614,9 +616,8 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
rq_resize_args *a = gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
a->size = (int64_t)size;
- grpc_closure_init(&a->closure, rq_resize, a);
- grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
- GRPC_ERROR_NONE, false);
+ grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -663,15 +664,19 @@ grpc_resource_user *grpc_resource_user_create(
resource_user->resource_quota =
grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
- resource_user);
+ resource_user,
+ grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_closure_init(&resource_user->add_to_free_pool_closure,
- &ru_add_to_free_pool, resource_user);
+ &ru_add_to_free_pool, resource_user,
+ grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_closure_init(&resource_user->post_reclaimer_closure[0],
- &ru_post_benign_reclaimer, resource_user);
+ &ru_post_benign_reclaimer, resource_user,
+ grpc_combiner_scheduler(resource_quota->combiner, false));
grpc_closure_init(&resource_user->post_reclaimer_closure[1],
- &ru_post_destructive_reclaimer, resource_user);
- grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
- resource_user);
+ &ru_post_destructive_reclaimer, resource_user,
+ grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
+ grpc_combiner_scheduler(resource_quota->combiner, false));
gpr_mu_init(&resource_user->mu);
gpr_atm_rel_store(&resource_user->refs, 1);
gpr_atm_rel_store(&resource_user->shutdown, 0);
@@ -706,9 +711,8 @@ static void ru_unref_by(grpc_exec_ctx *exec_ctx,
gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
GPR_ASSERT(old >= amount);
if (old == amount) {
- grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
- &resource_user->destroy_closure, GRPC_ERROR_NONE,
- false);
+ grpc_closure_sched(exec_ctx, &resource_user->destroy_closure,
+ GRPC_ERROR_NONE);
}
}
@@ -724,9 +728,12 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
- grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
- grpc_closure_create(ru_shutdown, resource_user),
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(exec_ctx,
+ grpc_closure_create(
+ ru_shutdown, resource_user,
+ grpc_combiner_scheduler(
+ resource_user->resource_quota->combiner, false)),
+ GRPC_ERROR_NONE);
}
}
@@ -746,12 +753,11 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
if (!resource_user->allocating) {
resource_user->allocating = true;
- grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
- &resource_user->allocate_closure, GRPC_ERROR_NONE,
- false);
+ grpc_closure_sched(exec_ctx, &resource_user->allocate_closure,
+ GRPC_ERROR_NONE);
}
} else {
- grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
}
@@ -770,9 +776,8 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
if (is_bigger_than_zero && was_zero_or_negative &&
!resource_user->added_to_free_pool) {
resource_user->added_to_free_pool = true;
- grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
- &resource_user->add_to_free_pool_closure,
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(exec_ctx, &resource_user->add_to_free_pool_closure,
+ GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
@@ -784,9 +789,9 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_closure *closure) {
GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
resource_user->new_reclaimers[destructive] = closure;
- grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
- &resource_user->post_reclaimer_closure[destructive],
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(exec_ctx,
+ &resource_user->post_reclaimer_closure[destructive],
+ GRPC_ERROR_NONE);
}
void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
@@ -795,18 +800,20 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}
- grpc_combiner_execute(
- exec_ctx, resource_user->resource_quota->combiner,
- &resource_user->resource_quota->rq_reclamation_done_closure,
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(
+ exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
+ GRPC_ERROR_NONE);
}
void grpc_resource_user_slice_allocator_init(
grpc_resource_user_slice_allocator *slice_allocator,
grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
- grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
- slice_allocator);
- grpc_closure_init(&slice_allocator->on_done, cb, p);
+ grpc_closure_init(
+ &slice_allocator->on_allocated, ru_allocated_slices, slice_allocator,
+ grpc_combiner_scheduler(resource_user->resource_quota->combiner, false));
+ grpc_closure_init(
+ &slice_allocator->on_done, cb, p,
+ grpc_combiner_scheduler(resource_user->resource_quota->combiner, false));
slice_allocator->resource_user = resource_user;
}
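resource_quota.c is the heaviest user of the new combiner schedulers: closures that must run serialized under the quota's combiner are initialized with grpc_combiner_scheduler(combiner, false), and the step closure uses grpc_combiner_finally_scheduler(combiner, true) so it runs as the combiner drains. The boolean carries over the last argument of the old grpc_combiner_execute calls (presumably the covered-by-poller flag). A sketch of the idiom, with hypothetical names:

  /* bind once: every sched of this closure is serialized under 'combiner' */
  grpc_closure_init(&state->on_event, on_event, state,
                    grpc_combiner_scheduler(combiner, false));

  /* later, from any thread holding an exec_ctx: */
  grpc_closure_sched(exec_ctx, &state->on_event, GRPC_ERROR_NONE);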
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index 54911e0e31..2f2e02f715 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -131,7 +131,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
info->has_pending_iocp = 0;
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
info->closure = closure;
}
@@ -154,7 +154,7 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
GPR_ASSERT(!info->has_pending_iocp);
gpr_mu_lock(&socket->state_mu);
if (info->closure) {
- grpc_exec_ctx_sched(exec_ctx, info->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE);
info->closure = NULL;
} else {
info->has_pending_iocp = 1;
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index a3a70a8ed7..d089d2bc3b 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -265,7 +265,7 @@ finish:
grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+ grpc_closure_sched(exec_ctx, closure, error);
}
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
@@ -294,7 +294,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+ grpc_closure_sched(exec_ctx, closure, error);
return;
}
if (dsmode == GRPC_DSMODE_IPV4) {
@@ -303,7 +303,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
addr = &addr4_copy;
}
if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+ grpc_closure_sched(exec_ctx, closure, error);
return;
}
@@ -321,14 +321,13 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (err >= 0) {
*ep =
grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
- grpc_exec_ctx_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"),
- NULL);
+ grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
goto done;
}
@@ -343,8 +342,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
addr_str = NULL;
gpr_mu_init(&ac->mu);
ac->refs = 2;
- ac->write_closure.cb = on_writable;
- ac->write_closure.cb_arg = ac;
+ grpc_closure_init(&ac->write_closure, on_writable, ac,
+ grpc_schedule_on_exec_ctx);
ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace) {
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index b07f9ceffa..b1664b85fd 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -110,7 +110,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
if (done) {
uv_tcp_connect_cleanup(&exec_ctx, connect);
}
- grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
+ grpc_closure_sched(&exec_ctx, closure, error);
grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index 1127588ebc..692252bbe0 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -129,7 +129,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
- grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
+ grpc_closure_sched(exec_ctx, on_done, error);
}
/* Tries to issue one async connection, then schedules both an IOCP
@@ -227,7 +227,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
ac->resource_quota = resource_quota;
- grpc_closure_init(&ac->on_connect, on_connect, ac);
+ grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
gpr_now(GPR_CLOCK_MONOTONIC));
@@ -247,7 +247,7 @@ failure:
closesocket(sock);
}
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
- grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
+ grpc_closure_sched(exec_ctx, on_done, final_error);
}
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 540305e4fa..21a0371d10 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -316,7 +316,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->finished_edge = false;
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
- grpc_exec_ctx_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
}
}
@@ -460,11 +460,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
- grpc_exec_ctx_sched(exec_ctx, cb,
- grpc_fd_is_shutdown(tcp->em_fd)
- ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
- : GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_sched(exec_ctx, cb,
+ grpc_fd_is_shutdown(tcp->em_fd)
+ ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
+ : GRPC_ERROR_NONE);
return;
}
tcp->outgoing_buffer = buf;
@@ -484,7 +483,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
gpr_log(GPR_DEBUG, "write: %s", str);
grpc_error_free_string(str);
}
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_sched(exec_ctx, cb, error);
}
GPR_TIMER_END("tcp_write", 0);
@@ -552,10 +551,10 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
- tcp->read_closure.cb = tcp_handle_read;
- tcp->read_closure.cb_arg = tcp;
- tcp->write_closure.cb = tcp_handle_write;
- tcp->write_closure.cb_arg = tcp;
+ grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&tcp->write_closure, tcp_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
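Several files in this change stop writing the cb/cb_arg fields by hand and go through grpc_closure_init, which is what records the scheduler. The before/after shape, taken from the tcp_posix.c hunk above:

  /* before: fields assigned directly, no scheduler attached */
  tcp->read_closure.cb = tcp_handle_read;
  tcp->read_closure.cb_arg = tcp;

  /* after: one call, explicitly scheduled on the current exec_ctx */
  grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp,
                    grpc_schedule_on_exec_ctx);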
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 179f47ef76..6db624dd56 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -208,7 +208,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
GPR_ASSERT(s->shutdown);
gpr_mu_unlock(&s->mu);
if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
@@ -254,8 +254,8 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
- sp->destroyed_closure.cb = destroyed_port;
- sp->destroyed_closure.cb_arg = s;
+ grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+ grpc_schedule_on_exec_ctx);
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"tcp_listener_shutdown");
}
@@ -723,8 +723,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
"clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
- sp->read_closure.cb = on_read;
- sp->read_closure.cb_arg = sp;
+ grpc_closure_init(&sp->read_closure, on_read, sp,
+ grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
sp = sp->next;
@@ -733,8 +733,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- sp->read_closure.cb = on_read;
- sp->read_closure.cb_arg = sp;
+ grpc_closure_init(&sp->read_closure, on_read, sp,
+ grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
sp = sp->next;
@@ -760,7 +760,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
+ grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
gpr_mu_unlock(&s->mu);
tcp_server_destroy(exec_ctx, s);
}
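The shutdown path above also swaps grpc_exec_ctx_enqueue_list for grpc_closure_list_sched, which schedules every closure in a list through whatever scheduler each closure was initialized with. A sketch, assuming the list is populated with the existing grpc_closure_list_append helper:

  grpc_closure_list list;
  grpc_closure_list_init(&list);  /* same initializer seen in the executor hunk */
  grpc_closure_list_append(&list, closure, GRPC_ERROR_NONE);
  grpc_closure_list_sched(exec_ctx, &list);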
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
index e1a174cfa2..8abc60d624 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -126,7 +126,7 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
while (s->head) {
@@ -170,7 +170,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ grpc_closure_list_sched(&local_exec_ctx, &s->shutdown_starting);
if (exec_ctx == NULL) {
grpc_exec_ctx_flush(&local_exec_ctx);
tcp_server_destroy(&local_exec_ctx, s);
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index b0c8586bac..6302bff5d2 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -162,11 +162,12 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
- grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s),
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, grpc_closure_create(destroy_server, s,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
@@ -204,7 +205,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
+ grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
gpr_mu_unlock(&s->mu);
tcp_server_destroy(exec_ctx, s);
}
@@ -465,7 +466,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
sp->new_socket = INVALID_SOCKET;
sp->port = port;
sp->port_index = port_index;
- grpc_closure_init(&sp->on_accept, on_accept, sp);
+ grpc_closure_init(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
GPR_ASSERT(sp->socket);
gpr_mu_unlock(&s->mu);
*listener = sp;
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 6e2ad1dbe9..4be95457d5 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -169,7 +169,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// nread < 0: Error
error = GRPC_ERROR_CREATE("TCP Read failed");
}
- grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+ grpc_closure_sched(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -190,7 +190,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
error = GRPC_ERROR_CREATE("TCP Read failed at start");
error =
grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_sched(exec_ctx, cb, error);
}
if (grpc_tcp_trace) {
const char *str = grpc_error_string(error);
@@ -217,7 +217,7 @@ static void write_callback(uv_write_t *req, int status) {
gpr_free(tcp->write_buffers);
grpc_resource_user_free(&exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * tcp->write_slices->count);
- grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+ grpc_closure_sched(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -243,8 +243,8 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
if (tcp->shutting_down) {
- grpc_exec_ctx_sched(exec_ctx, cb,
- GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ grpc_closure_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"));
return;
}
@@ -254,7 +254,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (tcp->write_slices->count == 0) {
// No slices means we don't have to do anything,
// and libuv doesn't like empty writes
- grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
return;
}
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index d4613b674e..fa13ac1d25 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -188,7 +188,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
tcp->read_cb = NULL;
TCP_UNREF(exec_ctx, tcp, "read");
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_sched(exec_ctx, cb, error);
}
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -202,8 +202,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF buffer;
if (tcp->shutting_down) {
- grpc_exec_ctx_sched(exec_ctx, cb,
- GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ grpc_closure_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"));
return;
}
@@ -227,7 +227,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read;
- grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
return;
}
@@ -240,8 +240,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error;
- grpc_exec_ctx_sched(exec_ctx, &tcp->on_read,
- GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
+ grpc_closure_sched(exec_ctx, &tcp->on_read,
+ GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
return;
}
}
@@ -272,7 +272,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
}
TCP_UNREF(exec_ctx, tcp, "write");
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_sched(exec_ctx, cb, error);
}
/* Initiates a write. */
@@ -290,8 +290,8 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
size_t len;
if (tcp->shutting_down) {
- grpc_exec_ctx_sched(exec_ctx, cb,
- GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ grpc_closure_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"));
return;
}
@@ -322,7 +322,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *error = status == 0
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(info->wsa_error, "WSASend");
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_sched(exec_ctx, cb, error);
if (allocated) gpr_free(allocated);
return;
}
@@ -340,8 +340,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
TCP_UNREF(exec_ctx, tcp, "write");
- grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"),
- NULL);
+ grpc_closure_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
return;
}
}
@@ -424,8 +423,8 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
tcp->socket = socket;
gpr_mu_init(&tcp->mu);
gpr_ref_init(&tcp->refcount, 1);
- grpc_closure_init(&tcp->on_read, on_read, tcp);
- grpc_closure_init(&tcp->on_write, on_write, tcp);
+ grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
tcp->peer_string = gpr_strdup(peer_string);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 00058f9d86..ecd3b284dc 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -184,22 +184,22 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
shard_type *shard = &g_shards[shard_idx(timer)];
GPR_ASSERT(deadline.clock_type == g_clock_type);
GPR_ASSERT(now.clock_type == g_clock_type);
- grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+ grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg,
+ grpc_schedule_on_exec_ctx);
timer->deadline = deadline;
timer->triggered = 0;
if (!g_initialized) {
timer->triggered = 1;
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, &timer->closure,
- GRPC_ERROR_CREATE("Attempt to create timer before initialization"),
- NULL);
+ GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
return;
}
if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1;
- grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE);
return;
}
@@ -251,7 +251,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
shard_type *shard = &g_shards[shard_idx(timer)];
gpr_mu_lock(&shard->mu);
if (!timer->triggered) {
- grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED);
timer->triggered = 1;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
@@ -317,7 +317,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
- grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error), NULL);
+ grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error));
n++;
}
*new_min_deadline = compute_min_deadline(shard);
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index cfcb89268b..00b835ffb8 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -55,7 +55,7 @@ void run_expired_timer(uv_timer_t *handle) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(!timer->triggered);
timer->triggered = 1;
- grpc_exec_ctx_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -65,10 +65,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
void *timer_cb_arg, gpr_timespec now) {
uint64_t timeout;
uv_timer_t *uv_timer;
- grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+ grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg,
+ grpc_schedule_on_exec_ctx);
if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1;
- grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE);
return;
}
timer->triggered = 0;
@@ -83,7 +84,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
if (!timer->triggered) {
timer->triggered = 1;
- grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer);
}
}
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 3c24ea9afa..dfbd295c91 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -126,7 +126,7 @@ grpc_udp_server *grpc_udp_server_create(void) {
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
@@ -170,8 +170,8 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
- sp->destroyed_closure.cb = destroyed_port;
- sp->destroyed_closure.cb_arg = s;
+ grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+ grpc_schedule_on_exec_ctx);
/* Call the orphan_cb to signal that the FD is about to be closed and
* should no longer be used. */
@@ -446,8 +446,8 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- sp->read_closure.cb = on_read;
- sp->read_closure.cb_arg = sp;
+ grpc_closure_init(&sp->read_closure, on_read, sp,
+ grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index 73d9849843..371b0f55dc 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -72,17 +72,16 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
-/** Add a work item to a workqueue. Items added to a work queue will be started
- in approximately the order they were enqueued, on some thread that may or
- may not be the current thread. Successive closures enqueued onto a workqueue
- MAY be executed concurrently.
+/** Fetch the workqueue closure scheduler. Items added to a work queue will be
+ started in approximately the order they were enqueued, on some thread that
+ may or may not be the current thread. Successive closures enqueued onto a
+ workqueue MAY be executed concurrently.
It is generally more expensive to add a closure to a workqueue than to the
execution context, both in terms of CPU work and in execution latency.
Use work queues when it's important that other threads be given a chance to
tackle some workload. */
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue);
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
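The header comment's guidance carries over unchanged (approximate FIFO ordering, possible concurrency between successive closures, higher cost than the exec_ctx); only the call shape moves from an enqueue function to a scheduler accessor. A minimal sketch, where the workqueue pointer and callback are hypothetical:

  grpc_closure c;  /* must outlive its execution on the workqueue thread */
  grpc_closure_init(&c, on_work, arg, grpc_workqueue_scheduler(workqueue));
  grpc_closure_sched(exec_ctx, &c, GRPC_ERROR_NONE);

As the uv and windows stubs below show, grpc_workqueue_scheduler simply returns grpc_schedule_on_exec_ctx on platforms without a real workqueue.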
diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c
index e58ca476cc..4d61b40912 100644
--- a/src/core/lib/iomgr/workqueue_uv.c
+++ b/src/core/lib/iomgr/workqueue_uv.c
@@ -58,9 +58,8 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error) {
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+ return grpc_schedule_on_exec_ctx;
}
#endif /* GPR_UV */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index 5c93d3c59e..234b47cdf5 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -56,9 +56,8 @@ grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error) {
- grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+ return grpc_schedule_on_exec_ctx;
}
#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index ea4cb76fb9..1cf142fa9a 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -113,9 +113,10 @@ static void md_only_test_get_request_metadata(
if (c->is_async) {
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
- grpc_executor_push(
- grpc_closure_create(on_simulated_token_fetch_done, cb_arg),
- GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx,
+ grpc_closure_create(on_simulated_token_fetch_done,
+ cb_arg, grpc_executor_scheduler),
+ GRPC_ERROR_NONE);
} else {
cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK, NULL);
}
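For one-shot callbacks with no embedded closure to initialize, the change uses grpc_closure_create to allocate a closure already bound to a scheduler, as in the token-fetch hunk above. A sketch (callback and argument names hypothetical; the created closure is heap-allocated and treated as fire-and-forget by the surrounding code):

  grpc_closure_sched(exec_ctx,
                     grpc_closure_create(on_token_fetched, cb_arg,
                                         grpc_executor_scheduler),
                     GRPC_ERROR_NONE);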
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index afe0e3d357..caf57c856b 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -130,7 +130,8 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_get(
&exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
- grpc_closure_create(on_compute_engine_detection_http_response, &detector),
+ grpc_closure_create(on_compute_engine_detection_http_response, &detector,
+ grpc_schedule_on_exec_ctx),
&detector.response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
@@ -155,7 +156,8 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_destroy(&context);
grpc_closure_init(&destroy_closure, destroy_pollset,
- grpc_polling_entity_pollset(&detector.pollent));
+ grpc_polling_entity_pollset(&detector.pollent),
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx,
grpc_polling_entity_pollset(&detector.pollent),
&destroy_closure);
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index 03097a57c0..8c75098612 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -677,7 +677,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
- grpc_closure_create(on_keys_retrieved, ctx),
+ grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_json_destroy(json);
@@ -778,7 +778,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
*(path_prefix++) = '\0';
gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss);
}
- http_cb = grpc_closure_create(on_keys_retrieved, ctx);
+ http_cb =
+ grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
rsp_idx = HTTP_RESPONSE_KEYS;
} else {
req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
@@ -790,7 +791,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&req.http.path, "/%s%s", path_prefix,
GRPC_OPENID_CONFIG_URL_SUFFIX);
}
- http_cb = grpc_closure_create(on_openid_config_retrieved, ctx);
+ http_cb = grpc_closure_create(on_openid_config_retrieved, ctx,
+ grpc_schedule_on_exec_ctx);
rsp_idx = HTTP_RESPONSE_OPENID;
}
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index b3625b22c0..9aa7863977 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -312,9 +312,10 @@ static void compute_engine_fetch_oauth2(
extreme memory pressure. */
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("oauth2_credentials");
- grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
- deadline, grpc_closure_create(response_cb, metadata_req),
- &metadata_req->response);
+ grpc_httpcli_get(
+ exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
+ grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
+ &metadata_req->response);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
}
@@ -368,10 +369,11 @@ static void refresh_token_fetch_oauth2(
extreme memory pressure. */
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("oauth2_credentials_refresh");
- grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
- &request, body, strlen(body), deadline,
- grpc_closure_create(response_cb, metadata_req),
- &metadata_req->response);
+ grpc_httpcli_post(
+ exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
+ strlen(body), deadline,
+ grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
+ &metadata_req->response);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(body);
}
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 331a8f1835..750c3675b1 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -146,7 +146,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
}
}
ep->read_buffer = NULL;
- grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL);
+ grpc_closure_sched(exec_ctx, ep->read_cb, error);
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
@@ -329,10 +329,9 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
grpc_slice_buffer_reset_and_unref(&ep->output_buffer);
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, cb,
- grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
- NULL);
+ grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result));
GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
return;
}
@@ -417,7 +416,7 @@ grpc_endpoint *grpc_secure_endpoint_create(
grpc_slice_buffer_init(&ep->output_buffer);
grpc_slice_buffer_init(&ep->source_buffer);
ep->read_buffer = NULL;
- grpc_closure_init(&ep->on_read, on_read, ep);
+ grpc_closure_init(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
gpr_mu_init(&ep->protector_mu);
gpr_ref_init(&ep->ref, 1);
return &ep->base;
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 5b088aa58d..17ad681c82 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -134,9 +134,9 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_auth_context **auth_context,
grpc_closure *on_peer_checked) {
if (sc == NULL) {
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, on_peer_checked,
- GRPC_ERROR_CREATE("cannot check peer -- no security connector"), NULL);
+ GRPC_ERROR_CREATE("cannot check peer -- no security connector"));
tsi_peer_destruct(&peer);
} else {
sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
@@ -273,7 +273,7 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end:
- grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+ grpc_closure_sched(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
@@ -508,7 +508,7 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
? c->overridden_target_name
: c->target_name,
&peer, auth_context);
- grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+ grpc_closure_sched(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
@@ -518,7 +518,7 @@ static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
grpc_closure *on_peer_checked) {
grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
tsi_peer_destruct(&peer);
- grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+ grpc_closure_sched(exec_ctx, on_peer_checked, error);
}
static void add_shallow_auth_property_to_peer(tsi_peer *peer,
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index 41a775db85..748bf4a432 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -136,7 +136,7 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
h->shutdown = true;
}
// Invoke callback.
- grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL);
+ grpc_closure_sched(exec_ctx, h->on_handshake_done, error);
}
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -173,7 +173,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
grpc_channel_args_destroy(tmp_args);
// Invoke callback.
- grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
@@ -392,10 +392,13 @@ static grpc_handshaker *security_handshaker_create(
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
grpc_closure_init(&h->on_handshake_data_sent_to_peer,
- on_handshake_data_sent_to_peer, h);
+ on_handshake_data_sent_to_peer, h,
+ grpc_schedule_on_exec_ctx);
grpc_closure_init(&h->on_handshake_data_received_from_peer,
- on_handshake_data_received_from_peer, h);
- grpc_closure_init(&h->on_peer_checked, on_peer_checked, h);
+ on_handshake_data_received_from_peer, h,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&h->on_peer_checked, on_peer_checked, h,
+ grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&h->left_overs);
grpc_slice_buffer_init(&h->outgoing);
return &h->base;
@@ -418,9 +421,8 @@ static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_tcp_server_acceptor *acceptor,
grpc_closure *on_handshake_done,
grpc_handshaker_args *args) {
- grpc_exec_ctx_sched(exec_ctx, on_handshake_done,
- GRPC_ERROR_CREATE("Failed to create security handshaker"),
- NULL);
+ grpc_closure_sched(exec_ctx, on_handshake_done,
+ GRPC_ERROR_CREATE("Failed to create security handshaker"));
}
static const grpc_handshaker_vtable fail_handshaker_vtable = {
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index e6a242e68f..5b4adc4661 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -132,7 +132,7 @@ static void on_md_processing_done(
grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
elem);
grpc_metadata_array_destroy(&calld->md);
- grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
} else {
grpc_slice message;
grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op));
@@ -148,13 +148,13 @@ static void on_md_processing_done(
calld->transport_op->send_message = NULL;
}
calld->transport_op->send_trailing_metadata = NULL;
- close_op->on_complete = grpc_closure_create(destroy_op, close_op);
+ close_op->on_complete =
+ grpc_closure_create(destroy_op, close_op, grpc_schedule_on_exec_ctx);
grpc_transport_stream_op_add_close(close_op, status, &message);
grpc_call_next_op(&exec_ctx, elem, close_op);
- grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv,
- grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status),
- NULL);
+ grpc_closure_sched(&exec_ctx, calld->on_done_recv,
+ grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status));
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -174,8 +174,7 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
}
- grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error),
- NULL);
+ grpc_closure_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
}
static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@@ -214,7 +213,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
memset(calld, 0, sizeof(*calld));
- grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
+ grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem,
+ grpc_schedule_on_exec_ctx);
if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
args->context[GRPC_CONTEXT_SECURITY].destroy(
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 8ca3cab9d5..b20801005a 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -794,7 +794,8 @@ static void send_cancel(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
memset(&tc->op, 0, sizeof(tc->op));
tc->op.cancel_error = tc->error;
/* reuse closure to catch completion */
- grpc_closure_init(&tc->closure, done_termination, tc);
+ grpc_closure_init(&tc->closure, done_termination, tc,
+ grpc_schedule_on_exec_ctx);
tc->op.on_complete = &tc->closure;
execute_op(exec_ctx, tc->call, &tc->op);
}
@@ -804,7 +805,8 @@ static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
memset(&tc->op, 0, sizeof(tc->op));
tc->op.close_error = tc->error;
/* reuse closure to catch completion */
- grpc_closure_init(&tc->closure, done_termination, tc);
+ grpc_closure_init(&tc->closure, done_termination, tc,
+ grpc_schedule_on_exec_ctx);
tc->op.on_complete = &tc->closure;
execute_op(exec_ctx, tc->call, &tc->op);
}
@@ -814,13 +816,13 @@ static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
set_status_from_error(tc->call, STATUS_FROM_API_OVERRIDE, tc->error);
if (tc->type == TC_CANCEL) {
- grpc_closure_init(&tc->closure, send_cancel, tc);
+ grpc_closure_init(&tc->closure, send_cancel, tc, grpc_schedule_on_exec_ctx);
GRPC_CALL_INTERNAL_REF(tc->call, "cancel");
} else if (tc->type == TC_CLOSE) {
- grpc_closure_init(&tc->closure, send_close, tc);
+ grpc_closure_init(&tc->closure, send_close, tc, grpc_schedule_on_exec_ctx);
GRPC_CALL_INTERNAL_REF(tc->call, "close");
}
- grpc_exec_ctx_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE);
return GRPC_CALL_OK;
}
@@ -1138,8 +1140,8 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
} else {
*call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
}
- grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
- bctl);
+ grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, bctl,
+ grpc_schedule_on_exec_ctx);
continue_receiving_slices(exec_ctx, bctl);
}
}
@@ -1251,9 +1253,10 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
call->has_initial_md_been_received = true;
if (call->saved_receiving_stream_ready_bctlp != NULL) {
grpc_closure *saved_rsr_closure = grpc_closure_create(
- receiving_stream_ready, call->saved_receiving_stream_ready_bctlp);
+ receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
+ grpc_schedule_on_exec_ctx);
call->saved_receiving_stream_ready_bctlp = NULL;
- grpc_exec_ctx_sched(exec_ctx, saved_rsr_closure, error, NULL);
+ grpc_closure_sched(exec_ctx, saved_rsr_closure, error);
}
gpr_mu_unlock(&call->mu);
@@ -1558,7 +1561,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->received_initial_metadata = 1;
call->buffered_metadata[0] = op->data.recv_initial_metadata;
grpc_closure_init(&call->receiving_initial_metadata_ready,
- receiving_initial_metadata_ready, bctl);
+ receiving_initial_metadata_ready, bctl,
+ grpc_schedule_on_exec_ctx);
bctl->recv_initial_metadata = 1;
stream_op->recv_initial_metadata =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
@@ -1581,7 +1585,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->receiving_buffer = op->data.recv_message;
stream_op->recv_message = &call->receiving_stream;
grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
- bctl);
+ bctl, grpc_schedule_on_exec_ctx);
stream_op->recv_message_ready = &call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
@@ -1646,7 +1650,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
stream_op->context = call->context;
- grpc_closure_init(&bctl->finish_batch, finish_batch, bctl);
+ grpc_closure_init(&bctl->finish_batch, finish_batch, bctl,
+ grpc_schedule_on_exec_ctx);
stream_op->on_complete = &bctl->finish_batch;
gpr_mu_unlock(&call->mu);
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c
index 0d2f01a649..e68febdddf 100644
--- a/src/core/lib/surface/channel_ping.c
+++ b/src/core/lib/surface/channel_ping.c
@@ -71,7 +71,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GPR_ASSERT(reserved == NULL);
pr->tag = tag;
pr->cq = cq;
- grpc_closure_init(&pr->closure, ping_done, pr);
+ grpc_closure_init(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
op->send_ping = &pr->closure;
op->bind_pollset = grpc_cq_pollset(cq);
grpc_cq_begin_op(cq, tag);
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 184c1a1a16..4613c9021e 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -168,7 +168,8 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
#ifndef NDEBUG
cc->outstanding_tag_count = 0;
#endif
- grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc);
+ grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc,
+ grpc_schedule_on_exec_ctx);
GPR_TIMER_END("grpc_completion_queue_create", 0);
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 57da94ac1e..f1ad13711a 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -98,16 +98,16 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->on_connectivity_state_change) {
GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
*op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
- grpc_exec_ctx_sched(exec_ctx, op->on_connectivity_state_change,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, op->on_connectivity_state_change,
+ GRPC_ERROR_NONE);
}
if (op->send_ping != NULL) {
- grpc_exec_ctx_sched(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("lame client channel"), NULL);
+ grpc_closure_sched(exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE("lame client channel"));
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
if (op->on_consumed != NULL) {
- grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
}
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 62d7afc8da..78699e9e65 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -278,7 +278,8 @@ static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
int send_goaway, grpc_error *send_disconnect) {
struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
- grpc_closure_init(&sc->closure, shutdown_cleanup, sc);
+ grpc_closure_init(&sc->closure, shutdown_cleanup, sc,
+ grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
grpc_channel_element *elem;
@@ -346,9 +347,9 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
- grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
- NULL);
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
}
}
@@ -440,8 +441,8 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
orphan_channel(chand);
server_ref(chand->server);
maybe_finish_shutdown(exec_ctx, chand->server);
- chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
- chand->finish_destroy_channel_closure.cb_arg = chand;
+ grpc_closure_init(&chand->finish_destroy_channel_closure,
+ finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
@@ -545,8 +546,9 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
- grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, error, NULL);
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, error);
return;
}
@@ -590,9 +592,9 @@ static void finish_start_new_rpc(
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
return;
}
@@ -607,7 +609,8 @@ static void finish_start_new_rpc(
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message = &calld->payload;
- grpc_closure_init(&calld->publish, publish_new_rpc, elem);
+ grpc_closure_init(&calld->publish, publish_new_rpc, elem,
+ grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
&calld->publish);
break;
@@ -813,9 +816,10 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
- GRPC_ERROR_NONE, NULL);
+ grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_ERROR_NONE);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
@@ -851,7 +855,8 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata = &calld->initial_metadata;
- grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem);
+ grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem,
+ grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
&calld->got_initial_metadata);
}
@@ -887,7 +892,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&calld->mu_state);
grpc_closure_init(&calld->server_on_recv_initial_metadata,
- server_on_recv_initial_metadata, elem);
+ server_on_recv_initial_metadata, elem,
+ grpc_schedule_on_exec_ctx);
server_ref(chand->server);
return GRPC_ERROR_NONE;
@@ -926,7 +932,8 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_closure_init(&chand->channel_connectivity_changed,
- channel_connectivity_changed, chand);
+ channel_connectivity_changed, chand,
+ grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
@@ -1278,7 +1285,8 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* Shutdown listeners */
for (l = server->listeners; l; l = l->next) {
- grpc_closure_init(&l->destroy_done, listener_destroy_done, server);
+ grpc_closure_init(&l->destroy_done, listener_destroy_done, server,
+ grpc_schedule_on_exec_ctx);
l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
}
@@ -1384,9 +1392,10 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
- grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
- GRPC_ERROR_NONE, NULL);
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_ERROR_NONE);
} else {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
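
Note the destroy_channel() hunk above: assigning closure->cb and closure->cb_arg by hand is replaced with grpc_closure_init(). That is more than style. Presumably grpc_closure now also embeds its scheduler, so a closure whose fields were poked individually would reach grpc_closure_sched() with the scheduler unset. A sketch of the safe shape (helper name is illustrative):

static void bind_cb(grpc_closure *c, grpc_iomgr_cb_func cb, void *cb_arg) {
  /* old (now incomplete): c->cb = cb; c->cb_arg = cb_arg; */
  grpc_closure_init(c, cb, cb_arg, grpc_schedule_on_exec_ctx);
}
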
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 4f49d7cf7d..c656d93740 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -81,7 +81,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
} else {
error = GRPC_ERROR_CREATE("Shutdown connectivity owner");
}
- grpc_exec_ctx_sched(exec_ctx, w->notify, error, NULL);
+ grpc_closure_sched(exec_ctx, w->notify, error);
gpr_free(w);
}
GRPC_ERROR_UNREF(tracker->current_error);
@@ -121,7 +121,7 @@ bool grpc_connectivity_state_notify_on_state_change(
if (current == NULL) {
grpc_connectivity_state_watcher *w = tracker->watchers;
if (w != NULL && w->notify == notify) {
- grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
tracker->watchers = w->next;
gpr_free(w);
return false;
@@ -129,7 +129,7 @@ bool grpc_connectivity_state_notify_on_state_change(
while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) {
- grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
+ grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
w->next = w->next->next;
gpr_free(rm_candidate);
return false;
@@ -140,8 +140,8 @@ bool grpc_connectivity_state_notify_on_state_change(
} else {
if (tracker->current_state != *current) {
*current = tracker->current_state;
- grpc_exec_ctx_sched(exec_ctx, notify,
- GRPC_ERROR_REF(tracker->current_error), NULL);
+ grpc_closure_sched(exec_ctx, notify,
+ GRPC_ERROR_REF(tracker->current_error));
} else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
w->current = current;
@@ -191,8 +191,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify);
}
- grpc_exec_ctx_sched(exec_ctx, w->notify,
- GRPC_ERROR_REF(tracker->current_error), NULL);
+ grpc_closure_sched(exec_ctx, w->notify,
+ GRPC_ERROR_REF(tracker->current_error));
gpr_free(w);
}
}
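
One detail worth noting in connectivity_state.c: every watcher notification is scheduled with GRPC_ERROR_REF(tracker->current_error), because each scheduled closure consumes one reference to the error it runs with while the tracker keeps its own. A sketch of the per-watcher step (list handling elided, helper name illustrative):

static void notify_watcher(grpc_exec_ctx *exec_ctx, grpc_closure *notify,
                           grpc_error *current_error) {
  /* each closure takes its own ref; the tracker retains the original */
  grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_REF(current_error));
}
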
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index b448126da8..0d24062c1e 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -68,7 +68,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
#endif
if (gpr_unref(&refcount->refs)) {
- grpc_exec_ctx_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
}
@@ -82,7 +82,7 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg) {
#endif
gpr_ref_init(&refcount->refs, initial_refs);
- grpc_closure_init(&refcount->destroy, cb, cb_arg);
+ grpc_closure_init(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
}
static void move64(uint64_t *from, uint64_t *to) {
@@ -168,11 +168,10 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op,
grpc_error *error) {
- grpc_exec_ctx_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error),
- NULL);
- grpc_exec_ctx_sched(exec_ctx, op->recv_initial_metadata_ready,
- GRPC_ERROR_REF(error), NULL);
- grpc_exec_ctx_sched(exec_ctx, op->on_complete, error, NULL);
+ grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error));
+ grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
+ GRPC_ERROR_REF(error));
+ grpc_closure_sched(exec_ctx, op->on_complete, error);
}
typedef struct {
@@ -196,7 +195,8 @@ static void add_error(grpc_transport_stream_op *op, grpc_error **which,
cmd = gpr_malloc(sizeof(*cmd));
cmd->error = error;
cmd->then_call = op->on_complete;
- grpc_closure_init(&cmd->closure, free_message, cmd);
+ grpc_closure_init(&cmd->closure, free_message, cmd,
+ grpc_schedule_on_exec_ctx);
op->on_complete = &cmd->closure;
*which = error;
}
@@ -269,14 +269,14 @@ typedef struct {
static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_op *op = arg;
- grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
- NULL);
+ grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
made_transport_op *op = gpr_malloc(sizeof(*op));
- grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op);
+ grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op,
+ grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op));
op->op.on_consumed = &op->outer_on_complete;
@@ -292,8 +292,7 @@ typedef struct {
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_stream_op *op = arg;
- grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
- NULL);
+ grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
@@ -301,7 +300,7 @@ grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
made_transport_stream_op *op = gpr_malloc(sizeof(*op));
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
- op);
+ op, grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op));
op->op.on_complete = &op->outer_on_complete;
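
grpc_make_transport_op() and grpc_make_transport_stream_op() above both use the same wrap-and-forward idiom: a heap-allocated op embeds an outer closure that frees the allocation and then re-schedules the caller's inner closure. A condensed sketch of that idiom under the new API (struct and function names are illustrative):

#include <grpc/support/alloc.h>

typedef struct {
  grpc_closure outer;
  grpc_closure *inner;
} wrapped_closure;

static void outer_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  wrapped_closure *w = arg;
  grpc_closure_sched(exec_ctx, w->inner, GRPC_ERROR_REF(error));
  gpr_free(w); /* safe: inner was only scheduled, not yet run */
}

static grpc_closure *wrap(grpc_closure *inner) {
  wrapped_closure *w = gpr_malloc(sizeof(*w));
  grpc_closure_init(&w->outer, outer_cb, w, grpc_schedule_on_exec_ctx);
  w->inner = inner;
  return &w->outer;
}
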
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 8943f3f1fe..adc77bbf5d 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -200,6 +200,7 @@ module GRPC
if is_client
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
@call.status = batch_result.status
+ @call.trailing_metadata = @call.status.metadata if @call.status
batch_result.check_status
GRPC.logger.debug("bidi-read-loop: done status #{@call.status}")
end
diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb
index 84f1ce7520..f5a6b49eb4 100644
--- a/src/ruby/lib/grpc/generic/service.rb
+++ b/src/ruby/lib/grpc/generic/service.rb
@@ -110,7 +110,7 @@ module GRPC
rpc_descs[name] = RpcDesc.new(name, input, output,
marshal_class_method,
unmarshal_class_method)
- define_method(GenericService.underscore(name.to_s).to_sym) do
+ define_method(GenericService.underscore(name.to_s).to_sym) do |_, _|
fail GRPC::BadStatus.new_status_exception(
GRPC::Core::StatusCodes::UNIMPLEMENTED)
end
diff --git a/src/ruby/spec/generic/rpc_server_pool_spec.rb b/src/ruby/spec/generic/rpc_server_pool_spec.rb
index 48ccaee510..69e8222cb9 100644
--- a/src/ruby/spec/generic/rpc_server_pool_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_pool_spec.rb
@@ -94,18 +94,6 @@ describe GRPC::Pool do
expect(q.pop).to be(o)
p.stop
end
-
- it 'it throws an error if all of the workers have tasks to do' do
- p = Pool.new(5)
- p.start
- job = proc {}
- 5.times do
- expect(p.ready_for_work?).to be(true)
- p.schedule(&job)
- end
- expect { p.schedule(&job) }.to raise_error
- expect { p.schedule(&job) }.to raise_error
- end
end
describe '#stop' do
diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c
index 07fcd995d7..d579dcc7d4 100644
--- a/test/core/bad_client/bad_client.c
+++ b/test/core/bad_client/bad_client.c
@@ -148,7 +148,8 @@ void grpc_run_bad_client_test(
grpc_slice_buffer_init(&outgoing);
grpc_slice_buffer_add(&outgoing, slice);
- grpc_closure_init(&done_write_closure, done_write, &a);
+ grpc_closure_init(&done_write_closure, done_write, &a,
+ grpc_schedule_on_exec_ctx);
/* Write data */
grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure);
@@ -175,7 +176,8 @@ void grpc_run_bad_client_test(
grpc_slice_buffer_init(&args.incoming);
gpr_event_init(&args.read_done);
grpc_closure read_done_closure;
- grpc_closure_init(&read_done_closure, read_done, &args);
+ grpc_closure_init(&read_done_closure, read_done, &args,
+ grpc_schedule_on_exec_ctx);
grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming,
&read_done_closure);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
index b421720492..169323e0f7 100644
--- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
+++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
@@ -108,16 +108,18 @@ int main(int argc, char **argv) {
grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test");
gpr_event ev1;
gpr_event_init(&ev1);
- grpc_resolver_next(&exec_ctx, resolver, &result,
- grpc_closure_create(on_done, &ev1));
+ grpc_resolver_next(
+ &exec_ctx, resolver, &result,
+ grpc_closure_create(on_done, &ev1, grpc_schedule_on_exec_ctx));
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(wait_loop(5, &ev1));
GPR_ASSERT(result == NULL);
gpr_event ev2;
gpr_event_init(&ev2);
- grpc_resolver_next(&exec_ctx, resolver, &result,
- grpc_closure_create(on_done, &ev2));
+ grpc_resolver_next(
+ &exec_ctx, resolver, &result,
+ grpc_closure_create(on_done, &ev2, grpc_schedule_on_exec_ctx));
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(wait_loop(30, &ev2));
GPR_ASSERT(result != NULL);
diff --git a/test/core/client_channel/resolvers/sockaddr_resolver_test.c b/test/core/client_channel/resolvers/sockaddr_resolver_test.c
index a9fd85aea1..d6c8920ad0 100644
--- a/test/core/client_channel/resolvers/sockaddr_resolver_test.c
+++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.c
@@ -68,8 +68,8 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
on_resolution_arg on_res_arg;
memset(&on_res_arg, 0, sizeof(on_res_arg));
on_res_arg.expected_server_name = uri->path;
- grpc_closure *on_resolution =
- grpc_closure_create(on_resolution_cb, &on_res_arg);
+ grpc_closure *on_resolution = grpc_closure_create(
+ on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx);
grpc_resolver_next(&exec_ctx, resolver, &on_res_arg.resolver_result,
on_resolution);
diff --git a/test/core/client_channel/set_initial_connect_string_test.c b/test/core/client_channel/set_initial_connect_string_test.c
index 11e57439d5..2082f65458 100644
--- a/test/core/client_channel/set_initial_connect_string_test.c
+++ b/test/core/client_channel/set_initial_connect_string_test.c
@@ -94,7 +94,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_tcp_server_acceptor *acceptor) {
gpr_free(acceptor);
test_tcp_server *server = arg;
- grpc_closure_init(&on_read, handle_read, NULL);
+ grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&state.incoming_buffer);
grpc_slice_buffer_init(&state.temp_incoming_buffer);
state.tcp = tcp;
diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c
index 30468558e8..f6a9cbeef9 100644
--- a/test/core/end2end/bad_server_response_test.c
+++ b/test/core/end2end/bad_server_response_test.c
@@ -147,8 +147,8 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_tcp_server_acceptor *acceptor) {
gpr_free(acceptor);
test_tcp_server *server = arg;
- grpc_closure_init(&on_read, handle_read, NULL);
- grpc_closure_init(&on_write, done_write, NULL);
+ grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&state.temp_incoming_buffer);
grpc_slice_buffer_init(&state.outgoing_buffer);
state.tcp = tcp;
diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c
index ed85030797..45d48720c6 100644
--- a/test/core/end2end/fake_resolver.c
+++ b/test/core/end2end/fake_resolver.c
@@ -87,7 +87,7 @@ static void fake_resolver_shutdown(grpc_exec_ctx* exec_ctx,
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@@ -100,7 +100,7 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
*r->target_result =
grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
- grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
}
diff --git a/test/core/end2end/fixtures/http_proxy.c b/test/core/end2end/fixtures/http_proxy.c
index 80865fc7a6..ca7d9e9f9a 100644
--- a/test/core/end2end/fixtures/http_proxy.c
+++ b/test/core/end2end/fixtures/http_proxy.c
@@ -376,15 +376,20 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
gpr_ref_init(&conn->refcount, 1);
conn->pollset_set = grpc_pollset_set_create();
grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
- grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn);
- grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done,
- conn);
- grpc_closure_init(&conn->on_write_response_done, on_write_response_done,
- conn);
- grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn);
- grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn);
- grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn);
- grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn);
+ grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn,
+ grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&conn->client_read_buffer);
grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
grpc_slice_buffer_init(&conn->client_write_buffer);
@@ -471,7 +476,8 @@ void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) {
gpr_free(proxy->proxy_name);
grpc_channel_args_destroy(proxy->channel_args);
grpc_closure destroyed;
- grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, proxy->pollset, &destroyed);
gpr_free(proxy);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c
index 746134c85b..8136f9312c 100644
--- a/test/core/end2end/fuzzers/api_fuzzer.c
+++ b/test/core/end2end/fuzzers/api_fuzzer.c
@@ -349,11 +349,11 @@ static void finish_resolve(grpc_exec_ctx *exec_ctx, void *arg,
addrs->addrs = gpr_malloc(sizeof(*addrs->addrs));
addrs->addrs[0].len = 0;
*r->addrs = addrs;
- grpc_exec_ctx_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE);
} else {
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, r->on_done,
- GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1));
}
gpr_free(r->addr);
@@ -398,7 +398,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
future_connect *fc = arg;
if (error != GRPC_ERROR_NONE) {
*fc->ep = NULL;
- grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error), NULL);
+ grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error));
} else if (g_server != NULL) {
grpc_endpoint *client;
grpc_endpoint *server;
@@ -410,7 +410,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_server_setup_transport(exec_ctx, g_server, transport, NULL, NULL);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
- grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE);
} else {
sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline);
}
@@ -421,8 +421,8 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep, gpr_timespec deadline) {
if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) {
*ep = NULL;
- grpc_exec_ctx_sched(exec_ctx, closure,
- GRPC_ERROR_CREATE("Connect deadline exceeded"), NULL);
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE("Connect deadline exceeded"));
return;
}
diff --git a/test/core/end2end/tests/filter_causes_close.c b/test/core/end2end/tests/filter_causes_close.c
index 21905b98fa..7a7129ceb1 100644
--- a/test/core/end2end/tests/filter_causes_close.c
+++ b/test/core/end2end/tests/filter_causes_close.c
@@ -217,9 +217,9 @@ static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg,
&message);
grpc_call_next_op(exec_ctx, elem, op);
}
- grpc_exec_ctx_sched(
+ grpc_closure_sched(
exec_ctx, calld->recv_im_ready,
- GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1), NULL);
+ GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1));
}
static void start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@@ -228,7 +228,8 @@ static void start_transport_stream_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
if (op->recv_initial_metadata != NULL) {
calld->recv_im_ready = op->recv_initial_metadata_ready;
- op->recv_initial_metadata_ready = grpc_closure_create(recv_im_ready, elem);
+ op->recv_initial_metadata_ready =
+ grpc_closure_create(recv_im_ready, elem, grpc_schedule_on_exec_ctx);
}
grpc_call_next_op(exec_ctx, elem, op);
}
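
filter_causes_close.c demonstrates the standard closure-interception trick for filters: save the op's recv_initial_metadata_ready, substitute a closure of your own built with grpc_closure_create(), and when yours fires, do the extra work and then hand the saved closure to grpc_closure_sched(). A minimal sketch of the pattern (the call_data layout is illustrative):

typedef struct {
  grpc_closure *saved_ready; /* the next filter's closure */
} intercept_call_data;

static void my_ready(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  intercept_call_data *calld = arg;
  /* ... inspect or act on the received metadata here ... */
  grpc_closure_sched(exec_ctx, calld->saved_ready, GRPC_ERROR_REF(error));
}
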
diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c
index 3e312c1dde..4f00cad205 100644
--- a/test/core/http/httpcli_test.c
+++ b/test/core/http/httpcli_test.c
@@ -90,9 +90,10 @@ static void test_get(int port) {
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
- grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
- n_seconds_time(15),
- grpc_closure_create(on_finish, &response), &response);
+ grpc_httpcli_get(
+ &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+ grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+ &response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
@@ -130,9 +131,11 @@ static void test_post(int port) {
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
- grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
- "hello", 5, n_seconds_time(15),
- grpc_closure_create(on_finish, &response), &response);
+ grpc_httpcli_post(
+ &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
+ n_seconds_time(15),
+ grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+ &response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
@@ -207,7 +210,8 @@ int main(int argc, char **argv) {
test_post(port);
grpc_httpcli_context_destroy(&g_context);
- grpc_closure_init(&destroyed, destroy_pops, &g_pops);
+ grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
&destroyed);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c
index d06035149e..53b26b645f 100644
--- a/test/core/http/httpscli_test.c
+++ b/test/core/http/httpscli_test.c
@@ -91,9 +91,10 @@ static void test_get(int port) {
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
- grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
- n_seconds_time(15),
- grpc_closure_create(on_finish, &response), &response);
+ grpc_httpcli_get(
+ &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+ grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+ &response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
@@ -132,9 +133,11 @@ static void test_post(int port) {
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
- grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
- "hello", 5, n_seconds_time(15),
- grpc_closure_create(on_finish, &response), &response);
+ grpc_httpcli_post(
+ &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
+ n_seconds_time(15),
+ grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+ &response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
@@ -210,7 +213,8 @@ int main(int argc, char **argv) {
test_post(port);
grpc_httpcli_context_destroy(&g_context);
- grpc_closure_init(&destroyed, destroy_pops, &g_pops);
+ grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
&destroyed);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/internal_api_canaries/iomgr.c b/test/core/internal_api_canaries/iomgr.c
index de03c47c13..773ef602b2 100644
--- a/test/core/internal_api_canaries/iomgr.c
+++ b/test/core/internal_api_canaries/iomgr.c
@@ -60,9 +60,9 @@ static void test_code(void) {
closure_list.head = NULL;
closure_list.tail = NULL;
- grpc_closure_init(&closure, NULL, NULL);
+ grpc_closure_init(&closure, NULL, NULL, grpc_schedule_on_exec_ctx);
- grpc_closure_create(NULL, NULL);
+ grpc_closure_create(NULL, NULL, grpc_schedule_on_exec_ctx);
grpc_closure_list_move(NULL, NULL);
grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo"));
@@ -72,8 +72,8 @@ static void test_code(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_exec_ctx_flush(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
- grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL);
- grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);
+ grpc_closure_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"));
+ grpc_closure_list_sched(&exec_ctx, &closure_list);
/* endpoint.h */
grpc_endpoint endpoint;
@@ -99,7 +99,6 @@ static void test_code(void) {
/* executor.h */
grpc_executor_init();
- grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi"));
grpc_executor_shutdown();
/* pollset.h */
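
The API-canary hunks record two further renames carried by this patch: grpc_exec_ctx_enqueue_list() becomes grpc_closure_list_sched(), again dropping the workqueue argument, and grpc_executor_push() disappears from the canary, presumably because executor offload is now expressed as just another scheduler rather than a separate entry point. A hedged sketch of draining a list under the new name:

static void drain(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
  /* old: grpc_exec_ctx_enqueue_list(exec_ctx, list, NULL); */
  grpc_closure_list_sched(exec_ctx, list);
}
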
diff --git a/test/core/iomgr/combiner_test.c b/test/core/iomgr/combiner_test.c
index f7d5809be7..9b6d6ff9b4 100644
--- a/test/core/iomgr/combiner_test.c
+++ b/test/core/iomgr/combiner_test.c
@@ -59,9 +59,10 @@ static void test_execute_one(void) {
grpc_combiner *lock = grpc_combiner_create(NULL);
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_combiner_execute(&exec_ctx, lock,
- grpc_closure_create(set_bool_to_true, &done),
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(&exec_ctx,
+ grpc_closure_create(set_bool_to_true, &done,
+ grpc_combiner_scheduler(lock, false)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(done);
grpc_combiner_destroy(&exec_ctx, lock);
@@ -94,9 +95,10 @@ static void execute_many_loop(void *a) {
ex_args *c = gpr_malloc(sizeof(*c));
c->ctr = &args->ctr;
c->value = n++;
- grpc_combiner_execute(&exec_ctx, args->lock,
- grpc_closure_create(check_one, c), GRPC_ERROR_NONE,
- false);
+ grpc_closure_sched(
+ &exec_ctx, grpc_closure_create(check_one, c, grpc_combiner_scheduler(
+ args->lock, false)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_flush(&exec_ctx);
}
// sleep for a little bit, to test a combiner draining and another thread
@@ -134,9 +136,10 @@ static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_combiner_execute_finally(exec_ctx, arg,
- grpc_closure_create(in_finally, NULL),
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(exec_ctx, grpc_closure_create(
+ in_finally, NULL,
+ grpc_combiner_finally_scheduler(arg, false)),
+ GRPC_ERROR_NONE);
}
static void test_execute_finally(void) {
@@ -144,8 +147,10 @@ static void test_execute_finally(void) {
grpc_combiner *lock = grpc_combiner_create(NULL);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_combiner_execute(&exec_ctx, lock, grpc_closure_create(add_finally, lock),
- GRPC_ERROR_NONE, false);
+ grpc_closure_sched(&exec_ctx,
+ grpc_closure_create(add_finally, lock,
+ grpc_combiner_scheduler(lock, false)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(got_in_finally);
grpc_combiner_destroy(&exec_ctx, lock);
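
combiner_test.c shows the same unification applied to combiners: instead of the dedicated grpc_combiner_execute()/grpc_combiner_execute_finally() entry points, the lock is bound to the closure at creation time via grpc_combiner_scheduler() or grpc_combiner_finally_scheduler() (the bool mirrors the false flag the old calls passed), and execution then goes through the single grpc_closure_sched() path. A sketch of the resulting helper shape:

static void run_under_combiner(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
                               grpc_iomgr_cb_func cb, void *arg) {
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_create(cb, arg, grpc_combiner_scheduler(lock, false)),
      GRPC_ERROR_NONE);
}
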
diff --git a/test/core/iomgr/endpoint_pair_test.c b/test/core/iomgr/endpoint_pair_test.c
index 2a257a7cea..f02171f2ef 100644
--- a/test/core/iomgr/endpoint_pair_test.c
+++ b/test/core/iomgr/endpoint_pair_test.c
@@ -81,7 +81,8 @@ int main(int argc, char **argv) {
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, g_mu);
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c
index 8186ea7e85..87a9d79e9b 100644
--- a/test/core/iomgr/endpoint_tests.c
+++ b/test/core/iomgr/endpoint_tests.c
@@ -211,9 +211,10 @@ static void read_and_write_test(grpc_endpoint_test_config config,
state.write_done = 0;
state.current_read_data = 0;
state.current_write_data = 0;
- grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state);
+ grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state,
+ grpc_schedule_on_exec_ctx);
grpc_closure_init(&state.done_write, read_and_write_test_write_handler,
- &state);
+ &state, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&state.outgoing);
grpc_slice_buffer_init(&state.incoming);
@@ -290,16 +291,19 @@ static void multiple_shutdown_test(grpc_endpoint_test_config config) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
- grpc_closure_create(inc_on_failure, &fail_count));
+ grpc_closure_create(inc_on_failure, &fail_count,
+ grpc_schedule_on_exec_ctx));
wait_for_fail_count(&exec_ctx, &fail_count, 0);
grpc_endpoint_shutdown(&exec_ctx, f.client_ep);
wait_for_fail_count(&exec_ctx, &fail_count, 1);
grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
- grpc_closure_create(inc_on_failure, &fail_count));
+ grpc_closure_create(inc_on_failure, &fail_count,
+ grpc_schedule_on_exec_ctx));
wait_for_fail_count(&exec_ctx, &fail_count, 2);
grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
- grpc_closure_create(inc_on_failure, &fail_count));
+ grpc_closure_create(inc_on_failure, &fail_count,
+ grpc_schedule_on_exec_ctx));
wait_for_fail_count(&exec_ctx, &fail_count, 3);
grpc_endpoint_shutdown(&exec_ctx, f.client_ep);
wait_for_fail_count(&exec_ctx, &fail_count, 3);
diff --git a/test/core/iomgr/ev_epoll_linux_test.c b/test/core/iomgr/ev_epoll_linux_test.c
index 564b05d7f4..5bce9801a5 100644
--- a/test/core/iomgr/ev_epoll_linux_test.c
+++ b/test/core/iomgr/ev_epoll_linux_test.c
@@ -102,7 +102,8 @@ static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx,
int i;
for (i = 0; i < num_pollsets; i++) {
- grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);
grpc_exec_ctx_flush(exec_ctx);
diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c
index 6166699fe6..4dd476526d 100644
--- a/test/core/iomgr/fd_posix_test.c
+++ b/test/core/iomgr/fd_posix_test.c
@@ -219,8 +219,8 @@ static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
se->sv = sv;
se->em_fd = grpc_fd_create(fd, "listener");
grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
- se->session_read_closure.cb = session_read_cb;
- se->session_read_closure.cb_arg = se;
+ grpc_closure_init(&se->session_read_closure, session_read_cb, se,
+ grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
@@ -249,8 +249,8 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
sv->em_fd = grpc_fd_create(fd, "server");
grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
/* Register to be interested in reading from listen_fd. */
- sv->listen_closure.cb = listen_cb;
- sv->listen_closure.cb_arg = sv;
+ grpc_closure_init(&sv->listen_closure, listen_cb, sv,
+ grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);
return port;
@@ -333,8 +333,8 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
if (errno == EAGAIN) {
gpr_mu_lock(g_mu);
if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
- cl->write_closure.cb = client_session_write;
- cl->write_closure.cb_arg = cl;
+ grpc_closure_init(&cl->write_closure, client_session_write, cl,
+ grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure);
cl->client_write_cnt++;
} else {
@@ -459,10 +459,10 @@ static void test_grpc_fd_change(void) {
grpc_closure second_closure;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- first_closure.cb = first_read_callback;
- first_closure.cb_arg = &a;
- second_closure.cb = second_read_callback;
- second_closure.cb_arg = &b;
+ grpc_closure_init(&first_closure, first_read_callback, &a,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&second_closure, second_read_callback, &b,
+ grpc_schedule_on_exec_ctx);
init_change_data(&a);
init_change_data(&b);
@@ -546,7 +546,8 @@ int main(int argc, char **argv) {
grpc_pollset_init(g_pollset, &g_mu);
test_grpc_fd();
test_grpc_fd_change();
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(g_pollset);
diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c
index e4136a7a7a..d844e6eceb 100644
--- a/test/core/iomgr/resolve_address_test.c
+++ b/test/core/iomgr/resolve_address_test.c
@@ -71,7 +71,8 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) {
grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
grpc_pollset_set_destroy(args->pollset_set);
grpc_closure do_nothing_cb;
- grpc_closure_init(&do_nothing_cb, do_nothing, NULL);
+ grpc_closure_init(&do_nothing_cb, do_nothing, NULL,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
// exec_ctx needs to be flushed before calling grpc_pollset_destroy()
grpc_exec_ctx_flush(exec_ctx);
@@ -136,8 +137,10 @@ static void test_localhost(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, "localhost:1", NULL, args.pollset_set,
- grpc_closure_create(must_succeed, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, "localhost:1", NULL, args.pollset_set,
+ grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -147,8 +150,10 @@ static void test_default_port(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, "localhost", "1", args.pollset_set,
- grpc_closure_create(must_succeed, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, "localhost", "1", args.pollset_set,
+ grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -158,8 +163,10 @@ static void test_missing_default_port(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, "localhost", NULL, args.pollset_set,
- grpc_closure_create(must_fail, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, "localhost", NULL, args.pollset_set,
+ grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -169,8 +176,10 @@ static void test_ipv6_with_port(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set,
- grpc_closure_create(must_succeed, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set,
+ grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -185,8 +194,10 @@ static void test_ipv6_without_port(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, kCases[i], "80", args.pollset_set,
- grpc_closure_create(must_succeed, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, kCases[i], "80", args.pollset_set,
+ grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -202,8 +213,10 @@ static void test_invalid_ip_addresses(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, kCases[i], NULL, args.pollset_set,
- grpc_closure_create(must_fail, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, kCases[i], NULL, args.pollset_set,
+ grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -219,8 +232,10 @@ static void test_unparseable_hostports(void) {
args_struct args;
args_init(&exec_ctx, &args);
poll_pollset_until_request_done(&args);
- grpc_resolve_address(&exec_ctx, kCases[i], "1", args.pollset_set,
- grpc_closure_create(must_fail, &args), &args.addrs);
+ grpc_resolve_address(
+ &exec_ctx, kCases[i], "1", args.pollset_set,
+ grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+ &args.addrs);
args_finish(&exec_ctx, &args);
grpc_exec_ctx_finish(&exec_ctx);
}
diff --git a/test/core/iomgr/resource_quota_test.c b/test/core/iomgr/resource_quota_test.c
index a82d44f7f8..181776341f 100644
--- a/test/core/iomgr/resource_quota_test.c
+++ b/test/core/iomgr/resource_quota_test.c
@@ -45,7 +45,9 @@ static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
*(bool *)a = true;
}
-grpc_closure *set_bool(bool *p) { return grpc_closure_create(set_bool_cb, p); }
+grpc_closure *set_bool(bool *p) {
+ return grpc_closure_create(set_bool_cb, p, grpc_schedule_on_exec_ctx);
+}
typedef struct {
size_t size;
@@ -67,7 +69,7 @@ grpc_closure *make_reclaimer(grpc_resource_user *resource_user, size_t size,
a->size = size;
a->resource_user = resource_user;
a->then = then;
- return grpc_closure_create(reclaimer_cb, a);
+ return grpc_closure_create(reclaimer_cb, a, grpc_schedule_on_exec_ctx);
}
static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg,
@@ -76,7 +78,8 @@ static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_closure_run(exec_ctx, arg, GRPC_ERROR_NONE);
}
grpc_closure *make_unused_reclaimer(grpc_closure *then) {
- return grpc_closure_create(unused_reclaimer_cb, then);
+ return grpc_closure_create(unused_reclaimer_cb, then,
+ grpc_schedule_on_exec_ctx);
}
static void destroy_user(grpc_resource_user *usr) {
diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c
index 5fab826fb7..0ea7a000eb 100644
--- a/test/core/iomgr/tcp_client_posix_test.c
+++ b/test/core/iomgr/tcp_client_posix_test.c
@@ -113,7 +113,7 @@ void test_succeeds(void) {
/* connect to it */
GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)addr,
(socklen_t *)&resolved_addr.len) == 0);
- grpc_closure_init(&done, must_succeed, NULL);
+ grpc_closure_init(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
&resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -163,7 +163,7 @@ void test_fails(void) {
gpr_mu_unlock(g_mu);
/* connect to a broken address */
- grpc_closure_init(&done, must_fail, NULL);
+ grpc_closure_init(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
&resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -207,7 +207,8 @@ int main(int argc, char **argv) {
gpr_log(GPR_ERROR, "End of first test");
test_fails();
grpc_pollset_set_destroy(g_pollset_set);
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c
index 5eafa570bb..c646e61de0 100644
--- a/test/core/iomgr/tcp_posix_test.c
+++ b/test/core/iomgr/tcp_posix_test.c
@@ -194,7 +194,7 @@ static void read_test(size_t num_bytes, size_t slice_size) {
state.read_bytes = 0;
state.target_read_bytes = written_bytes;
grpc_slice_buffer_init(&state.incoming);
- grpc_closure_init(&state.read_cb, read_cb, &state);
+ grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
@@ -245,7 +245,7 @@ static void large_read_test(size_t slice_size) {
state.read_bytes = 0;
state.target_read_bytes = (size_t)written_bytes;
grpc_slice_buffer_init(&state.incoming);
- grpc_closure_init(&state.read_cb, read_cb, &state);
+ grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
@@ -384,7 +384,8 @@ static void write_test(size_t num_bytes, size_t slice_size) {
grpc_slice_buffer_init(&outgoing);
grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
- grpc_closure_init(&write_done_closure, write_done, &state);
+ grpc_closure_init(&write_done_closure, write_done, &state,
+ grpc_schedule_on_exec_ctx);
grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
drain_socket_blocking(sv[0], num_bytes, num_bytes);
@@ -429,7 +430,8 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure fd_released_cb;
int fd_released_done = 0;
- grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done);
+ grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
+ grpc_schedule_on_exec_ctx);
gpr_log(GPR_INFO,
"Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
@@ -452,7 +454,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
state.read_bytes = 0;
state.target_read_bytes = written_bytes;
grpc_slice_buffer_init(&state.incoming);
- grpc_closure_init(&state.read_cb, read_cb, &state);
+ grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
@@ -561,7 +563,8 @@ int main(int argc, char **argv) {
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, g_mu);
run_tests();
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c
index 9a7810e227..020f005980 100644
--- a/test/core/iomgr/tcp_server_posix_test.c
+++ b/test/core/iomgr/tcp_server_posix_test.c
@@ -104,7 +104,7 @@ static void server_weak_ref_shutdown(grpc_exec_ctx *exec_ctx, void *arg,
static void server_weak_ref_init(server_weak_ref *weak_ref) {
weak_ref->server = NULL;
grpc_closure_init(&weak_ref->server_shutdown, server_weak_ref_shutdown,
- weak_ref);
+ weak_ref, grpc_schedule_on_exec_ctx);
}
/* Make weak_ref->server_shutdown a shutdown_starting cb on server.
@@ -366,7 +366,8 @@ int main(int argc, char **argv) {
test_connect(1);
test_connect(10);
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c
index 9bea229466..0a247caf89 100644
--- a/test/core/iomgr/udp_server_test.c
+++ b/test/core/iomgr/udp_server_test.c
@@ -234,7 +234,8 @@ int main(int argc, char **argv) {
test_receive(1);
test_receive(10);
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(g_pollset);
diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c
index d4c755088d..d624a38438 100644
--- a/test/core/security/credentials_test.c
+++ b/test/core/security/credentials_test.c
@@ -565,7 +565,7 @@ static int compute_engine_httpcli_get_success_override(
grpc_httpcli_response *response) {
validate_compute_engine_http_request(request);
*response = http_response(200, valid_oauth2_json_response);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -575,7 +575,7 @@ static int compute_engine_httpcli_get_failure_override(
grpc_httpcli_response *response) {
validate_compute_engine_http_request(request);
*response = http_response(403, "Not Authorized.");
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -668,7 +668,7 @@ static int refresh_token_httpcli_post_success(
grpc_closure *on_done, grpc_httpcli_response *response) {
validate_refresh_token_http_request(request, body, body_size);
*response = http_response(200, valid_oauth2_json_response);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -678,7 +678,7 @@ static int refresh_token_httpcli_post_failure(
grpc_closure *on_done, grpc_httpcli_response *response) {
validate_refresh_token_http_request(request, body, body_size);
*response = http_response(403, "Not Authorized.");
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -917,7 +917,7 @@ static int default_creds_gce_detection_httpcli_get_success_override(
response->hdrs = headers;
GPR_ASSERT(strcmp(request->http.path, "/") == 0);
GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -975,7 +975,7 @@ static int default_creds_gce_detection_httpcli_get_failure_override(
GPR_ASSERT(strcmp(request->http.path, "/") == 0);
GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
*response = http_response(200, "");
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c
index 9a21814adc..a4d65dccd9 100644
--- a/test/core/security/jwt_verifier_test.c
+++ b/test/core/security/jwt_verifier_test.c
@@ -346,7 +346,7 @@ static int httpcli_get_google_keys_for_email(
"/robot/v1/metadata/x509/"
"777-abaslkan11hlb6nmim3bpspl31ud@developer."
"gserviceaccount.com") == 0);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -390,7 +390,7 @@ static int httpcli_get_custom_keys_for_email(
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
GPR_ASSERT(strcmp(request->host, "keys.bar.com") == 0);
GPR_ASSERT(strcmp(request->http.path, "/jwk/foo@bar.com") == 0);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -424,7 +424,7 @@ static int httpcli_get_jwk_set(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
GPR_ASSERT(strcmp(request->host, "www.googleapis.com") == 0);
GPR_ASSERT(strcmp(request->http.path, "/oauth2/v3/certs") == 0);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -439,7 +439,7 @@ static int httpcli_get_openid_config(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(strcmp(request->http.path, GRPC_OPENID_CONFIG_URL_SUFFIX) == 0);
grpc_httpcli_set_override(httpcli_get_jwk_set,
httpcli_post_should_not_be_called);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
@@ -479,7 +479,7 @@ static int httpcli_get_bad_json(grpc_exec_ctx *exec_ctx,
grpc_httpcli_response *response) {
*response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}"));
GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
- grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
return 1;
}
diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c
index 44a209258d..ff77af908a 100644
--- a/test/core/security/oauth2_utils.c
+++ b/test/core/security/oauth2_utils.c
@@ -92,7 +92,8 @@ char *grpc_test_fetch_oauth2_token_with_credentials(
request.pops = grpc_polling_entity_create_from_pollset(pollset);
request.is_done = 0;
- grpc_closure_init(&do_nothing_closure, do_nothing, NULL);
+ grpc_closure_init(&do_nothing_closure, do_nothing, NULL,
+ grpc_schedule_on_exec_ctx);
grpc_call_credentials_get_request_metadata(
&exec_ctx, creds, &request.pops, null_ctx, on_oauth2_response, &request);
diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c
index b5d95004fe..cbf8a171af 100644
--- a/test/core/security/secure_endpoint_test.c
+++ b/test/core/security/secure_endpoint_test.c
@@ -158,7 +158,7 @@ static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) {
gpr_log(GPR_INFO, "Start test left over");
grpc_slice_buffer_init(&incoming);
- grpc_closure_init(&done_closure, inc_call_ctr, &n);
+ grpc_closure_init(&done_closure, inc_call_ctr, &n, grpc_schedule_on_exec_ctx);
grpc_endpoint_read(&exec_ctx, f.client_ep, &incoming, &done_closure);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(n == 1);
@@ -191,7 +191,8 @@ int main(int argc, char **argv) {
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, g_mu);
test_leftover(configs[1], 1);
- grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+ grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+ grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c
index 93a4794222..8ebe8d07e4 100644
--- a/test/core/surface/concurrent_connectivity_test.c
+++ b/test/core/surface/concurrent_connectivity_test.c
@@ -229,9 +229,9 @@ int main(int argc, char **argv) {
gpr_atm_rel_store(&args.stop, 1);
gpr_thd_join(server);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_pollset_shutdown(
- &exec_ctx, args.pollset,
- grpc_closure_create(done_pollset_shutdown, args.pollset));
+ grpc_pollset_shutdown(&exec_ctx, args.pollset,
+ grpc_closure_create(done_pollset_shutdown, args.pollset,
+ grpc_schedule_on_exec_ctx));
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
diff --git a/test/core/surface/lame_client_test.c b/test/core/surface/lame_client_test.c
index 6afcefca92..b6db6a6b08 100644
--- a/test/core/surface/lame_client_test.c
+++ b/test/core/surface/lame_client_test.c
@@ -62,7 +62,8 @@ void test_transport_op(grpc_channel *channel) {
grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_closure_init(&transport_op_cb, verify_connectivity, &state);
+ grpc_closure_init(&transport_op_cb, verify_connectivity, &state,
+ grpc_schedule_on_exec_ctx);
op = grpc_make_transport_op(NULL);
op->on_connectivity_state_change = &transport_op_cb;
@@ -71,7 +72,8 @@ void test_transport_op(grpc_channel *channel) {
elem->filter->start_transport_op(&exec_ctx, elem, op);
grpc_exec_ctx_finish(&exec_ctx);
- grpc_closure_init(&transport_op_cb, do_nothing, NULL);
+ grpc_closure_init(&transport_op_cb, do_nothing, NULL,
+ grpc_schedule_on_exec_ctx);
op = grpc_make_transport_op(&transport_op_cb);
elem->filter->start_transport_op(&exec_ctx, elem, op);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/transport/connectivity_state_test.c b/test/core/transport/connectivity_state_test.c
index 1050059eff..3520ef0a80 100644
--- a/test/core/transport/connectivity_state_test.c
+++ b/test/core/transport/connectivity_state_test.c
@@ -86,7 +86,8 @@ static void test_check(void) {
static void test_subscribe_then_unsubscribe(void) {
grpc_connectivity_state_tracker tracker;
- grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG);
+ grpc_closure *closure =
+ grpc_closure_create(must_fail, THE_ARG, grpc_schedule_on_exec_ctx);
grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_DEBUG, "test_subscribe_then_unsubscribe");
@@ -109,7 +110,8 @@ static void test_subscribe_then_unsubscribe(void) {
static void test_subscribe_then_destroy(void) {
grpc_connectivity_state_tracker tracker;
- grpc_closure *closure = grpc_closure_create(must_succeed, THE_ARG);
+ grpc_closure *closure =
+ grpc_closure_create(must_succeed, THE_ARG, grpc_schedule_on_exec_ctx);
grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_DEBUG, "test_subscribe_then_destroy");
@@ -128,7 +130,8 @@ static void test_subscribe_then_destroy(void) {
static void test_subscribe_with_failure_then_destroy(void) {
grpc_connectivity_state_tracker tracker;
- grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG);
+ grpc_closure *closure =
+ grpc_closure_create(must_fail, THE_ARG, grpc_schedule_on_exec_ctx);
grpc_connectivity_state state = GRPC_CHANNEL_SHUTDOWN;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_DEBUG, "test_subscribe_with_failure_then_destroy");
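
Structurally, the new parameter is a small vtable indirection: the closure stores a grpc_closure_scheduler* at init/create time, and scheduling dispatches through it instead of through a flag at the call site. A rough sketch of the assumed shape; the field and typedef names below are reconstructed for illustration, not taken from these hunks:

typedef struct grpc_closure_scheduler_vtable {
  /* run the closure immediately on this exec_ctx */
  void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
              grpc_error *error);
  /* queue the closure to be run later */
  void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                grpc_error *error);
} grpc_closure_scheduler_vtable;

struct grpc_closure_scheduler {
  const grpc_closure_scheduler_vtable *vtable;
};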
diff --git a/test/core/util/mock_endpoint.c b/test/core/util/mock_endpoint.c
index bf6d85252a..04793bceab 100644
--- a/test/core/util/mock_endpoint.c
+++ b/test/core/util/mock_endpoint.c
@@ -55,7 +55,7 @@ static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
gpr_mu_lock(&m->mu);
if (m->read_buffer.count > 0) {
grpc_slice_buffer_swap(&m->read_buffer, slices);
- grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
} else {
m->on_read = cb;
m->on_read_out = slices;
@@ -69,7 +69,7 @@ static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
for (size_t i = 0; i < slices->count; i++) {
m->on_write(slices->slices[i]);
}
- grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
}
static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -82,8 +82,8 @@ static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
gpr_mu_lock(&m->mu);
if (m->on_read) {
- grpc_exec_ctx_sched(exec_ctx, m->on_read,
- GRPC_ERROR_CREATE("Endpoint Shutdown"), NULL);
+ grpc_closure_sched(exec_ctx, m->on_read,
+ GRPC_ERROR_CREATE("Endpoint Shutdown"));
m->on_read = NULL;
}
gpr_mu_unlock(&m->mu);
@@ -144,7 +144,7 @@ void grpc_mock_endpoint_put_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
gpr_mu_lock(&m->mu);
if (m->on_read != NULL) {
grpc_slice_buffer_add(m->on_read_out, slice);
- grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE);
m->on_read = NULL;
} else {
grpc_slice_buffer_add(&m->read_buffer, slice);
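
The mock_endpoint hunks show the second half of the migration: grpc_exec_ctx_sched(exec_ctx, cb, error, NULL) becomes grpc_closure_sched(exec_ctx, cb, error). The trailing argument (a workqueue, always NULL in these tests) disappears because the destination now travels with the closure itself:

/* Before: the caller chose the destination at schedule time
   (NULL workqueue meaning "run on the exec_ctx"). */
grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);

/* After: the closure already carries its scheduler, so the call site
   only supplies the result error. */
grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);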
diff --git a/test/core/util/passthru_endpoint.c b/test/core/util/passthru_endpoint.c
index b3405f02e9..15ba092c5b 100644
--- a/test/core/util/passthru_endpoint.c
+++ b/test/core/util/passthru_endpoint.c
@@ -63,11 +63,10 @@ static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
half *m = (half *)ep;
gpr_mu_lock(&m->parent->mu);
if (m->parent->shutdown) {
- grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown"),
- NULL);
+ grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown"));
} else if (m->read_buffer.count > 0) {
grpc_slice_buffer_swap(&m->read_buffer, slices);
- grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
} else {
m->on_read = cb;
m->on_read_out = slices;
@@ -91,7 +90,7 @@ static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
for (size_t i = 0; i < slices->count; i++) {
grpc_slice_buffer_add(m->on_read_out, grpc_slice_ref(slices->slices[i]));
}
- grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE, NULL);
+ grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE);
m->on_read = NULL;
} else {
for (size_t i = 0; i < slices->count; i++) {
@@ -99,7 +98,7 @@ static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
}
gpr_mu_unlock(&m->parent->mu);
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_sched(exec_ctx, cb, error);
}
static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -113,14 +112,12 @@ static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
gpr_mu_lock(&m->parent->mu);
m->parent->shutdown = true;
if (m->on_read) {
- grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"),
- NULL);
+ grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"));
m->on_read = NULL;
}
m = other_half(m);
if (m->on_read) {
- grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"),
- NULL);
+ grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"));
m->on_read = NULL;
}
gpr_mu_unlock(&m->parent->mu);
diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c
index b2342feeb4..0bde726ba1 100644
--- a/test/core/util/port_server_client.c
+++ b/test/core/util/port_server_client.c
@@ -92,7 +92,8 @@ void grpc_free_port_using_server(char *server, int port) {
grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(pollset, &pr.mu);
pr.pops = grpc_polling_entity_create_from_pollset(pollset);
- shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops);
+ shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops,
+ grpc_schedule_on_exec_ctx);
req.host = server;
gpr_asprintf(&path, "/drop/%d", port);
@@ -103,7 +104,9 @@ void grpc_free_port_using_server(char *server, int port) {
grpc_resource_quota_create("port_server_client/free");
grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
- grpc_closure_create(freed_port_from_server, &pr), &rsp);
+ grpc_closure_create(freed_port_from_server, &pr,
+ grpc_schedule_on_exec_ctx),
+ &rsp);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(pr.mu);
while (!pr.done) {
@@ -174,7 +177,8 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resource_quota_create("port_server_client/pick_retry");
grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
- grpc_closure_create(got_port_from_server, pr),
+ grpc_closure_create(got_port_from_server, pr,
+ grpc_schedule_on_exec_ctx),
&pr->response);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
return;
@@ -208,7 +212,8 @@ int grpc_pick_port_using_server(char *server) {
grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(pollset, &pr.mu);
pr.pops = grpc_polling_entity_create_from_pollset(pollset);
- shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops);
+ shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops,
+ grpc_schedule_on_exec_ctx);
pr.port = -1;
pr.server = server;
pr.ctx = &context;
@@ -219,10 +224,11 @@ int grpc_pick_port_using_server(char *server) {
grpc_httpcli_context_init(&context);
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("port_server_client/pick");
- grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
- GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
- grpc_closure_create(got_port_from_server, &pr),
- &pr.response);
+ grpc_httpcli_get(
+ &exec_ctx, &context, &pr.pops, resource_quota, &req,
+ GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
+ grpc_closure_create(got_port_from_server, &pr, grpc_schedule_on_exec_ctx),
+ &pr.response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(pr.mu);
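
One detail worth noting in port_server_client.c: the retry path in got_port_from_server builds a fresh closure for every attempt. That fits grpc_closure_create being a one-shot heap allocation. A sketch of the wrapper it is assumed to create; the names here are illustrative reconstructions, not shown in this diff:

/* The wrapper runs the user callback once and frees itself, which is
   why a consumed closure cannot be re-armed for the next retry. */
typedef struct {
  grpc_closure wrapper;
  grpc_iomgr_cb_func cb;
  void *cb_arg;
} wrapped_closure;

static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  wrapped_closure *wc = arg;
  grpc_iomgr_cb_func cb = wc->cb;
  void *cb_arg = wc->cb_arg;
  gpr_free(wc);
  cb(exec_ctx, cb_arg, error);
}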
diff --git a/test/core/util/test_tcp_server.c b/test/core/util/test_tcp_server.c
index 16df91d968..2338b81ab1 100644
--- a/test/core/util/test_tcp_server.c
+++ b/test/core/util/test_tcp_server.c
@@ -57,7 +57,8 @@ void test_tcp_server_init(test_tcp_server *server,
grpc_tcp_server_cb on_connect, void *user_data) {
grpc_init();
server->tcp_server = NULL;
- grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server);
+ grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server,
+ grpc_schedule_on_exec_ctx);
server->shutdown = 0;
server->pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(server->pollset, &server->mu);
@@ -111,7 +112,8 @@ void test_tcp_server_destroy(test_tcp_server *server) {
gpr_timespec shutdown_deadline;
grpc_closure do_nothing_cb;
grpc_tcp_server_unref(&exec_ctx, server->tcp_server);
- grpc_closure_init(&do_nothing_cb, do_nothing, NULL);
+ grpc_closure_init(&do_nothing_cb, do_nothing, NULL,
+ grpc_schedule_on_exec_ctx);
shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(5, GPR_TIMESPAN));
while (!server->shutdown &&
diff --git a/tools/codegen/core/gen_nano_proto.sh b/tools/codegen/core/gen_nano_proto.sh
index df107c208f..99e49814b8 100755
--- a/tools/codegen/core/gen_nano_proto.sh
+++ b/tools/codegen/core/gen_nano_proto.sh
@@ -42,46 +42,6 @@
# 4: Output dir not an absolute path.
# 5: Couldn't create output directory (2nd argument).
-read -r -d '' COPYRIGHT <<'EOF'
-/*
- *
- * Copyright <YEAR>, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-EOF
-
-CURRENT_YEAR=$(date +%Y)
-COPYRIGHT_FILE=$(mktemp)
-echo "${COPYRIGHT/<YEAR>/$CURRENT_YEAR}" > $COPYRIGHT_FILE
-
set -ex
if [ $# -lt 2 ] || [ $# -gt 3 ]; then
echo "Usage: $0 <input.proto> <absolute path to output dir> [grpc path]"
@@ -143,13 +103,6 @@ readonly UC_PROTO_BASENAME=`echo $PROTO_BASENAME | tr [a-z] [A-Z]`
sed -i "s:PB_${UC_PROTO_BASENAME}_PB_H_INCLUDED:GRPC_${INCLUDE_GUARD_BASE}_${UC_PROTO_BASENAME}_PB_H:g" \
"$OUTPUT_DIR/$PROTO_BASENAME.pb.h"
-# prepend copyright
-TMPFILE=$(mktemp)
-cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c" > $TMPFILE
-mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c"
-cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" > $TMPFILE
-mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h"
-
deactivate
rm -rf $VENV_DIR
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index 51852adfd3..718bb563f3 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -103,6 +103,9 @@ _EXEMPT = frozenset((
'examples/python/route_guide/route_guide_pb2.py',
'examples/python/route_guide/route_guide_pb2_grpc.py',
+ 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
+ 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
+
# An older file originally from outside gRPC.
'src/php/tests/bootstrap.php',
))