From ee4b14521380f8c387c27f4cd351565d0afa3d61 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 12 May 2017 10:56:03 -0700 Subject: Remove workqueue, covered_by_poller as concepts, get Mac build up --- .../resolvers/dns_resolver_connectivity_test.c | 11 ++++---- .../client_channel/resolvers/dns_resolver_test.c | 2 +- .../client_channel/resolvers/fake_resolver_test.c | 6 ++-- .../resolvers/sockaddr_resolver_test.c | 2 +- test/core/end2end/fake_resolver.c | 7 ++--- test/core/iomgr/combiner_test.c | 32 +++++++++++----------- test/core/util/mock_endpoint.c | 13 ++------- test/core/util/passthru_endpoint.c | 13 ++------- test/core/util/trickle_endpoint.c | 19 +++---------- 9 files changed, 37 insertions(+), 68 deletions(-) (limited to 'test/core') diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c index 8e15faa1dd..3f5a2c946e 100644 --- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c @@ -126,11 +126,10 @@ static void call_resolver_next_after_locking(grpc_exec_ctx *exec_ctx, a->resolver = resolver; a->result = result; a->on_complete = on_complete; - grpc_closure_sched( - exec_ctx, - grpc_closure_create(call_resolver_next_now_lock_taken, a, - grpc_combiner_scheduler(resolver->combiner, false)), - GRPC_ERROR_NONE); + grpc_closure_sched(exec_ctx, grpc_closure_create( + call_resolver_next_now_lock_taken, a, + grpc_combiner_scheduler(resolver->combiner)), + GRPC_ERROR_NONE); } int main(int argc, char **argv) { @@ -138,7 +137,7 @@ int main(int argc, char **argv) { grpc_init(); gpr_mu_init(&g_mu); - g_combiner = grpc_combiner_create(NULL); + g_combiner = grpc_combiner_create(); grpc_resolve_address = my_resolve_address; grpc_channel_args *result = (grpc_channel_args *)1; diff --git a/test/core/client_channel/resolvers/dns_resolver_test.c b/test/core/client_channel/resolvers/dns_resolver_test.c index fa7857d418..55bcc1a0d7 100644 --- a/test/core/client_channel/resolvers/dns_resolver_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_test.c @@ -81,7 +81,7 @@ int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); - g_combiner = grpc_combiner_create(NULL); + g_combiner = grpc_combiner_create(); dns = grpc_resolver_factory_lookup("dns"); diff --git a/test/core/client_channel/resolvers/fake_resolver_test.c b/test/core/client_channel/resolvers/fake_resolver_test.c index 861918fbd6..a20f119e64 100644 --- a/test/core/client_channel/resolvers/fake_resolver_test.c +++ b/test/core/client_channel/resolvers/fake_resolver_test.c @@ -88,7 +88,7 @@ void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void test_fake_resolver() { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_combiner *combiner = grpc_combiner_create(NULL); + grpc_combiner *combiner = grpc_combiner_create(); // Create resolver. grpc_fake_resolver_response_generator *response_generator = grpc_fake_resolver_response_generator_create(); @@ -116,7 +116,7 @@ static void test_fake_resolver() { memset(&on_res_arg, 0, sizeof(on_res_arg)); on_res_arg.expected_resolver_result = results; grpc_closure *on_resolution = grpc_closure_create( - on_resolution_cb, &on_res_arg, grpc_combiner_scheduler(combiner, false)); + on_resolution_cb, &on_res_arg, grpc_combiner_scheduler(combiner)); // Set resolver results and trigger first resolution. on_resolution_cb // performs the checks. 
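The hunks above, and the remaining test changes below, all apply one mechanical migration: with workqueues and the covered_by_poller concept removed, grpc_combiner_create() loses its workqueue argument and grpc_combiner_scheduler() loses its boolean flag. A minimal before/after sketch of the calling pattern, built only from the signatures visible in this patch (the on_done callback and its NULL argument are hypothetical placeholders):

/* Hypothetical closure callback, for illustration only. */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}

/* Before this patch: a combiner optionally wrapped a workqueue, and each
   scheduled closure declared whether it was covered by a poller. */
static void schedule_before(grpc_exec_ctx *exec_ctx) {
  grpc_combiner *lock = grpc_combiner_create(NULL);
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_create(on_done, NULL, grpc_combiner_scheduler(lock, false)),
      GRPC_ERROR_NONE);
  GRPC_COMBINER_UNREF(exec_ctx, lock, "example");
}

/* After this patch: both parameters are gone. */
static void schedule_after(grpc_exec_ctx *exec_ctx) {
  grpc_combiner *lock = grpc_combiner_create();
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_create(on_done, NULL, grpc_combiner_scheduler(lock)),
      GRPC_ERROR_NONE);
  GRPC_COMBINER_UNREF(exec_ctx, lock, "example");
}
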
@@ -151,7 +151,7 @@ static void test_fake_resolver() { memset(&on_res_arg_update, 0, sizeof(on_res_arg_update)); on_res_arg_update.expected_resolver_result = results_update; on_resolution = grpc_closure_create(on_resolution_cb, &on_res_arg_update, - grpc_combiner_scheduler(combiner, false)); + grpc_combiner_scheduler(combiner)); // Set updated resolver results and trigger a second resolution. grpc_fake_resolver_response_generator_set_response( diff --git a/test/core/client_channel/resolvers/sockaddr_resolver_test.c b/test/core/client_channel/resolvers/sockaddr_resolver_test.c index 847eabae3b..5049946852 100644 --- a/test/core/client_channel/resolvers/sockaddr_resolver_test.c +++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.c @@ -104,7 +104,7 @@ int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); - g_combiner = grpc_combiner_create(NULL); + g_combiner = grpc_combiner_create(); ipv4 = grpc_resolver_factory_lookup("ipv4"); ipv6 = grpc_resolver_factory_lookup("ipv6"); diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c index 736b224fd6..c84e53e26c 100644 --- a/test/core/end2end/fake_resolver.c +++ b/test/core/end2end/fake_resolver.c @@ -173,10 +173,9 @@ void grpc_fake_resolver_response_generator_set_response( GPR_ASSERT(generator->resolver != NULL); generator->next_response = grpc_channel_args_copy(next_response); grpc_closure_sched( - exec_ctx, - grpc_closure_create( - set_response_cb, generator, - grpc_combiner_scheduler(generator->resolver->base.combiner, false)), + exec_ctx, grpc_closure_create(set_response_cb, generator, + grpc_combiner_scheduler( + generator->resolver->base.combiner)), GRPC_ERROR_NONE); } diff --git a/test/core/iomgr/combiner_test.c b/test/core/iomgr/combiner_test.c index bc4d2af8ac..92935ca017 100644 --- a/test/core/iomgr/combiner_test.c +++ b/test/core/iomgr/combiner_test.c @@ -44,7 +44,7 @@ static void test_no_op(void) { gpr_log(GPR_DEBUG, "test_no_op"); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_COMBINER_UNREF(&exec_ctx, grpc_combiner_create(NULL), "test_no_op"); + GRPC_COMBINER_UNREF(&exec_ctx, grpc_combiner_create(), "test_no_op"); grpc_exec_ctx_finish(&exec_ctx); } @@ -56,12 +56,12 @@ static void set_bool_to_true(grpc_exec_ctx *exec_ctx, void *value, static void test_execute_one(void) { gpr_log(GPR_DEBUG, "test_execute_one"); - grpc_combiner *lock = grpc_combiner_create(NULL); + grpc_combiner *lock = grpc_combiner_create(); bool done = false; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure_sched(&exec_ctx, grpc_closure_create(set_bool_to_true, &done, - grpc_combiner_scheduler(lock, false)), + grpc_combiner_scheduler(lock)), GRPC_ERROR_NONE); grpc_exec_ctx_flush(&exec_ctx); GPR_ASSERT(done); @@ -95,10 +95,10 @@ static void execute_many_loop(void *a) { ex_args *c = gpr_malloc(sizeof(*c)); c->ctr = &args->ctr; c->value = n++; - grpc_closure_sched( - &exec_ctx, grpc_closure_create(check_one, c, grpc_combiner_scheduler( - args->lock, false)), - GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, + grpc_closure_create( + check_one, c, grpc_combiner_scheduler(args->lock)), + GRPC_ERROR_NONE); grpc_exec_ctx_flush(&exec_ctx); } // sleep for a little bit, to test a combiner draining and another thread @@ -111,7 +111,7 @@ static void execute_many_loop(void *a) { static void test_execute_many(void) { gpr_log(GPR_DEBUG, "test_execute_many"); - grpc_combiner *lock = grpc_combiner_create(NULL); + grpc_combiner *lock = grpc_combiner_create(); gpr_thd_id thds[100]; thd_args 
ta[GPR_ARRAY_SIZE(thds)]; for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) { @@ -136,21 +136,21 @@ static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_closure_sched(exec_ctx, grpc_closure_create( - in_finally, NULL, - grpc_combiner_finally_scheduler(arg, false)), + grpc_closure_sched(exec_ctx, + grpc_closure_create(in_finally, NULL, + grpc_combiner_finally_scheduler(arg)), GRPC_ERROR_NONE); } static void test_execute_finally(void) { gpr_log(GPR_DEBUG, "test_execute_finally"); - grpc_combiner *lock = grpc_combiner_create(NULL); + grpc_combiner *lock = grpc_combiner_create(); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_closure_sched(&exec_ctx, - grpc_closure_create(add_finally, lock, - grpc_combiner_scheduler(lock, false)), - GRPC_ERROR_NONE); + grpc_closure_sched( + &exec_ctx, + grpc_closure_create(add_finally, lock, grpc_combiner_scheduler(lock)), + GRPC_ERROR_NONE); grpc_exec_ctx_flush(&exec_ctx); GPR_ASSERT(got_in_finally); GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_finally"); diff --git a/test/core/util/mock_endpoint.c b/test/core/util/mock_endpoint.c index c747297984..e65c88b0bf 100644 --- a/test/core/util/mock_endpoint.c +++ b/test/core/util/mock_endpoint.c @@ -117,18 +117,9 @@ static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) { static int me_get_fd(grpc_endpoint *ep) { return -1; } -static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; } - static const grpc_endpoint_vtable vtable = { - me_read, - me_write, - me_get_workqueue, - me_add_to_pollset, - me_add_to_pollset_set, - me_shutdown, - me_destroy, - me_get_resource_user, - me_get_peer, + me_read, me_write, me_add_to_pollset, me_add_to_pollset_set, + me_shutdown, me_destroy, me_get_resource_user, me_get_peer, me_get_fd, }; diff --git a/test/core/util/passthru_endpoint.c b/test/core/util/passthru_endpoint.c index 6400845d23..0eea6749c6 100644 --- a/test/core/util/passthru_endpoint.c +++ b/test/core/util/passthru_endpoint.c @@ -169,23 +169,14 @@ static char *me_get_peer(grpc_endpoint *ep) { static int me_get_fd(grpc_endpoint *ep) { return -1; } -static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; } - static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) { half *m = (half *)ep; return m->resource_user; } static const grpc_endpoint_vtable vtable = { - me_read, - me_write, - me_get_workqueue, - me_add_to_pollset, - me_add_to_pollset_set, - me_shutdown, - me_destroy, - me_get_resource_user, - me_get_peer, + me_read, me_write, me_add_to_pollset, me_add_to_pollset_set, + me_shutdown, me_destroy, me_get_resource_user, me_get_peer, me_get_fd, }; diff --git a/test/core/util/trickle_endpoint.c b/test/core/util/trickle_endpoint.c index 69386a0718..a3f3a2a7af 100644 --- a/test/core/util/trickle_endpoint.c +++ b/test/core/util/trickle_endpoint.c @@ -92,11 +92,6 @@ static void te_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, gpr_mu_unlock(&te->mu); } -static grpc_workqueue *te_get_workqueue(grpc_endpoint *ep) { - trickle_endpoint *te = (trickle_endpoint *)ep; - return grpc_endpoint_get_workqueue(te->wrapped); -} - static void te_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_pollset *pollset) { trickle_endpoint *te = (trickle_endpoint *)ep; @@ -155,16 +150,10 @@ static void te_finish_write(grpc_exec_ctx *exec_ctx, void *arg, gpr_mu_unlock(&te->mu); } -static const grpc_endpoint_vtable vtable = {te_read, - te_write, - 
te_get_workqueue, - te_add_to_pollset, - te_add_to_pollset_set, - te_shutdown, - te_destroy, - te_get_resource_user, - te_get_peer, - te_get_fd}; +static const grpc_endpoint_vtable vtable = { + te_read, te_write, te_add_to_pollset, te_add_to_pollset_set, + te_shutdown, te_destroy, te_get_resource_user, te_get_peer, + te_get_fd}; grpc_endpoint *grpc_trickle_endpoint_create(grpc_endpoint *wrap, double bytes_per_second) { -- cgit v1.2.3 From 61f96c16834475438278e72cad6ae03013e7cebf Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 12 May 2017 13:36:39 -0700 Subject: Update epollers --- src/core/lib/iomgr/ev_epoll1_linux.c | 100 +------- .../lib/iomgr/ev_epoll_limited_pollers_linux.c | 198 +-------------- src/core/lib/iomgr/ev_epoll_thread_pool_linux.c | 172 ++----------- src/core/lib/iomgr/ev_epollex_linux.c | 156 +----------- src/core/lib/iomgr/ev_epollsig_linux.c | 279 ++++----------------- src/core/lib/iomgr/executor.c | 6 +- test/core/iomgr/ev_epollsig_linux_test.c | 82 ------ 7 files changed, 89 insertions(+), 904 deletions(-) (limited to 'test/core') diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index ad69f808cd..39785f6d56 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -58,7 +58,6 @@ #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" @@ -130,7 +129,9 @@ struct grpc_pollset { * Pollset-set Declarations */ -struct grpc_pollset_set {}; +struct grpc_pollset_set { + char unused; +}; /******************************************************************************* * Common helpers @@ -283,10 +284,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); } -static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { - return (grpc_workqueue *)0xb0b51ed; -} - static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { grpc_lfev_set_ready(exec_ctx, &fd->read_closure); @@ -313,8 +310,6 @@ GPR_TLS_DECL(g_current_thread_worker); static gpr_atm g_active_poller; static pollset_neighbourhood *g_neighbourhoods; static size_t g_num_neighbourhoods; -static gpr_mu g_wq_mu; -static grpc_closure_list g_wq_items; /* Return true if first in list */ static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) { @@ -363,8 +358,6 @@ static grpc_error *pollset_global_init(void) { gpr_atm_no_barrier_store(&g_active_poller, 0); global_wakeup_fd.read_fd = -1; grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd); - gpr_mu_init(&g_wq_mu); - g_wq_items = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; if (err != GRPC_ERROR_NONE) return err; struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET), .data.ptr = &global_wakeup_fd}; @@ -383,7 +376,6 @@ static grpc_error *pollset_global_init(void) { static void pollset_global_shutdown(void) { gpr_tls_destroy(&g_current_thread_pollset); gpr_tls_destroy(&g_current_thread_worker); - gpr_mu_destroy(&g_wq_mu); if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd); for (size_t i = 0; i < g_num_neighbourhoods; i++) { gpr_mu_destroy(&g_neighbourhoods[i].mu); @@ -507,9 +499,6 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, for (int i = 0; i < r; i++) { void *data_ptr = 
events[i].data.ptr; if (data_ptr == &global_wakeup_fd) { - gpr_mu_lock(&g_wq_mu); - grpc_closure_list_move(&g_wq_items, &exec_ctx->closure_list); - gpr_mu_unlock(&g_wq_mu); append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd), err_desc); } else { @@ -791,84 +780,6 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *fd) {} -/******************************************************************************* - * Workqueue Definitions - */ - -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue, - const char *file, int line, - const char *reason) { - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - const char *file, int line, const char *reason) {} -#else -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) { - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue) {} -#endif - -static void wq_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - // find a neighbourhood to wakeup - bool scheduled = false; - size_t initial_neighbourhood = choose_neighbourhood(); - for (size_t i = 0; !scheduled && i < g_num_neighbourhoods; i++) { - pollset_neighbourhood *neighbourhood = - &g_neighbourhoods[(initial_neighbourhood + i) % g_num_neighbourhoods]; - if (gpr_mu_trylock(&neighbourhood->mu)) { - if (neighbourhood->active_root != NULL) { - grpc_pollset *inspect = neighbourhood->active_root; - do { - if (gpr_mu_trylock(&inspect->mu)) { - if (inspect->root_worker != NULL) { - grpc_pollset_worker *inspect_worker = inspect->root_worker; - do { - if (inspect_worker->kick_state == UNKICKED) { - inspect_worker->kick_state = KICKED; - grpc_closure_list_append( - &inspect_worker->schedule_on_end_work, closure, error); - if (inspect_worker->initialized_cv) { - gpr_cv_signal(&inspect_worker->cv); - } - scheduled = true; - } - inspect_worker = inspect_worker->next; - } while (!scheduled && inspect_worker != inspect->root_worker); - } - gpr_mu_unlock(&inspect->mu); - } - inspect = inspect->next; - } while (!scheduled && inspect != neighbourhood->active_root); - } - gpr_mu_unlock(&neighbourhood->mu); - } - } - if (!scheduled) { - gpr_mu_lock(&g_wq_mu); - grpc_closure_list_append(&g_wq_items, closure, error); - gpr_mu_unlock(&g_wq_mu); - GRPC_LOG_IF_ERROR("workqueue_scheduler", - grpc_wakeup_fd_wakeup(&global_wakeup_fd)); - } -} - -static const grpc_closure_scheduler_vtable - singleton_workqueue_scheduler_vtable = {wq_sched, wq_sched, - "epoll1_workqueue"}; - -static grpc_closure_scheduler singleton_workqueue_scheduler = { - &singleton_workqueue_scheduler_vtable}; - -static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { - return &singleton_workqueue_scheduler; -} - /******************************************************************************* * Pollset-set Definitions */ @@ -920,7 +831,6 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, @@ -938,10 +848,6 @@ static const grpc_event_engine_vtable vtable = { .pollset_set_add_fd = pollset_set_add_fd, .pollset_set_del_fd = pollset_set_del_fd, - .workqueue_ref = workqueue_ref, - .workqueue_unref = 
workqueue_unref, - .workqueue_scheduler = workqueue_scheduler, - .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c index d23bf6c06c..d7a2d35484 100644 --- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +++ b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c @@ -61,7 +61,6 @@ #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/env.h" @@ -184,13 +183,15 @@ static void fd_global_shutdown(void); * Polling island Declarations */ -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +//#define PI_REFCOUNT_DEBUG + +#ifdef PI_REFCOUNT_DEBUG #define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__) #define PI_UNREF(exec_ctx, p, r) \ pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) -#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */ +#else /* defined(PI_REFCOUNT_DEBUG) */ #define PI_ADD_REF(p, r) pi_add_ref((p)) #define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p)) @@ -204,8 +205,6 @@ typedef struct worker_node { /* This is also used as grpc_workqueue (by directly casing it) */ typedef struct polling_island { - grpc_closure_scheduler workqueue_scheduler; - gpr_mu mu; /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement the refcount. @@ -226,15 +225,6 @@ typedef struct polling_island { /* Number of threads currently polling on this island */ gpr_atm poller_count; - /* Mutex guarding the read end of the workqueue (must be held to pop from - * workqueue_items) */ - gpr_mu workqueue_read_mu; - /* Queue of closures to be executed */ - gpr_mpscq workqueue_items; - /* Count of items in workqueue_items */ - gpr_atm workqueue_item_count; - /* Wakeup fd used to wake pollers to check the contents of workqueue_items */ - grpc_wakeup_fd workqueue_wakeup_fd; /* The list of workers waiting to do polling on this polling island */ gpr_mu worker_list_mu; @@ -323,8 +313,6 @@ static __thread polling_island *g_current_thread_polling_island; /* Forward declaration */ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -337,13 +325,10 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ -static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = { - workqueue_enqueue, workqueue_enqueue, "workqueue"}; - static void pi_add_ref(polling_island *pi); static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +#ifdef PI_REFCOUNT_DEBUG static void pi_add_ref_dbg(polling_island *pi, const char *reason, const char *file, int line) { long old_cnt = gpr_atm_acq_load(&pi->ref_count); @@ -359,36 +344,6 @@ static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi, gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)", (void *)pi, old_cnt, (old_cnt - 1), reason, file, line); } - -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue, - const char *file, int line, - const char *reason) { - if (workqueue != NULL) { - pi_add_ref_dbg((polling_island *)workqueue, 
reason, file, line); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - const char *file, int line, const char *reason) { - if (workqueue != NULL) { - pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line); - } -} -#else -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) { - if (workqueue != NULL) { - pi_add_ref((polling_island *)workqueue); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue) { - if (workqueue != NULL) { - pi_unref(exec_ctx, (polling_island *)workqueue); - } -} #endif static void pi_add_ref(polling_island *pi) { @@ -592,17 +547,12 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, *error = GRPC_ERROR_NONE; pi = gpr_malloc(sizeof(*pi)); - pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable; gpr_mu_init(&pi->mu); pi->fd_cnt = 0; pi->fd_capacity = 0; pi->fds = NULL; pi->epoll_fd = -1; - gpr_mu_init(&pi->workqueue_read_mu); - gpr_mpscq_init(&pi->workqueue_items); - gpr_atm_rel_store(&pi->workqueue_item_count, 0); - gpr_atm_rel_store(&pi->ref_count, 0); gpr_atm_rel_store(&pi->poller_count, 0); gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); @@ -610,11 +560,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, gpr_mu_init(&pi->worker_list_mu); worker_node_init(&pi->worker_list_head); - if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd), - err_desc)) { - goto done; - } - pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (pi->epoll_fd < 0) { @@ -622,8 +567,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, goto done; } - polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error); - if (initial_fd != NULL) { polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); } @@ -642,11 +585,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { if (pi->epoll_fd >= 0) { close(pi->epoll_fd); } - GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0); - gpr_mu_destroy(&pi->workqueue_read_mu); - gpr_mpscq_destroy(&pi->workqueue_items); gpr_mu_destroy(&pi->mu); - grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd); gpr_mu_destroy(&pi->worker_list_mu); GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head)); @@ -794,45 +733,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) { } } -static void workqueue_maybe_wakeup(polling_island *pi) { - /* If this thread is the current poller, then it may be that it's about to - decrement the current poller count, so we need to look past this thread */ - bool is_current_poller = (g_current_thread_polling_island == pi); - gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0; - gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count); - /* Only issue a wakeup if it's likely that some poller could come in and take - it right now. Note that since we do an anticipatory mpscq_pop every poll - loop, it's ok if we miss the wakeup here, as we'll get the work item when - the next poller enters anyway. 
*/ - if (current_pollers > min_current_pollers_for_wakeup) { - GRPC_LOG_IF_ERROR("workqueue_wakeup_fd", - grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd)); - } -} - -static void workqueue_move_items_to_parent(polling_island *q) { - polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to); - if (p == NULL) { - return; - } - gpr_mu_lock(&q->workqueue_read_mu); - int num_added = 0; - while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) { - gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items); - if (n != NULL) { - gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1); - gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1); - gpr_mpscq_push(&p->workqueue_items, n); - num_added++; - } - } - gpr_mu_unlock(&q->workqueue_read_mu); - if (num_added > 0) { - workqueue_maybe_wakeup(p); - } - workqueue_move_items_to_parent(p); -} - static polling_island *polling_island_merge(polling_island *p, polling_island *q, grpc_error **error) { @@ -857,8 +757,6 @@ static polling_island *polling_island_merge(polling_island *p, /* Add the 'merged_to' link from p --> q */ gpr_atm_rel_store(&p->merged_to, (gpr_atm)q); PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */ - - workqueue_move_items_to_parent(p); } /* else if p == q, nothing needs to be done */ @@ -869,32 +767,6 @@ static polling_island *polling_island_merge(polling_island *p, return q; } -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - GPR_TIMER_BEGIN("workqueue.enqueue", 0); - grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler; - /* take a ref to the workqueue: otherwise it can happen that whatever events - * this kicks off ends up destroying the workqueue before this function - * completes */ - GRPC_WORKQUEUE_REF(workqueue, "enqueue"); - polling_island *pi = (polling_island *)workqueue; - gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1); - closure->error_data.error = error; - gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next); - if (last == 0) { - workqueue_maybe_wakeup(pi); - } - workqueue_move_items_to_parent(pi); - GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue"); - GPR_TIMER_END("workqueue.enqueue", 0); -} - -static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { - polling_island *pi = (polling_island *)workqueue; - return workqueue == NULL ? 
grpc_schedule_on_exec_ctx - : &pi->workqueue_scheduler; -} - static grpc_error *polling_island_global_init() { grpc_error *error = GRPC_ERROR_NONE; @@ -1153,14 +1025,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); } -static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { - gpr_mu_lock(&fd->po.mu); - grpc_workqueue *workqueue = - GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue"); - gpr_mu_unlock(&fd->po.mu); - return workqueue; -} - /******************************************************************************* * Pollset Definitions */ @@ -1432,33 +1296,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->po.mu); } -static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, - polling_island *pi) { - if (gpr_mu_trylock(&pi->workqueue_read_mu)) { - gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items); - gpr_mu_unlock(&pi->workqueue_read_mu); - if (n != NULL) { - if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) { - workqueue_maybe_wakeup(pi); - } - grpc_closure *c = (grpc_closure *)n; - grpc_error *error = c->error_data.error; -#ifndef NDEBUG - c->scheduled = false; -#endif - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - return true; - } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) { - /* n == NULL might mean there's work but it's not available to be popped - * yet - try to ensure another workqueue wakes up to check shortly if so - */ - workqueue_maybe_wakeup(pi); - } - } - return false; -} - /* NOTE: This function may modify 'now' */ static bool acquire_polling_lease(grpc_pollset_worker *worker, polling_island *pi, gpr_timespec deadline, @@ -1594,12 +1431,7 @@ static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd, for (int i = 0; i < ep_rv; ++i) { void *data_ptr = ep_ev[i].data.ptr; - if (data_ptr == &pi->workqueue_wakeup_fd) { - append_error(error, - grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd), - err_desc); - maybe_do_workqueue_work(exec_ctx, pi); - } else if (data_ptr == &polling_island_wakeup_fd) { + if (data_ptr == &polling_island_wakeup_fd) { GRPC_POLLING_TRACE( "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: " "%d) got merged", @@ -1675,15 +1507,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, PI_ADD_REF(pi, "ps_work"); gpr_mu_unlock(&pollset->po.mu); - /* If we get some workqueue work to do, it might end up completing an item on - the completion queue, so there's no need to poll... 
so we skip that and - redo the complete loop to verify */ - if (!maybe_do_workqueue_work(exec_ctx, pi)) { - g_current_thread_polling_island = pi; - pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, - deadline, sig_mask, error); - g_current_thread_polling_island = NULL; - } + g_current_thread_polling_island = pi; + pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline, + sig_mask, error); + g_current_thread_polling_island = NULL; GPR_ASSERT(pi != NULL); @@ -2036,7 +1863,6 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, @@ -2054,10 +1880,6 @@ static const grpc_event_engine_vtable vtable = { .pollset_set_add_fd = pollset_set_add_fd, .pollset_set_del_fd = pollset_set_del_fd, - .workqueue_ref = workqueue_ref, - .workqueue_unref = workqueue_unref, - .workqueue_scheduler = workqueue_scheduler, - .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c index bb44321922..c56b47be11 100644 --- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +++ b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c @@ -61,7 +61,6 @@ #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" @@ -109,23 +108,22 @@ static void fd_global_shutdown(void); * epoll set Declarations */ -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +//#define EPS_REFCOUNT_DEBUG + +#ifdef EPS_REFCOUNT_DEBUG #define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__) #define EPS_UNREF(exec_ctx, p, r) \ eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) -#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */ +#else /* defined(EPS_REFCOUNT_DEBUG) */ #define EPS_ADD_REF(p, r) eps_add_ref((p)) #define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p)) #endif /* !defined(GRPC_EPS_REF_COUNT_DEBUG) */ -/* This is also used as grpc_workqueue (by directly casting it) */ typedef struct epoll_set { - grpc_closure_scheduler workqueue_scheduler; - /* Mutex poller should acquire to poll this. 
This enforces that only one * poller can be polling on epoll_set at any time */ gpr_mu mu; @@ -139,15 +137,6 @@ typedef struct epoll_set { /* Number of threads currently polling on this epoll set*/ gpr_atm poller_count; - /* Mutex guarding the read end of the workqueue (must be held to pop from - * workqueue_items) */ - gpr_mu workqueue_read_mu; - /* Queue of closures to be executed */ - gpr_mpscq workqueue_items; - /* Count of items in workqueue_items */ - gpr_atm workqueue_item_count; - /* Wakeup fd used to wake pollers to check the contents of workqueue_items */ - grpc_wakeup_fd workqueue_wakeup_fd; /* Is the epoll set shutdown */ gpr_atm is_shutdown; @@ -181,7 +170,9 @@ struct grpc_pollset { /******************************************************************************* * Pollset-set Declarations */ -struct grpc_pollset_set {}; +struct grpc_pollset_set { + char unused; +}; /***************************************************************************** * Dedicated polling threads and pollsets - Declarations @@ -235,8 +226,6 @@ static __thread epoll_set *g_current_thread_epoll_set; /* Forward declaration */ static void epoll_set_delete(epoll_set *eps); -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -249,13 +238,10 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ -static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = { - workqueue_enqueue, workqueue_enqueue, "workqueue"}; - static void eps_add_ref(epoll_set *eps); static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps); -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +#ifdef EPS_REFCOUNT_DEBUG static void eps_add_ref_dbg(epoll_set *eps, const char *reason, const char *file, int line) { long old_cnt = gpr_atm_acq_load(&eps->ref_count); @@ -271,36 +257,6 @@ static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps, gpr_log(GPR_DEBUG, "Unref eps: %p, old:%ld -> new:%ld (%s) - (%s, %d)", (void *)eps, old_cnt, (old_cnt - 1), reason, file, line); } - -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue, - const char *file, int line, - const char *reason) { - if (workqueue != NULL) { - eps_add_ref_dbg((epoll_set *)workqueue, reason, file, line); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - const char *file, int line, const char *reason) { - if (workqueue != NULL) { - eps_unref_dbg(exec_ctx, (epoll_set *)workqueue, reason, file, line); - } -} -#else -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) { - if (workqueue != NULL) { - eps_add_ref((epoll_set *)workqueue); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue) { - if (workqueue != NULL) { - eps_unref(exec_ctx, (epoll_set *)workqueue); - } -} #endif static void eps_add_ref(epoll_set *eps) { @@ -394,24 +350,15 @@ static epoll_set *epoll_set_create(grpc_error **error) { *error = GRPC_ERROR_NONE; eps = gpr_malloc(sizeof(*eps)); - eps->workqueue_scheduler.vtable = &workqueue_scheduler_vtable; eps->epoll_fd = -1; gpr_mu_init(&eps->mu); - gpr_mu_init(&eps->workqueue_read_mu); - gpr_mpscq_init(&eps->workqueue_items); - gpr_atm_rel_store(&eps->workqueue_item_count, 0); gpr_atm_rel_store(&eps->ref_count, 0); gpr_atm_rel_store(&eps->poller_count, 0); gpr_atm_rel_store(&eps->is_shutdown, false); - if 
(!append_error(error, grpc_wakeup_fd_init(&eps->workqueue_wakeup_fd), - err_desc)) { - goto done; - } - eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (eps->epoll_fd < 0) { @@ -419,8 +366,6 @@ static epoll_set *epoll_set_create(grpc_error **error) { goto done; } - epoll_set_add_wakeup_fd_locked(eps, &eps->workqueue_wakeup_fd, error); - done: if (*error != GRPC_ERROR_NONE) { epoll_set_delete(eps); @@ -434,57 +379,11 @@ static void epoll_set_delete(epoll_set *eps) { close(eps->epoll_fd); } - GPR_ASSERT(gpr_atm_no_barrier_load(&eps->workqueue_item_count) == 0); gpr_mu_destroy(&eps->mu); - gpr_mu_destroy(&eps->workqueue_read_mu); - gpr_mpscq_destroy(&eps->workqueue_items); - grpc_wakeup_fd_destroy(&eps->workqueue_wakeup_fd); gpr_free(eps); } -static void workqueue_maybe_wakeup(epoll_set *eps) { - /* If this thread is the current poller, then it may be that it's about to - decrement the current poller count, so we need to look past this thread */ - bool is_current_poller = (g_current_thread_epoll_set == eps); - gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0; - gpr_atm current_pollers = gpr_atm_no_barrier_load(&eps->poller_count); - /* Only issue a wakeup if it's likely that some poller could come in and take - it right now. Note that since we do an anticipatory mpscq_pop every poll - loop, it's ok if we miss the wakeup here, as we'll get the work item when - the next poller enters anyway. */ - if (current_pollers > min_current_pollers_for_wakeup) { - GRPC_LOG_IF_ERROR("workqueue_wakeup_fd", - grpc_wakeup_fd_wakeup(&eps->workqueue_wakeup_fd)); - } -} - -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - GPR_TIMER_BEGIN("workqueue.enqueue", 0); - grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler; - /* take a ref to the workqueue: otherwise it can happen that whatever events - * this kicks off ends up destroying the workqueue before this function - * completes */ - GRPC_WORKQUEUE_REF(workqueue, "enqueue"); - epoll_set *eps = (epoll_set *)workqueue; - gpr_atm last = gpr_atm_no_barrier_fetch_add(&eps->workqueue_item_count, 1); - closure->error_data.error = error; - gpr_mpscq_push(&eps->workqueue_items, &closure->next_data.atm_next); - if (last == 0) { - workqueue_maybe_wakeup(eps); - } - - GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue"); - GPR_TIMER_END("workqueue.enqueue", 0); -} - -static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { - epoll_set *eps = (epoll_set *)workqueue; - return workqueue == NULL ? 
grpc_schedule_on_exec_ctx - : &eps->workqueue_scheduler; -} - static grpc_error *epoll_set_global_init() { grpc_error *error = GRPC_ERROR_NONE; @@ -680,8 +579,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); } -static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; } - /******************************************************************************* * Pollset Definitions */ @@ -865,32 +762,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->mu); } -static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, epoll_set *eps) { - if (gpr_mu_trylock(&eps->workqueue_read_mu)) { - gpr_mpscq_node *n = gpr_mpscq_pop(&eps->workqueue_items); - gpr_mu_unlock(&eps->workqueue_read_mu); - if (n != NULL) { - if (gpr_atm_full_fetch_add(&eps->workqueue_item_count, -1) > 1) { - workqueue_maybe_wakeup(eps); - } - grpc_closure *c = (grpc_closure *)n; - grpc_error *error = c->error_data.error; -#ifndef NDEBUG - c->scheduled = false; -#endif - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - return true; - } else if (gpr_atm_no_barrier_load(&eps->workqueue_item_count) > 0) { - /* n == NULL might mean there's work but it's not available to be popped - * yet - try to ensure another workqueue wakes up to check shortly if so - */ - workqueue_maybe_wakeup(eps); - } - } - return false; -} - /* Blocking call */ static void acquire_epoll_lease(epoll_set *eps) { if (g_num_threads_per_eps > 1) { @@ -934,12 +805,7 @@ static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps, for (int i = 0; i < ep_rv; ++i) { void *data_ptr = ep_ev[i].data.ptr; - if (data_ptr == &eps->workqueue_wakeup_fd) { - append_error(error, - grpc_wakeup_fd_consume_wakeup(&eps->workqueue_wakeup_fd), - err_desc); - maybe_do_workqueue_work(exec_ctx, eps); - } else if (data_ptr == &epoll_set_wakeup_fd) { + if (data_ptr == &epoll_set_wakeup_fd) { gpr_atm_rel_store(&eps->is_shutdown, 1); gpr_log(GPR_INFO, "pollset poller: shutdown set"); } else { @@ -966,18 +832,13 @@ static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps, epoll set. */ epoll_fd = eps->epoll_fd; - /* If we get some workqueue work to do, it might end up completing an item on - the completion queue, so there's no need to poll... 
so we skip that and - redo the complete loop to verify */ - if (!maybe_do_workqueue_work(exec_ctx, eps)) { - gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1); - g_current_thread_epoll_set = eps; + gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1); + g_current_thread_epoll_set = eps; - do_epoll_wait(exec_ctx, epoll_fd, eps, error); + do_epoll_wait(exec_ctx, epoll_fd, eps, error); - g_current_thread_epoll_set = NULL; - gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1); - } + g_current_thread_epoll_set = NULL; + gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1); GPR_TIMER_END("epoll_set_work", 0); } @@ -1120,7 +981,6 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, @@ -1138,10 +998,6 @@ static const grpc_event_engine_vtable vtable = { .pollset_set_add_fd = pollset_set_add_fd, .pollset_set_del_fd = pollset_set_del_fd, - .workqueue_ref = workqueue_ref, - .workqueue_unref = workqueue_unref, - .workqueue_scheduler = workqueue_scheduler, - .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c index 7cb6085e25..b1dad14ba4 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.c +++ b/src/core/lib/iomgr/ev_epollex_linux.c @@ -59,7 +59,6 @@ #include "src/core/lib/iomgr/sys_epoll_wrapper.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" #include "src/core/lib/support/spinlock.h" @@ -139,17 +138,6 @@ struct grpc_fd { Ref/Unref by two to avoid altering the orphaned bit */ gpr_atm refst; - /* Wakeup fd used to wake pollers to check the contents of workqueue_items */ - grpc_wakeup_fd workqueue_wakeup_fd; - grpc_closure_scheduler workqueue_scheduler; - /* Spinlock guarding the read end of the workqueue (must be held to pop from - * workqueue_items) */ - gpr_spinlock workqueue_read_mu; - /* Queue of closures to be executed */ - gpr_mpscq workqueue_items; - /* Count of items in workqueue_items */ - gpr_atm workqueue_item_count; - /* The fd is either closed or we relinquished control of it. 
In either cases, this indicates that the 'fd' on this structure is no longer valid */ @@ -172,12 +160,6 @@ struct grpc_fd { static void fd_global_init(void); static void fd_global_shutdown(void); -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); - -static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = { - workqueue_enqueue, workqueue_enqueue, "workqueue"}; - /******************************************************************************* * Pollset Declarations */ @@ -347,13 +329,6 @@ static grpc_fd *fd_create(int fd, const char *name) { grpc_lfev_init(&new_fd->write_closure); gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); - GRPC_LOG_IF_ERROR("fd_create", - grpc_wakeup_fd_init(&new_fd->workqueue_wakeup_fd)); - new_fd->workqueue_scheduler.vtable = &workqueue_scheduler_vtable; - new_fd->workqueue_read_mu = GPR_SPINLOCK_INITIALIZER; - gpr_mpscq_init(&new_fd->workqueue_items); - gpr_atm_no_barrier_store(&new_fd->workqueue_item_count, 0); - new_fd->freelist_next = NULL; new_fd->on_done_closure = NULL; @@ -446,91 +421,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); } -static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { - REF_BY(fd, 2, "return_workqueue"); - return (grpc_workqueue *)fd; -} - -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue, - const char *file, int line, - const char *reason) { - if (workqueue != NULL) { - ref_by((grpc_fd *)workqueue, 2, file, line, reason); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - const char *file, int line, const char *reason) { - if (workqueue != NULL) { - unref_by(exec_ctx, (grpc_fd *)workqueue, 2, file, line, reason); - } -} -#else -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) { - if (workqueue != NULL) { - ref_by((grpc_fd *)workqueue, 2); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue) { - if (workqueue != NULL) { - unref_by(exec_ctx, (grpc_fd *)workqueue, 2); - } -} -#endif - -static void workqueue_wakeup(grpc_fd *fd) { - GRPC_LOG_IF_ERROR("workqueue_enqueue", - grpc_wakeup_fd_wakeup(&fd->workqueue_wakeup_fd)); -} - -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - GPR_TIMER_BEGIN("workqueue.enqueue", 0); - grpc_fd *fd = (grpc_fd *)(((char *)closure->scheduler) - - offsetof(grpc_fd, workqueue_scheduler)); - REF_BY(fd, 2, "workqueue_enqueue"); - gpr_atm last = gpr_atm_no_barrier_fetch_add(&fd->workqueue_item_count, 1); - closure->error_data.error = error; - gpr_mpscq_push(&fd->workqueue_items, &closure->next_data.atm_next); - if (last == 0) { - workqueue_wakeup(fd); - } - UNREF_BY(exec_ctx, fd, 2, "workqueue_enqueue"); -} - -static void fd_invoke_workqueue(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - /* handle spurious wakeups */ - if (!gpr_spinlock_trylock(&fd->workqueue_read_mu)) return; - gpr_mpscq_node *n = gpr_mpscq_pop(&fd->workqueue_items); - gpr_spinlock_unlock(&fd->workqueue_read_mu); - if (n != NULL) { - if (gpr_atm_full_fetch_add(&fd->workqueue_item_count, -1) > 1) { - workqueue_wakeup(fd); - } - grpc_closure *c = (grpc_closure *)n; - grpc_error *error = c->error_data.error; -#ifndef NDEBUG - c->scheduled = false; -#endif - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - } else if 
(gpr_atm_no_barrier_load(&fd->workqueue_item_count) > 0) { - /* n == NULL might mean there's work but it's not available to be popped - * yet - try to ensure another workqueue wakes up to check shortly if so - */ - workqueue_wakeup(fd); - } -} - -static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { - return &((grpc_fd *)workqueue)->workqueue_scheduler; -} - /******************************************************************************* * Pollable Definitions */ @@ -596,22 +486,7 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) { .data.ptr = fd}; if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) { switch (errno) { - case EEXIST: /* if this fd is already in the epoll set, the workqueue fd - must also be - just return */ - gpr_mu_unlock(&fd->orphaned_mu); - return GRPC_ERROR_NONE; - default: - append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc); - } - } - struct epoll_event ev_wq = { - .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE), - .data.ptr = (void *)(1 + (intptr_t)fd)}; - if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->workqueue_wakeup_fd.read_fd, &ev_wq) != - 0) { - switch (errno) { - case EEXIST: /* if the workqueue fd is already in the epoll set we're ok - - no need to do anything special */ + case EEXIST: break; default: append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc); @@ -874,29 +749,21 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } append_error(&error, grpc_wakeup_fd_consume_wakeup(&p->wakeup), err_desc); } else { - grpc_fd *fd = (grpc_fd *)(((intptr_t)data_ptr) & ~(intptr_t)1); - bool is_workqueue = (((intptr_t)data_ptr) & 1) != 0; + grpc_fd *fd = (grpc_fd *)data_ptr; bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0; bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0; bool write_ev = (events[i].events & EPOLLOUT) != 0; if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, - "PS:%p poll %p got fd %p: is_wq=%d cancel=%d read=%d " + "PS:%p poll %p got fd %p: cancel=%d read=%d " "write=%d", - pollset, p, fd, is_workqueue, cancel, read_ev, write_ev); + pollset, p, fd, cancel, read_ev, write_ev); } - if (is_workqueue) { - append_error(&error, - grpc_wakeup_fd_consume_wakeup(&fd->workqueue_wakeup_fd), - err_desc); - fd_invoke_workqueue(exec_ctx, fd); - } else { - if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); - } - if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); - } + if (read_ev || cancel) { + fd_become_readable(exec_ctx, fd, pollset); + } + if (write_ev || cancel) { + fd_become_writable(exec_ctx, fd); } } } @@ -1449,7 +1316,6 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, @@ -1467,10 +1333,6 @@ static const grpc_event_engine_vtable vtable = { .pollset_set_add_fd = pollset_set_add_fd, .pollset_set_del_fd = pollset_set_del_fd, - .workqueue_ref = workqueue_ref, - .workqueue_unref = workqueue_unref, - .workqueue_scheduler = workqueue_scheduler, - .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c index 52362a62f4..e50000dcc9 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.c +++ b/src/core/lib/iomgr/ev_epollsig_linux.c @@ -59,7 +59,6 @@ #include 
"src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" @@ -177,7 +176,9 @@ static void fd_global_shutdown(void); * Polling island Declarations */ -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +//#define PI_REFCOUNT_DEBUG + +#ifdef PI_REFCOUNT_DEBUG #define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__) #define PI_UNREF(exec_ctx, p, r) \ @@ -192,8 +193,6 @@ static void fd_global_shutdown(void); /* This is also used as grpc_workqueue (by directly casing it) */ typedef struct polling_island { - grpc_closure_scheduler workqueue_scheduler; - gpr_mu mu; /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement the refcount. @@ -214,15 +213,6 @@ typedef struct polling_island { /* Number of threads currently polling on this island */ gpr_atm poller_count; - /* Mutex guarding the read end of the workqueue (must be held to pop from - * workqueue_items) */ - gpr_mu workqueue_read_mu; - /* Queue of closures to be executed */ - gpr_mpscq workqueue_items; - /* Count of items in workqueue_items */ - gpr_atm workqueue_item_count; - /* Wakeup fd used to wake pollers to check the contents of workqueue_items */ - grpc_wakeup_fd workqueue_wakeup_fd; /* The fd of the underlying epoll set */ int epoll_fd; @@ -297,8 +287,6 @@ static __thread polling_island *g_current_thread_polling_island; /* Forward declaration */ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -311,13 +299,10 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ -static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = { - workqueue_enqueue, workqueue_enqueue, "workqueue"}; - static void pi_add_ref(polling_island *pi); static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); -#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +#ifdef PI_REFCOUNT_DEBUG static void pi_add_ref_dbg(polling_island *pi, const char *reason, const char *file, int line) { long old_cnt = gpr_atm_acq_load(&pi->ref_count); @@ -333,36 +318,6 @@ static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi, gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)", (void *)pi, old_cnt, (old_cnt - 1), reason, file, line); } - -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue, - const char *file, int line, - const char *reason) { - if (workqueue != NULL) { - pi_add_ref_dbg((polling_island *)workqueue, reason, file, line); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, - const char *file, int line, const char *reason) { - if (workqueue != NULL) { - pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line); - } -} -#else -static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) { - if (workqueue != NULL) { - pi_add_ref((polling_island *)workqueue); - } - return workqueue; -} - -static void workqueue_unref(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue) { - if (workqueue != NULL) { - pi_unref(exec_ctx, (polling_island *)workqueue); - } -} #endif static void pi_add_ref(polling_island *pi) { @@ -526,26 +481,16 @@ static 
polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, *error = GRPC_ERROR_NONE; pi = gpr_malloc(sizeof(*pi)); - pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable; gpr_mu_init(&pi->mu); pi->fd_cnt = 0; pi->fd_capacity = 0; pi->fds = NULL; pi->epoll_fd = -1; - gpr_mu_init(&pi->workqueue_read_mu); - gpr_mpscq_init(&pi->workqueue_items); - gpr_atm_rel_store(&pi->workqueue_item_count, 0); - gpr_atm_rel_store(&pi->ref_count, 0); gpr_atm_rel_store(&pi->poller_count, 0); gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); - if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd), - err_desc)) { - goto done; - } - pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (pi->epoll_fd < 0) { @@ -553,8 +498,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, goto done; } - polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error); - if (initial_fd != NULL) { polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); } @@ -573,11 +516,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { if (pi->epoll_fd >= 0) { close(pi->epoll_fd); } - GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0); - gpr_mu_destroy(&pi->workqueue_read_mu); - gpr_mpscq_destroy(&pi->workqueue_items); gpr_mu_destroy(&pi->mu); - grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd); gpr_free(pi->fds); gpr_free(pi); } @@ -722,45 +661,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) { } } -static void workqueue_maybe_wakeup(polling_island *pi) { - /* If this thread is the current poller, then it may be that it's about to - decrement the current poller count, so we need to look past this thread */ - bool is_current_poller = (g_current_thread_polling_island == pi); - gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0; - gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count); - /* Only issue a wakeup if it's likely that some poller could come in and take - it right now. Note that since we do an anticipatory mpscq_pop every poll - loop, it's ok if we miss the wakeup here, as we'll get the work item when - the next poller enters anyway. 
*/ - if (current_pollers > min_current_pollers_for_wakeup) { - GRPC_LOG_IF_ERROR("workqueue_wakeup_fd", - grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd)); - } -} - -static void workqueue_move_items_to_parent(polling_island *q) { - polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to); - if (p == NULL) { - return; - } - gpr_mu_lock(&q->workqueue_read_mu); - int num_added = 0; - while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) { - gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items); - if (n != NULL) { - gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1); - gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1); - gpr_mpscq_push(&p->workqueue_items, n); - num_added++; - } - } - gpr_mu_unlock(&q->workqueue_read_mu); - if (num_added > 0) { - workqueue_maybe_wakeup(p); - } - workqueue_move_items_to_parent(p); -} - static polling_island *polling_island_merge(polling_island *p, polling_island *q, grpc_error **error) { @@ -785,8 +685,6 @@ static polling_island *polling_island_merge(polling_island *p, /* Add the 'merged_to' link from p --> q */ gpr_atm_rel_store(&p->merged_to, (gpr_atm)q); PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */ - - workqueue_move_items_to_parent(p); } /* else if p == q, nothing needs to be done */ @@ -797,32 +695,6 @@ static polling_island *polling_island_merge(polling_island *p, return q; } -static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - GPR_TIMER_BEGIN("workqueue.enqueue", 0); - grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler; - /* take a ref to the workqueue: otherwise it can happen that whatever events - * this kicks off ends up destroying the workqueue before this function - * completes */ - GRPC_WORKQUEUE_REF(workqueue, "enqueue"); - polling_island *pi = (polling_island *)workqueue; - gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1); - closure->error_data.error = error; - gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next); - if (last == 0) { - workqueue_maybe_wakeup(pi); - } - workqueue_move_items_to_parent(pi); - GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue"); - GPR_TIMER_END("workqueue.enqueue", 0); -} - -static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) { - polling_island *pi = (polling_island *)workqueue; - return workqueue == NULL ? 
grpc_schedule_on_exec_ctx - : &pi->workqueue_scheduler; -} - static grpc_error *polling_island_global_init() { grpc_error *error = GRPC_ERROR_NONE; @@ -1081,14 +953,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); } -static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { - gpr_mu_lock(&fd->po.mu); - grpc_workqueue *workqueue = - GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue"); - gpr_mu_unlock(&fd->po.mu); - return workqueue; -} - /******************************************************************************* * Pollset Definitions */ @@ -1326,33 +1190,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->po.mu); } -static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, - polling_island *pi) { - if (gpr_mu_trylock(&pi->workqueue_read_mu)) { - gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items); - gpr_mu_unlock(&pi->workqueue_read_mu); - if (n != NULL) { - if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) { - workqueue_maybe_wakeup(pi); - } - grpc_closure *c = (grpc_closure *)n; - grpc_error *error = c->error_data.error; -#ifndef NDEBUG - c->scheduled = false; -#endif - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - return true; - } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) { - /* n == NULL might mean there's work but it's not available to be popped - * yet - try to ensure another workqueue wakes up to check shortly if so - */ - workqueue_maybe_wakeup(pi); - } - } - return false; -} - #define GRPC_EPOLL_MAX_EVENTS 100 /* Note: sig_mask contains the signal mask to use *during* epoll_wait() */ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, @@ -1408,72 +1245,61 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, PI_ADD_REF(pi, "ps_work"); gpr_mu_unlock(&pollset->po.mu); - /* If we get some workqueue work to do, it might end up completing an item on - the completion queue, so there's no need to poll... so we skip that and - redo the complete loop to verify */ - if (!maybe_do_workqueue_work(exec_ctx, pi)) { - gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1); - g_current_thread_polling_island = pi; - - GRPC_SCHEDULING_START_BLOCKING_REGION; - ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, - sig_mask); - GRPC_SCHEDULING_END_BLOCKING_REGION; - if (ep_rv < 0) { - if (errno != EINTR) { - gpr_asprintf(&err_msg, - "epoll_wait() epoll fd: %d failed with error: %d (%s)", - epoll_fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - } else { - /* We were interrupted. Save an interation by doing a zero timeout - epoll_wait to see if there are any other events of interest */ - GRPC_POLLING_TRACE( - "pollset_work: pollset: %p, worker: %p received kick", - (void *)pollset, (void *)worker); - ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0); - } + gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1); + g_current_thread_polling_island = pi; + + GRPC_SCHEDULING_START_BLOCKING_REGION; + ep_rv = + epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask); + GRPC_SCHEDULING_END_BLOCKING_REGION; + if (ep_rv < 0) { + if (errno != EINTR) { + gpr_asprintf(&err_msg, + "epoll_wait() epoll fd: %d failed with error: %d (%s)", + epoll_fd, errno, strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + } else { + /* We were interrupted. 
Save an iteration by doing a zero timeout + epoll_wait to see if there are any other events of interest */ + GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick", + (void *)pollset, (void *)worker); + ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0); } + } #ifdef GRPC_TSAN - /* See the definition of g_poll_sync for more details */ - gpr_atm_acq_load(&g_epoll_sync); + /* See the definition of g_poll_sync for more details */ + gpr_atm_acq_load(&g_epoll_sync); #endif /* defined(GRPC_TSAN) */ - for (int i = 0; i < ep_rv; ++i) { - void *data_ptr = ep_ev[i].data.ptr; - if (data_ptr == &pi->workqueue_wakeup_fd) { - append_error(error, - grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd), - err_desc); - maybe_do_workqueue_work(exec_ctx, pi); - } else if (data_ptr == &polling_island_wakeup_fd) { - GRPC_POLLING_TRACE( - "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: " - "%d) got merged", - (void *)pollset, (void *)worker, epoll_fd); - /* This means that our polling island is merged with a different - island. We do not have to do anything here since the subsequent call - to the function pollset_work_and_unlock() will pick up the correct - epoll_fd */ - } else { - grpc_fd *fd = data_ptr; - int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); - int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); - int write_ev = ep_ev[i].events & EPOLLOUT; - if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); - } - if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); - } + for (int i = 0; i < ep_rv; ++i) { + void *data_ptr = ep_ev[i].data.ptr; + if (data_ptr == &polling_island_wakeup_fd) { + GRPC_POLLING_TRACE( + "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: " + "%d) got merged", + (void *)pollset, (void *)worker, epoll_fd); + /* This means that our polling island is merged with a different + island. We do not have to do anything here since the subsequent call + to the function pollset_work_and_unlock() will pick up the correct + epoll_fd */ + } else { + grpc_fd *fd = data_ptr; + int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); + int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); + int write_ev = ep_ev[i].events & EPOLLOUT; + if (read_ev || cancel) { + fd_become_readable(exec_ctx, fd, pollset); + } + if (write_ev || cancel) { + fd_become_writable(exec_ctx, fd); } } - - g_current_thread_polling_island = NULL; - gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1); } + g_current_thread_polling_island = NULL; + gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1); + GPR_ASSERT(pi != NULL); /* Before leaving, release the extra ref we added to the polling island.
It @@ -1864,7 +1690,6 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, @@ -1882,10 +1707,6 @@ static const grpc_event_engine_vtable vtable = { .pollset_set_add_fd = pollset_set_add_fd, .pollset_set_del_fd = pollset_set_del_fd, - .workqueue_ref = workqueue_ref, - .workqueue_unref = workqueue_unref, - .workqueue_scheduler = workqueue_scheduler, - .shutdown_engine = shutdown_engine, }; diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c index a8cf641458..808f7d46b4 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -148,9 +148,9 @@ static void executor_thread(void *arg) { static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error) { thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state); - gpr_atm cur_thread_count = gpr_atm_no_barrier_load(&g_cur_threads); + size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads); if (ts == NULL) { - ts = &g_thread_state[rand() % cur_thread_count]; + ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)]; } gpr_mu_lock(&ts->mu); grpc_closure_list_append(&ts->elems, closure, error); @@ -159,7 +159,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, ts->depth > MAX_DEPTH && cur_thread_count < g_max_threads; gpr_mu_unlock(&ts->mu); if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) { - cur_thread_count = gpr_atm_no_barrier_load(&g_cur_threads); + cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads); if (cur_thread_count < g_max_threads) { gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1); diff --git a/test/core/iomgr/ev_epollsig_linux_test.c b/test/core/iomgr/ev_epollsig_linux_test.c index 45c542de4e..a20c4f2b98 100644 --- a/test/core/iomgr/ev_epollsig_linux_test.c +++ b/test/core/iomgr/ev_epollsig_linux_test.c @@ -47,7 +47,6 @@ #include #include "src/core/lib/iomgr/iomgr.h" -#include "src/core/lib/iomgr/workqueue.h" #include "test/core/util/test_config.h" typedef struct test_pollset { @@ -131,86 +130,6 @@ static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx, } } -static void increment(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - ++*(int *)arg; -} - -/* - * Validate that merging two workqueues preserves the closures in each queue. - * This is a regression test for a bug in - * polling_island_merge()[ev_epoll_linux.c], where the parent relationship was - * inverted. - */ - -#define NUM_FDS 2 -#define NUM_POLLSETS 2 -#define NUM_CLOSURES 4 - -static void test_pollset_queue_merge_items() { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - test_fd tfds[NUM_FDS]; - int fds[NUM_FDS]; - test_pollset pollsets[NUM_POLLSETS]; - grpc_closure closures[NUM_CLOSURES]; - int i; - int result = 0; - - test_fd_init(tfds, fds, NUM_FDS); - test_pollset_init(pollsets, NUM_POLLSETS); - - /* Two distinct polling islands, each with their own FD and pollset. */ - for (i = 0; i < NUM_FDS; i++) { - grpc_pollset_add_fd(&exec_ctx, pollsets[i].pollset, tfds[i].fd); - grpc_exec_ctx_flush(&exec_ctx); - } - - /* Enqeue the closures, 3 to polling island 0 and 1 to polling island 1. 
*/ - grpc_closure_init( - closures, increment, &result, - grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[0].fd))); - grpc_closure_init( - closures + 1, increment, &result, - grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[0].fd))); - grpc_closure_init( - closures + 2, increment, &result, - grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[0].fd))); - grpc_closure_init( - closures + 3, increment, &result, - grpc_workqueue_scheduler(grpc_fd_get_polling_island(tfds[1].fd))); - for (i = 0; i < NUM_CLOSURES; ++i) { - grpc_closure_sched(&exec_ctx, closures + i, GRPC_ERROR_NONE); - } - - /* Merge the two polling islands. */ - grpc_pollset_add_fd(&exec_ctx, pollsets[0].pollset, tfds[1].fd); - grpc_exec_ctx_flush(&exec_ctx); - - /* - * Execute the closures, verify we see each one execute when executing work on - * the merged polling island. - */ - grpc_pollset_worker *worker = NULL; - for (i = 0; i < NUM_CLOSURES; ++i) { - const gpr_timespec deadline = gpr_time_add( - gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(2, GPR_TIMESPAN)); - gpr_mu_lock(pollsets[1].mu); - GRPC_LOG_IF_ERROR( - "grpc_pollset_work", - grpc_pollset_work(&exec_ctx, pollsets[1].pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline)); - gpr_mu_unlock(pollsets[1].mu); - } - GPR_ASSERT(result == NUM_CLOSURES); - - test_fd_cleanup(&exec_ctx, tfds, NUM_FDS); - test_pollset_cleanup(&exec_ctx, pollsets, NUM_POLLSETS); - grpc_exec_ctx_finish(&exec_ctx); -} - -#undef NUM_FDS -#undef NUM_POLLSETS -#undef NUM_CLOSURES - /* * Cases to test: * case 1) Polling islands of both fd and pollset are NULL @@ -408,7 +327,6 @@ int main(int argc, char **argv) { poll_strategy = grpc_get_poll_strategy_name(); if (poll_strategy != NULL && strcmp(poll_strategy, "epollsig") == 0) { test_add_fd_to_pollset(); - test_pollset_queue_merge_items(); test_threading(); } else { gpr_log(GPR_INFO, -- cgit v1.2.3 From 5e56f00d3a82da4d28f66791fa58faf4644a9d09 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 16 May 2017 15:02:50 -0700 Subject: Fixes to new executor --- src/core/lib/iomgr/combiner.c | 2 +- src/core/lib/iomgr/executor.c | 81 +++++++++++++++++----------- src/core/lib/iomgr/executor.h | 9 +++- src/core/lib/iomgr/iomgr.c | 9 ++-- src/core/lib/iomgr/iomgr.h | 4 +- src/core/lib/surface/init.c | 6 ++- test/core/end2end/fuzzers/api_fuzzer.c | 6 +++ test/core/end2end/fuzzers/client_fuzzer.c | 2 + test/core/end2end/fuzzers/server_fuzzer.c | 2 + test/core/iomgr/ev_epollsig_linux_test.c | 12 ++--- test/core/iomgr/fd_conservation_posix_test.c | 15 +++--- test/core/iomgr/fd_posix_test.c | 4 +- test/core/iomgr/pollset_set_test.c | 4 +- test/core/iomgr/resolve_address_posix_test.c | 15 +++--- test/core/iomgr/resolve_address_test.c | 15 +++--- 15 files changed, 105 insertions(+), 81 deletions(-) (limited to 'test/core') diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c index aa7a8c1c70..38eace12c7 100644 --- a/src/core/lib/iomgr/combiner.c +++ b/src/core/lib/iomgr/combiner.c @@ -214,7 +214,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { lock, grpc_exec_ctx_ready_to_finish(exec_ctx), lock->time_to_execute_final_list)); - if (grpc_exec_ctx_ready_to_finish(exec_ctx)) { + if (grpc_exec_ctx_ready_to_finish(exec_ctx) && grpc_executor_is_threaded()) { GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0); // this execution context wants to move on, and we have a workqueue (and // so can help the execution context out): schedule remaining work to be diff --git a/src/core/lib/iomgr/executor.c 
b/src/core/lib/iomgr/executor.c index 2a2544dc1f..513248ca57 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -66,22 +66,6 @@ GPR_TLS_DECL(g_this_thread_state); static void executor_thread(void *arg); -void grpc_executor_init() { - g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores()); - gpr_atm_no_barrier_store(&g_cur_threads, 1); - gpr_tls_init(&g_this_thread_state); - g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads); - for (size_t i = 0; i < g_max_threads; i++) { - gpr_mu_init(&g_thread_state[i].mu); - gpr_cv_init(&g_thread_state[i].cv); - g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; - } - - gpr_thd_options opt = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&opt); - gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0], &opt); -} - static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { size_t n = 0; @@ -100,24 +84,57 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { return n; } -void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) { - for (size_t i = 0; i < g_max_threads; i++) { - gpr_mu_lock(&g_thread_state[i].mu); - g_thread_state[i].shutdown = true; - gpr_cv_signal(&g_thread_state[i].cv); - gpr_mu_unlock(&g_thread_state[i].mu); - } - for (gpr_atm i = 0; i < g_cur_threads; i++) { - gpr_thd_join(g_thread_state[i].id); +bool grpc_executor_is_threaded() { + return gpr_atm_no_barrier_load(&g_cur_threads) > 0; +} + +void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { + gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads); + if (threading) { + if (cur_threads > 0) return; + g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores()); + gpr_atm_no_barrier_store(&g_cur_threads, 1); + gpr_tls_init(&g_this_thread_state); + g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads); + for (size_t i = 0; i < g_max_threads; i++) { + gpr_mu_init(&g_thread_state[i].mu); + gpr_cv_init(&g_thread_state[i].cv); + g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; + } + + gpr_thd_options opt = gpr_thd_options_default(); + gpr_thd_options_set_joinable(&opt); + gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0], + &opt); + } else { + if (cur_threads == 0) return; + for (size_t i = 0; i < g_max_threads; i++) { + gpr_mu_lock(&g_thread_state[i].mu); + g_thread_state[i].shutdown = true; + gpr_cv_signal(&g_thread_state[i].cv); + gpr_mu_unlock(&g_thread_state[i].mu); + } + for (gpr_atm i = 0; i < g_cur_threads; i++) { + gpr_thd_join(g_thread_state[i].id); + } + gpr_atm_no_barrier_store(&g_cur_threads, 0); + for (size_t i = 0; i < g_max_threads; i++) { + gpr_mu_destroy(&g_thread_state[i].mu); + gpr_cv_destroy(&g_thread_state[i].cv); + run_closures(exec_ctx, g_thread_state[i].elems); + } + gpr_free(g_thread_state); + gpr_tls_destroy(&g_this_thread_state); } +} + +void grpc_executor_init(grpc_exec_ctx *exec_ctx) { gpr_atm_no_barrier_store(&g_cur_threads, 0); - for (size_t i = 0; i < g_max_threads; i++) { - gpr_mu_destroy(&g_thread_state[i].mu); - gpr_cv_destroy(&g_thread_state[i].cv); - run_closures(exec_ctx, g_thread_state[i].elems); - } - gpr_free(g_thread_state); - gpr_tls_destroy(&g_this_thread_state); + grpc_executor_set_threading(exec_ctx, true); +} + +void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) { + grpc_executor_set_threading(exec_ctx, false); } static void executor_thread(void *arg) { diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h index 
1213016383..792d5056cb 100644 --- a/src/core/lib/iomgr/executor.h +++ b/src/core/lib/iomgr/executor.h @@ -41,11 +41,18 @@ * This mechanism is meant to outsource work (grpc_closure instances) to a * thread, for those cases where blocking isn't an option but there isn't a * non-blocking solution available. */ -void grpc_executor_init(); +void grpc_executor_init(grpc_exec_ctx *exec_ctx); extern grpc_closure_scheduler *grpc_executor_scheduler; /** Shutdown the executor, running all pending work as part of the call */ void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx); +/** Is the executor multi-threaded? */ +bool grpc_executor_is_threaded(); + +/* enable/disable threading - must be called after grpc_executor_init and before + grpc_executor_shutdown */ +void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable); + #endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */ diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c index 9e0e4dbfe0..0623acc597 100644 --- a/src/core/lib/iomgr/iomgr.c +++ b/src/core/lib/iomgr/iomgr.c @@ -57,12 +57,12 @@ static gpr_cv g_rcv; static int g_shutdown; static grpc_iomgr_object g_root_object; -void grpc_iomgr_init(void) { +void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) { g_shutdown = 0; gpr_mu_init(&g_mu); gpr_cv_init(&g_rcv); grpc_exec_ctx_global_init(); - grpc_executor_init(); + grpc_executor_init(exec_ctx); grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC)); g_root_object.next = g_root_object.prev = &g_root_object; g_root_object.name = "root"; @@ -70,7 +70,7 @@ void grpc_iomgr_init(void) { grpc_iomgr_platform_init(); } -void grpc_iomgr_start(void) { grpc_timer_manager_init(); } +void grpc_iomgr_start(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_init(); } static size_t count_objects(void) { grpc_iomgr_object *obj; @@ -95,6 +95,7 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_shutdown(); grpc_iomgr_platform_flush(); + grpc_executor_shutdown(exec_ctx); gpr_mu_lock(&g_mu); g_shutdown = 1; @@ -145,8 +146,6 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { grpc_timer_list_shutdown(exec_ctx); grpc_exec_ctx_flush(exec_ctx); - grpc_executor_shutdown(exec_ctx); - grpc_exec_ctx_flush(exec_ctx); /* ensure all threads have left g_mu */ gpr_mu_lock(&g_mu); diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h index 6e2e023615..bd6ca4a0b8 100644 --- a/src/core/lib/iomgr/iomgr.h +++ b/src/core/lib/iomgr/iomgr.h @@ -38,10 +38,10 @@ #include "src/core/lib/iomgr/port.h" /** Initializes the iomgr. */ -void grpc_iomgr_init(void); +void grpc_iomgr_init(grpc_exec_ctx *exec_ctx); /** Starts any background threads for iomgr. */ -void grpc_iomgr_start(void); +void grpc_iomgr_start(grpc_exec_ctx *exec_ctx); /** Signals the intention to shutdown the iomgr. Expects to be able to flush * exec_ctx. 
*/ diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c index 452e6c444b..86ce4bde61 100644 --- a/src/core/lib/surface/init.c +++ b/src/core/lib/surface/init.c @@ -128,6 +128,7 @@ void grpc_init(void) { int i; gpr_once_init(&g_basic_init, do_basic_init); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(&g_init_mu); if (++g_initializations == 1) { gpr_time_init(); @@ -154,7 +155,7 @@ void grpc_init(void) { grpc_register_tracer("pending_tags", &grpc_trace_pending_tags); #endif grpc_security_pre_init(); - grpc_iomgr_init(); + grpc_iomgr_init(&exec_ctx); gpr_timers_global_init(); grpc_handshaker_factory_registry_init(); grpc_security_init(); @@ -170,9 +171,10 @@ void grpc_init(void) { grpc_tracer_init("GRPC_TRACE"); /* no more changes to channel init pipelines */ grpc_channel_init_finalize(); - grpc_iomgr_start(); + grpc_iomgr_start(&exec_ctx); } gpr_mu_unlock(&g_init_mu); + grpc_exec_ctx_finish(&exec_ctx); GRPC_API_TRACE("grpc_init(void)", 0, ()); } diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c index b33b43dac5..d8d34fba9c 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.c +++ b/test/core/end2end/fuzzers/api_fuzzer.c @@ -41,6 +41,7 @@ #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/iomgr/timer.h" @@ -724,6 +725,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { gpr_now_impl = now_impl; grpc_init(); grpc_timer_manager_set_threading(false); + { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_executor_set_threading(&exec_ctx, false); + grpc_exec_ctx_finish(&exec_ctx); + } grpc_resolve_address = my_resolve_address; GPR_ASSERT(g_channel == NULL); diff --git a/test/core/end2end/fuzzers/client_fuzzer.c b/test/core/end2end/fuzzers/client_fuzzer.c index 6f49baffd2..2307a3c771 100644 --- a/test/core/end2end/fuzzers/client_fuzzer.c +++ b/test/core/end2end/fuzzers/client_fuzzer.c @@ -37,6 +37,7 @@ #include #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" +#include "src/core/lib/iomgr/executor.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/surface/channel.h" #include "test/core/util/memory_counters.h" @@ -58,6 +59,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { if (leak_check) grpc_memory_counters_init(); grpc_init(); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_executor_set_threading(&exec_ctx, false); grpc_resource_quota *resource_quota = grpc_resource_quota_create("client_fuzzer"); diff --git a/test/core/end2end/fuzzers/server_fuzzer.c b/test/core/end2end/fuzzers/server_fuzzer.c index 6d65fe1847..e6f6be2325 100644 --- a/test/core/end2end/fuzzers/server_fuzzer.c +++ b/test/core/end2end/fuzzers/server_fuzzer.c @@ -34,6 +34,7 @@ #include #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" +#include "src/core/lib/iomgr/executor.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/surface/server.h" #include "test/core/util/memory_counters.h" @@ -56,6 +57,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { if (leak_check) grpc_memory_counters_init(); grpc_init(); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_executor_set_threading(&exec_ctx, false); grpc_resource_quota *resource_quota = grpc_resource_quota_create("server_fuzzer"); diff --git 
a/test/core/iomgr/ev_epollsig_linux_test.c b/test/core/iomgr/ev_epollsig_linux_test.c index a20c4f2b98..952e774670 100644 --- a/test/core/iomgr/ev_epollsig_linux_test.c +++ b/test/core/iomgr/ev_epollsig_linux_test.c @@ -321,8 +321,9 @@ static void test_threading(void) { int main(int argc, char **argv) { const char *poll_strategy = NULL; grpc_test_init(argc, argv); - grpc_iomgr_init(); - grpc_iomgr_start(); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); poll_strategy = grpc_get_poll_strategy_name(); if (poll_strategy != NULL && strcmp(poll_strategy, "epollsig") == 0) { @@ -335,11 +336,8 @@ int main(int argc, char **argv) { poll_strategy); } - { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_iomgr_shutdown(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - } + grpc_iomgr_shutdown(&exec_ctx); + grpc_exec_ctx_finish(&exec_ctx); return 0; } #else /* defined(GRPC_LINUX_EPOLL) */ diff --git a/test/core/iomgr/fd_conservation_posix_test.c b/test/core/iomgr/fd_conservation_posix_test.c index f662070655..18d8cf4ec8 100644 --- a/test/core/iomgr/fd_conservation_posix_test.c +++ b/test/core/iomgr/fd_conservation_posix_test.c @@ -45,8 +45,9 @@ int main(int argc, char **argv) { grpc_endpoint_pair p; grpc_test_init(argc, argv); - grpc_iomgr_init(); - grpc_iomgr_start(); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); /* set max # of file descriptors to a low value, and verify we can create and destroy many more than this number @@ -57,19 +58,15 @@ int main(int argc, char **argv) { grpc_resource_quota_create("fd_conservation_posix_test"); for (i = 0; i < 100; i++) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; p = grpc_iomgr_create_endpoint_pair("test", NULL); grpc_endpoint_destroy(&exec_ctx, p.client); grpc_endpoint_destroy(&exec_ctx, p.server); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); } grpc_resource_quota_unref(resource_quota); - { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_iomgr_shutdown(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - } + grpc_iomgr_shutdown(&exec_ctx); + grpc_exec_ctx_finish(&exec_ctx); return 0; } diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c index 9e8fe8bffa..d0f31e087d 100644 --- a/test/core/iomgr/fd_posix_test.c +++ b/test/core/iomgr/fd_posix_test.c @@ -542,8 +542,8 @@ int main(int argc, char **argv) { grpc_closure destroyed; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_test_init(argc, argv); - grpc_iomgr_init(); - grpc_iomgr_start(); + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); g_pollset = gpr_zalloc(grpc_pollset_size()); grpc_pollset_init(g_pollset, &g_mu); test_grpc_fd(); diff --git a/test/core/iomgr/pollset_set_test.c b/test/core/iomgr/pollset_set_test.c index 092711381d..587130704d 100644 --- a/test/core/iomgr/pollset_set_test.c +++ b/test/core/iomgr/pollset_set_test.c @@ -447,8 +447,8 @@ int main(int argc, char **argv) { const char *poll_strategy = grpc_get_poll_strategy_name(); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_test_init(argc, argv); - grpc_iomgr_init(); - grpc_iomgr_start(); + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); if (poll_strategy != NULL && (strcmp(poll_strategy, "epoll") == 0 || diff --git a/test/core/iomgr/resolve_address_posix_test.c b/test/core/iomgr/resolve_address_posix_test.c index bee7036ec8..f07bd045b6 100644 --- a/test/core/iomgr/resolve_address_posix_test.c +++ b/test/core/iomgr/resolve_address_posix_test.c 
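
The iomgr test updates in this commit all converge on the same boilerplate (the resolve_address tests that follow repeat it), so a condensed sketch may help. It is a sketch under stated assumptions, not code from the patch: run_checks is a hypothetical stand-in for each test body, and the comments reflect the flush-vs-finish distinction the hunks rely on.

#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "test/core/util/test_config.h"

/* Hypothetical stand-in for whatever checks a given test runs. */
static void run_checks(grpc_exec_ctx *exec_ctx) { (void)exec_ctx; }

int main(int argc, char **argv) {
  grpc_test_init(argc, argv);
  /* One exec_ctx now spans the whole test: grpc_iomgr_init/start/shutdown
     take it explicitly instead of each creating a scoped one. */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_iomgr_init(&exec_ctx);
  grpc_iomgr_start(&exec_ctx);
  run_checks(&exec_ctx);
  /* grpc_exec_ctx_flush drains pending closures but keeps the exec_ctx
     usable; only the final grpc_exec_ctx_finish retires it. */
  grpc_exec_ctx_flush(&exec_ctx);
  grpc_iomgr_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  return 0;
}
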
@@ -174,16 +174,13 @@ static void test_unix_socket_path_name_too_long(void) { int main(int argc, char **argv) { grpc_test_init(argc, argv); - grpc_executor_init(); - grpc_iomgr_init(); - grpc_iomgr_start(); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); test_unix_socket(); test_unix_socket_path_name_too_long(); - { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_executor_shutdown(&exec_ctx); - grpc_iomgr_shutdown(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - } + grpc_executor_shutdown(&exec_ctx); + grpc_iomgr_shutdown(&exec_ctx); + grpc_exec_ctx_finish(&exec_ctx); return 0; } diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c index 83f73070dc..2c3240aaac 100644 --- a/test/core/iomgr/resolve_address_test.c +++ b/test/core/iomgr/resolve_address_test.c @@ -263,9 +263,9 @@ static void test_unparseable_hostports(void) { int main(int argc, char **argv) { grpc_test_init(argc, argv); - grpc_executor_init(); - grpc_iomgr_init(); - grpc_iomgr_start(); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); test_localhost(); test_default_port(); test_non_numeric_default_port(); @@ -274,11 +274,8 @@ int main(int argc, char **argv) { test_ipv6_without_port(); test_invalid_ip_addresses(); test_unparseable_hostports(); - { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_executor_shutdown(&exec_ctx); - grpc_iomgr_shutdown(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - } + grpc_executor_shutdown(&exec_ctx); + grpc_iomgr_shutdown(&exec_ctx); + grpc_exec_ctx_finish(&exec_ctx); return 0; } -- cgit v1.2.3 From c7280c7bdbae7fb021f979c9216e1bc5e13fe5ac Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 17 May 2017 15:23:23 -0700 Subject: Fix merge --- test/core/end2end/fixtures/http_proxy_fixture.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'test/core') diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c index 708409d865..c8e9559173 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.c +++ b/test/core/end2end/fixtures/http_proxy_fixture.c @@ -50,6 +50,7 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/exec_ctx.h" @@ -60,7 +61,6 @@ #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/iomgr/tcp_server.h" #include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/slice/slice_internal.h" #include "test/core/util/port.h" @@ -73,7 +73,7 @@ struct grpc_end2end_http_proxy { grpc_pollset* pollset; gpr_refcount users; - grpc_combiner *combiner; + grpc_combiner* combiner; }; // @@ -403,19 +403,19 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset); grpc_endpoint_add_to_pollset_set(exec_ctx, endpoint, conn->pollset_set); grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + grpc_combiner_scheduler(conn->proxy->combiner)); grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + 
grpc_combiner_scheduler(conn->proxy->combiner)); grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + grpc_combiner_scheduler(conn->proxy->combiner)); grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + grpc_combiner_scheduler(conn->proxy->combiner)); grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + grpc_combiner_scheduler(conn->proxy->combiner)); grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + grpc_combiner_scheduler(conn->proxy->combiner)); grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn, - grpc_combiner_scheduler(conn->proxy->combiner, false)); + grpc_combiner_scheduler(conn->proxy->combiner)); grpc_slice_buffer_init(&conn->client_read_buffer); grpc_slice_buffer_init(&conn->client_deferred_write_buffer); grpc_slice_buffer_init(&conn->client_write_buffer); @@ -456,7 +456,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) { grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy)); memset(proxy, 0, sizeof(*proxy)); - proxy->combiner = grpc_combiner_create(NULL); + proxy->combiner = grpc_combiner_create(); gpr_ref_init(&proxy->users, 1); // Construct proxy address. const int proxy_port = grpc_pick_unused_port_or_die(); @@ -508,7 +508,7 @@ void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) { grpc_pollset_shutdown(&exec_ctx, proxy->pollset, grpc_closure_create(destroy_pollset, proxy->pollset, grpc_schedule_on_exec_ctx)); - grpc_combiner_unref(&exec_ctx, proxy->combiner); + grpc_combiner_unref(&exec_ctx, proxy->combiner); gpr_free(proxy); grpc_exec_ctx_finish(&exec_ctx); } -- cgit v1.2.3 From 125bb7ccca7903082efc76d16502419023b09018 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 23 May 2017 23:52:04 +0000 Subject: Add missing kick --- test/core/iomgr/tcp_posix_test.c | 1 + 1 file changed, 1 insertion(+) (limited to 'test/core') diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c index a1c54e19c1..13131d1a52 100644 --- a/test/core/iomgr/tcp_posix_test.c +++ b/test/core/iomgr/tcp_posix_test.c @@ -162,6 +162,7 @@ static void read_cb(grpc_exec_ctx *exec_ctx, void *user_data, gpr_log(GPR_INFO, "Read %" PRIuPTR " bytes of %" PRIuPTR, read_bytes, state->target_read_bytes); if (state->read_bytes >= state->target_read_bytes) { + GPR_ASSERT(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, NULL))); gpr_mu_unlock(g_mu); } else { grpc_endpoint_read(exec_ctx, state->ep, &state->incoming, &state->read_cb); -- cgit v1.2.3 From 8c4f5cd2c1f4cebd664a867b71cac4a599ef03e6 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 24 May 2017 00:00:06 +0000 Subject: Fix threading assumptions in test --- test/core/client_channel/resolvers/fake_resolver_test.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'test/core') diff --git a/test/core/client_channel/resolvers/fake_resolver_test.c b/test/core/client_channel/resolvers/fake_resolver_test.c index a20f119e64..88ff4f9b56 100644 --- a/test/core/client_channel/resolvers/fake_resolver_test.c +++ b/test/core/client_channel/resolvers/fake_resolver_test.c @@ -67,12 +67,11 @@ static grpc_resolver *build_fake_resolver( typedef struct on_resolution_arg { grpc_channel_args 
*resolver_result; grpc_channel_args *expected_resolver_result; - bool was_called; + gpr_event ev; } on_resolution_arg; void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { on_resolution_arg *res = arg; - res->was_called = true; // We only check the addresses channel arg because that's the only one // explicitly set by the test via // grpc_fake_resolver_response_generator_set_response. @@ -84,6 +83,7 @@ void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_lb_addresses_cmp(actual_lb_addresses, expected_lb_addresses) == 0); grpc_channel_args_destroy(exec_ctx, res->resolver_result); grpc_channel_args_destroy(exec_ctx, res->expected_resolver_result); + gpr_event_set(&res->ev, (void*)1); } static void test_fake_resolver() { @@ -115,6 +115,7 @@ static void test_fake_resolver() { on_resolution_arg on_res_arg; memset(&on_res_arg, 0, sizeof(on_res_arg)); on_res_arg.expected_resolver_result = results; + gpr_event_init(&on_res_arg.ev); grpc_closure *on_resolution = grpc_closure_create( on_resolution_cb, &on_res_arg, grpc_combiner_scheduler(combiner)); @@ -125,7 +126,7 @@ static void test_fake_resolver() { grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result, on_resolution); grpc_exec_ctx_flush(&exec_ctx); - GPR_ASSERT(on_res_arg.was_called); + GPR_ASSERT(gpr_event_wait(&on_res_arg.ev, grpc_timeout_seconds_to_deadline(5)) != NULL); // Setup update. grpc_uri *uris_update[] = { @@ -150,6 +151,7 @@ static void test_fake_resolver() { on_resolution_arg on_res_arg_update; memset(&on_res_arg_update, 0, sizeof(on_res_arg_update)); on_res_arg_update.expected_resolver_result = results_update; + gpr_event_init(&on_res_arg_update.ev); on_resolution = grpc_closure_create(on_resolution_cb, &on_res_arg_update, grpc_combiner_scheduler(combiner)); @@ -159,7 +161,7 @@ static void test_fake_resolver() { grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg_update.resolver_result, on_resolution); grpc_exec_ctx_flush(&exec_ctx); - GPR_ASSERT(on_res_arg.was_called); + GPR_ASSERT(gpr_event_wait(&on_res_arg_update.ev, grpc_timeout_seconds_to_deadline(5)) != NULL); // Requesting a new resolution without re-sending the response shouldn't // trigger the resolution callback.
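
This commit is the template for the rest of the series: a plain `bool was_called` flag is only sound when the callback runs synchronously on the asserting thread, while a `gpr_event` may be set from any thread and waited on with a deadline. A minimal sketch of the converted pattern, with `on_done` and `expect_async_completion` as illustrative names not taken from the patch:

#include <grpc/support/sync.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "test/core/util/test_config.h"

/* Illustrative callback: signal completion instead of flipping a bool. */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  gpr_event_set((gpr_event *)arg, (void *)1);
}

static void expect_async_completion(grpc_exec_ctx *exec_ctx,
                                    grpc_closure_scheduler *sched) {
  gpr_event ev;
  gpr_event_init(&ev);
  grpc_closure_sched(exec_ctx, grpc_closure_create(on_done, &ev, sched),
                     GRPC_ERROR_NONE);
  grpc_exec_ctx_flush(exec_ctx);
  /* Safe even if on_done ran on another thread; fails loudly after 5s. */
  GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != NULL);
}
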
@@ -167,7 +169,7 @@ static void test_fake_resolver() { grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result, on_resolution); grpc_exec_ctx_flush(&exec_ctx); - GPR_ASSERT(!on_res_arg.was_called); + GPR_ASSERT(gpr_event_wait(&on_res_arg.ev, grpc_timeout_milliseconds_to_deadline(100)) == NULL); GRPC_COMBINER_UNREF(&exec_ctx, combiner, "test_fake_resolver"); GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_fake_resolver"); -- cgit v1.2.3 From 2d485f0396ec3d84b7899526229f41a923434946 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 24 May 2017 00:03:19 +0000 Subject: Fix threading assumptions in test --- test/core/iomgr/endpoint_tests.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'test/core') diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c index e274796e23..85ba85373b 100644 --- a/test/core/iomgr/endpoint_tests.c +++ b/test/core/iomgr/endpoint_tests.c @@ -265,7 +265,10 @@ static void read_and_write_test(grpc_endpoint_test_config config, static void inc_on_failure(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { + gpr_mu_lock(g_mu); *(int *)arg += (error != GRPC_ERROR_NONE); + GPR_ASSERT(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, NULL))); + gpr_mu_unlock(g_mu); } static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count, -- cgit v1.2.3 From 55a8213041c88be5385289a6f340f10e49f18cc5 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 23 May 2017 17:39:19 -0700 Subject: Fixing test (not yet complete) --- test/core/iomgr/resource_quota_test.c | 320 ++++++++++++++++++++++------------ 1 file changed, 209 insertions(+), 111 deletions(-) (limited to 'test/core') diff --git a/test/core/iomgr/resource_quota_test.c b/test/core/iomgr/resource_quota_test.c index ebce8b9da6..6d461eb604 100644 --- a/test/core/iomgr/resource_quota_test.c +++ b/test/core/iomgr/resource_quota_test.c @@ -43,11 +43,11 @@ static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) { ++*(int *)a; } -static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) { - *(bool *)a = true; +static void set_event_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) { + gpr_event_set((gpr_event *)a, (void *)1); } -grpc_closure *set_bool(bool *p) { - return grpc_closure_create(set_bool_cb, p, grpc_schedule_on_exec_ctx); +grpc_closure *set_event(gpr_event *ev) { + return grpc_closure_create(set_event_cb, ev, grpc_schedule_on_exec_ctx); } typedef struct { @@ -154,11 +154,13 @@ static void test_simple_async_alloc(void) { grpc_resource_quota_resize(q, 1024 * 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -175,15 +177,18 @@ static void test_async_alloc_blocked_by_size(void) { grpc_resource_quota_create("test_async_alloc_blocked_by_size"); grpc_resource_quota_resize(q, 1); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); - bool done = false; + gpr_event ev; + gpr_event_init(&ev); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); 
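
The kicks added above (in tcp_posix_test's read_cb and endpoint_tests' inc_on_failure) fix a hang the threaded executor exposed: a callback can now complete on a worker thread while the test thread is parked inside grpc_pollset_work, so the callback must kick the pollset to force that call to return and re-check its exit condition. A sketch of the idiom, assuming the tests' usual g_mu/g_pollset globals:

/* May run on any thread; g_mu and g_pollset are the test's globals. */
static void on_io_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  gpr_mu_lock(g_mu);
  *(int *)arg += (error != GRPC_ERROR_NONE); /* state the poller watches */
  /* Wake any thread blocked in grpc_pollset_work so it re-evaluates. */
  GPR_ASSERT(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, NULL)));
  gpr_mu_unlock(g_mu);
}
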
grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!done); + GPR_ASSERT(gpr_event_wait( + &ev, grpc_timeout_milliseconds_to_deadline(100)) == NULL); } grpc_resource_quota_resize(q, 1024); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != NULL); + ; { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_free(&exec_ctx, usr, 1024); @@ -200,11 +205,14 @@ static void test_scavenge(void) { grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1"); grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2"); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -212,11 +220,14 @@ static void test_scavenge(void) { grpc_exec_ctx_finish(&exec_ctx); } { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -234,26 +245,31 @@ static void test_scavenge_blocked(void) { grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1"); grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2"); - bool done; + gpr_event ev; { - done = false; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { - done = false; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!done); + GPR_ASSERT(gpr_event_wait( + &ev, grpc_timeout_milliseconds_to_deadline(100)) == NULL); } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_free(&exec_ctx, usr1, 1024); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -272,27 +288,35 @@ static void test_blocked_until_scheduled_reclaim(void) { grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } - bool reclaim_done = false; + gpr_event reclaim_done; + gpr_event_init(&reclaim_done); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr, false, - 
make_reclaimer(usr, 1024, set_bool(&reclaim_done))); + make_reclaimer(usr, 1024, set_event(&reclaim_done))); grpc_exec_ctx_finish(&exec_ctx); } { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(reclaim_done); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&reclaim_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -311,27 +335,35 @@ static void test_blocked_until_scheduled_reclaim_and_scavenge(void) { grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1"); grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2"); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } - bool reclaim_done = false; + gpr_event reclaim_done; + gpr_event_init(&reclaim_done); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr1, false, - make_reclaimer(usr1, 1024, set_bool(&reclaim_done))); + make_reclaimer(usr1, 1024, set_event(&reclaim_done))); grpc_exec_ctx_finish(&exec_ctx); } { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(reclaim_done); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&reclaim_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -350,27 +382,35 @@ static void test_blocked_until_scheduled_destructive_reclaim(void) { grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } - bool reclaim_done = false; + gpr_event reclaim_done; + gpr_event_init(&reclaim_done); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr, true, - make_reclaimer(usr, 1024, set_bool(&reclaim_done))); + make_reclaimer(usr, 1024, set_event(&reclaim_done))); grpc_exec_ctx_finish(&exec_ctx); } { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(reclaim_done); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&reclaim_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&ev, 
grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -387,23 +427,31 @@ static void test_unused_reclaim_is_cancelled(void) { grpc_resource_quota_create("test_unused_reclaim_is_cancelled"); grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); - bool benign_done = false; - bool destructive_done = false; + gpr_event benign_done; + gpr_event_init(&benign_done); + gpr_event destructive_done; + gpr_event_init(&destructive_done); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( - &exec_ctx, usr, false, make_unused_reclaimer(set_bool(&benign_done))); + &exec_ctx, usr, false, make_unused_reclaimer(set_event(&benign_done))); grpc_resource_user_post_reclaimer( &exec_ctx, usr, true, - make_unused_reclaimer(set_bool(&destructive_done))); + make_unused_reclaimer(set_event(&destructive_done))); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!benign_done); - GPR_ASSERT(!destructive_done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } grpc_resource_quota_unref(q); destroy_user(usr); - GPR_ASSERT(benign_done); - GPR_ASSERT(destructive_done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); } static void test_benign_reclaim_is_preferred(void) { @@ -412,35 +460,49 @@ static void test_benign_reclaim_is_preferred(void) { grpc_resource_quota_create("test_benign_reclaim_is_preferred"); grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); - bool benign_done = false; - bool destructive_done = false; + gpr_event benign_done; + gpr_event_init(&benign_done); + gpr_event destructive_done; + gpr_event_init(&destructive_done); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr, false, - make_reclaimer(usr, 1024, set_bool(&benign_done))); + make_reclaimer(usr, 1024, set_event(&benign_done))); grpc_resource_user_post_reclaimer( &exec_ctx, usr, true, - make_unused_reclaimer(set_bool(&destructive_done))); + make_unused_reclaimer(set_event(&destructive_done))); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!benign_done); - GPR_ASSERT(!destructive_done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(benign_done); - GPR_ASSERT(!destructive_done); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + 
grpc_timeout_milliseconds_to_deadline(100)) == + NULL); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -449,8 +511,10 @@ static void test_benign_reclaim_is_preferred(void) { } grpc_resource_quota_unref(q); destroy_user(usr); - GPR_ASSERT(benign_done); - GPR_ASSERT(destructive_done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); } static void test_multiple_reclaims_can_be_triggered(void) { @@ -459,35 +523,49 @@ static void test_multiple_reclaims_can_be_triggered(void) { grpc_resource_quota_create("test_multiple_reclaims_can_be_triggered"); grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); - bool benign_done = false; - bool destructive_done = false; + gpr_event benign_done; + gpr_event_init(&benign_done); + gpr_event destructive_done; + gpr_event_init(&destructive_done); { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr, false, - make_reclaimer(usr, 512, set_bool(&benign_done))); + make_reclaimer(usr, 512, set_event(&benign_done))); grpc_resource_user_post_reclaimer( &exec_ctx, usr, true, - make_reclaimer(usr, 512, set_bool(&destructive_done))); + make_reclaimer(usr, 512, set_event(&destructive_done))); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!benign_done); - GPR_ASSERT(!destructive_done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } { - bool done = false; + gpr_event ev; + gpr_event_init(&ev); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(benign_done); - GPR_ASSERT(destructive_done); - GPR_ASSERT(done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) != + NULL); + ; } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -496,8 +574,10 @@ static void test_multiple_reclaims_can_be_triggered(void) { } grpc_resource_quota_unref(q); destroy_user(usr); - GPR_ASSERT(benign_done); - GPR_ASSERT(destructive_done); + GPR_ASSERT(gpr_event_wait(&benign_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&destructive_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); } static void test_resource_user_stays_allocated_until_memory_released(void) { @@ -538,34 +618,44 @@ test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released( grpc_resource_quota_resize(q, 1024); for (int i = 0; i < 10; i++) { grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); - bool reclaimer_cancelled = false; + gpr_event 
reclaimer_cancelled; + gpr_event_init(&reclaimer_cancelled); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr, false, - make_unused_reclaimer(set_bool(&reclaimer_cancelled))); + make_unused_reclaimer(set_event(&reclaimer_cancelled))); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!reclaimer_cancelled); + GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } { - bool allocated = false; + gpr_event allocated; + gpr_event_init(&allocated); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&allocated)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(allocated); - GPR_ASSERT(!reclaimer_cancelled); + GPR_ASSERT(gpr_event_wait(&allocated, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_unref(&exec_ctx, usr); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!reclaimer_cancelled); + GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_free(&exec_ctx, usr, 1024); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(reclaimer_cancelled); + GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled, + grpc_timeout_seconds_to_deadline(5)) != NULL); } } grpc_resource_quota_unref(q); @@ -578,29 +668,37 @@ static void test_reclaimers_can_be_posted_repeatedly(void) { grpc_resource_quota_resize(q, 1024); grpc_resource_user *usr = grpc_resource_user_create(q, "usr"); { - bool allocated = false; + gpr_event allocated; + gpr_event_init(&allocated); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&allocated)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(allocated); + GPR_ASSERT(gpr_event_wait(&allocated, + grpc_timeout_seconds_to_deadline(5)) != NULL); } for (int i = 0; i < 10; i++) { - bool reclaimer_done = false; + gpr_event reclaimer_done; + gpr_event_init(&reclaimer_done); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resource_user_post_reclaimer( &exec_ctx, usr, false, - make_reclaimer(usr, 1024, set_bool(&reclaimer_done))); + make_reclaimer(usr, 1024, set_event(&reclaimer_done))); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(!reclaimer_done); + GPR_ASSERT(gpr_event_wait(&reclaimer_done, + grpc_timeout_milliseconds_to_deadline(100)) == + NULL); } { - bool allocated = false; + gpr_event allocated; + gpr_event_init(&allocated); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&allocated)); + grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated)); grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(allocated); - GPR_ASSERT(reclaimer_done); + GPR_ASSERT(gpr_event_wait(&allocated, + grpc_timeout_seconds_to_deadline(5)) != NULL); + GPR_ASSERT(gpr_event_wait(&reclaimer_done, + grpc_timeout_seconds_to_deadline(5)) != NULL); } } { -- cgit v1.2.3 From bb29c724dde15cbcc1d0496918e4bee43886045f Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 24 May 2017 14:02:36 -0700 Subject: Fix combiner test --- test/core/iomgr/combiner_test.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) 
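
Two deadlines recur in the converted assertions above: a generous 5-second wait proves an event does eventually fire, while a 100ms wait that must still return NULL is the closest a test can get to asserting "this never fires". Hypothetical helpers (not part of the patch) that name the two idioms:

#include <grpc/support/sync.h>
#include "test/core/util/test_config.h"

/* Positive check: the event must fire within a generous deadline. */
static void expect_event(gpr_event *ev) {
  GPR_ASSERT(gpr_event_wait(ev, grpc_timeout_seconds_to_deadline(5)) != NULL);
}

/* Negative check: a short grace period must elapse with no signal.
   This cannot prove the event never fires, only that it has not yet. */
static void expect_no_event_yet(gpr_event *ev) {
  GPR_ASSERT(gpr_event_wait(ev, grpc_timeout_milliseconds_to_deadline(100)) ==
             NULL);
}
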
From bb29c724dde15cbcc1d0496918e4bee43886045f Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Wed, 24 May 2017 14:02:36 -0700
Subject: Fix combiner test

---
 test/core/iomgr/combiner_test.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

(limited to 'test/core')

diff --git a/test/core/iomgr/combiner_test.c b/test/core/iomgr/combiner_test.c
index 92935ca017..beb5c47623 100644
--- a/test/core/iomgr/combiner_test.c
+++ b/test/core/iomgr/combiner_test.c
@@ -48,23 +48,25 @@ static void test_no_op(void) {
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void set_bool_to_true(grpc_exec_ctx *exec_ctx, void *value,
-                             grpc_error *error) {
-  *(bool *)value = true;
+static void set_event_to_true(grpc_exec_ctx *exec_ctx, void *value,
+                              grpc_error *error) {
+  gpr_event_set(value, (void *)1);
 }
 
 static void test_execute_one(void) {
   gpr_log(GPR_DEBUG, "test_execute_one");
 
   grpc_combiner *lock = grpc_combiner_create();
-  bool done = false;
+  gpr_event done;
+  gpr_event_init(&done);
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_closure_sched(&exec_ctx,
-                     grpc_closure_create(set_bool_to_true, &done,
+                     grpc_closure_create(set_event_to_true, &done,
                                          grpc_combiner_scheduler(lock)),
                      GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(done);
+  GPR_ASSERT(gpr_event_wait(&done, grpc_timeout_seconds_to_deadline(5)) !=
+             NULL);
   GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_one");
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -72,6 +74,7 @@
 typedef struct {
   size_t ctr;
   grpc_combiner *lock;
+  gpr_event done;
 } thd_args;
 
 typedef struct {
@@ -105,6 +108,10 @@ static void execute_many_loop(void *a) {
     // picking it up
     gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
   }
+  grpc_closure_sched(&exec_ctx,
+                     grpc_closure_create(set_event_to_true, &args->done,
+                                         grpc_combiner_scheduler(args->lock)),
+                     GRPC_ERROR_NONE);
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
@@ -119,9 +126,12 @@ static void test_execute_many(void) {
     gpr_thd_options_set_joinable(&options);
     ta[i].ctr = 0;
     ta[i].lock = lock;
+    gpr_event_init(&ta[i].done);
     GPR_ASSERT(gpr_thd_new(&thds[i], execute_many_loop, &ta[i], &options));
   }
   for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
+    GPR_ASSERT(gpr_event_wait(&ta[i].done,
+                              gpr_inf_future(GPR_CLOCK_REALTIME)) != NULL);
     gpr_thd_join(thds[i]);
   }
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -129,15 +139,15 @@
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static bool got_in_finally = false;
+static gpr_event got_in_finally;
 
 static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  got_in_finally = true;
+  gpr_event_set(&got_in_finally, (void *)1);
 }
 
 static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
   grpc_closure_sched(exec_ctx,
-                     grpc_closure_create(in_finally, NULL,
+                     grpc_closure_create(in_finally, arg,
                                          grpc_combiner_finally_scheduler(arg)),
                      GRPC_ERROR_NONE);
 }
@@ -147,12 +157,14 @@ static void test_execute_finally(void) {
   grpc_combiner *lock = grpc_combiner_create();
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  gpr_event_init(&got_in_finally);
   grpc_closure_sched(
       &exec_ctx,
       grpc_closure_create(add_finally, lock, grpc_combiner_scheduler(lock)),
      GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(got_in_finally);
+  GPR_ASSERT(gpr_event_wait(&got_in_finally,
+                            grpc_timeout_seconds_to_deadline(5)) != NULL);
   GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_finally");
   grpc_exec_ctx_finish(&exec_ctx);
 }
-- cgit v1.2.3
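The combiner fix relies on ordering: closures scheduled on a combiner execute in submission order, so once a worker thread schedules set_event_to_true as its final closure, waiting on that event proves every check_one closure it scheduled earlier has run, even if the combiner offloaded execution to another thread after grpc_exec_ctx_finish returned. The same sentinel idea in isolation, sketched against the iomgr-internal API used throughout these patches (wait_for_combiner_drain and signal_drained are hypothetical names, not part of the patch):

    #include <grpc/support/sync.h>
    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/combiner.h"
    #include "src/core/lib/iomgr/exec_ctx.h"
    #include "test/core/util/test_config.h"

    static void signal_drained(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
      gpr_event_set((gpr_event *)arg, (void *)1);
    }

    /* Queue a sentinel closure behind everything already scheduled on the
       combiner; once it fires, all earlier closures have run. */
    static void wait_for_combiner_drain(grpc_combiner *lock) {
      gpr_event drained;
      gpr_event_init(&drained);
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_closure_sched(&exec_ctx,
                         grpc_closure_create(signal_drained, &drained,
                                             grpc_combiner_scheduler(lock)),
                         GRPC_ERROR_NONE);
      grpc_exec_ctx_flush(&exec_ctx);
      GPR_ASSERT(gpr_event_wait(&drained,
                                grpc_timeout_seconds_to_deadline(5)) != NULL);
      grpc_exec_ctx_finish(&exec_ctx);
    }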
From cfa8313fbb85f7fc07c37ef1b7fda0106e040e12 Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Wed, 24 May 2017 21:37:39 +0000
Subject: Fix race

---
 test/core/iomgr/endpoint_tests.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'test/core')

diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c
index 85ba85373b..400a79fd33 100644
--- a/test/core/iomgr/endpoint_tests.c
+++ b/test/core/iomgr/endpoint_tests.c
@@ -274,19 +274,19 @@ static void inc_on_failure(grpc_exec_ctx *exec_ctx, void *arg,
 
 static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count,
                                 int want_fail_count) {
   grpc_exec_ctx_flush(exec_ctx);
-  for (int i = 0; i < 5 && *fail_count < want_fail_count; i++) {
+  gpr_mu_lock(g_mu);
+  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
+  while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 && *fail_count < want_fail_count) {
     grpc_pollset_worker *worker = NULL;
-    gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
-    gpr_timespec deadline =
-        gpr_time_add(now, gpr_time_from_seconds(1, GPR_TIMESPAN));
-    gpr_mu_lock(g_mu);
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "pollset_work",
-        grpc_pollset_work(exec_ctx, g_pollset, &worker, now, deadline)));
+        grpc_pollset_work(exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type), deadline)));
     gpr_mu_unlock(g_mu);
     grpc_exec_ctx_flush(exec_ctx);
+    gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(*fail_count == want_fail_count);
+  gpr_mu_unlock(g_mu);
 }
 
 static void multiple_shutdown_test(grpc_endpoint_test_config config) {
-- cgit v1.2.3
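Two details make the rewritten wait_for_fail_count sound where the old version was racy: *fail_count is only read while g_mu is held, and the mutex is dropped around grpc_exec_ctx_flush so the closures that increment the counter can acquire it. grpc_pollset_work itself must be called with the pollset's mutex held, which is why the lock is reacquired before each loop iteration. Generalized into a hypothetical helper under the same assumptions as the test file (g_mu and g_pollset globals, deadline helpers from test_config.h; wait_until and pred are illustrative):

    #include <stdbool.h>

    static void wait_until(grpc_exec_ctx *exec_ctx, bool (*pred)(void)) {
      gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
      gpr_mu_lock(g_mu);
      while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 &&
             !pred()) {
        grpc_pollset_worker *worker = NULL;
        /* pollset_work requires the pollset's mutex to be held... */
        GPR_ASSERT(GRPC_LOG_IF_ERROR(
            "pollset_work", grpc_pollset_work(exec_ctx, g_pollset, &worker,
                                              gpr_now(deadline.clock_type),
                                              deadline)));
        /* ...but drop it while flushing, so queued closures that need the
           lock can make progress. */
        gpr_mu_unlock(g_mu);
        grpc_exec_ctx_flush(exec_ctx);
        gpr_mu_lock(g_mu);
      }
      GPR_ASSERT(pred());
      gpr_mu_unlock(g_mu);
    }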
From 6cb61569d23e9cd2e6152bd92fcea536d3e661b7 Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Wed, 24 May 2017 22:08:29 +0000
Subject: Fix race

---
 test/core/iomgr/resource_quota_test.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

(limited to 'test/core')

diff --git a/test/core/iomgr/resource_quota_test.c b/test/core/iomgr/resource_quota_test.c
index 6d461eb604..488086d6ab 100644
--- a/test/core/iomgr/resource_quota_test.c
+++ b/test/core/iomgr/resource_quota_test.c
@@ -39,8 +39,23 @@
 #include "src/core/lib/slice/slice_internal.h"
 #include "test/core/util/test_config.h"
 
+gpr_mu g_mu;
+gpr_cv g_cv;
+
 static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
+  gpr_mu_lock(&g_mu);
   ++*(int *)a;
+  gpr_cv_signal(&g_cv);
+  gpr_mu_unlock(&g_mu);
+}
+
+static void assert_counter_becomes(int *ctr, int value) {
+  gpr_mu_lock(&g_mu);
+  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+  while (*ctr != value) {
+    GPR_ASSERT(!gpr_cv_wait(&g_cv, &g_mu, deadline));
+  }
+  gpr_mu_unlock(&g_mu);
 }
 
 static void set_event_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
@@ -730,7 +745,7 @@ static void test_one_slice(void) {
     grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
     grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
     grpc_exec_ctx_finish(&exec_ctx);
-    GPR_ASSERT(num_allocs == start_allocs + 1);
+    assert_counter_becomes(&num_allocs, start_allocs + 1);
   }
 
   {
@@ -763,7 +778,7 @@ static void test_one_slice_deleted_late(void) {
     grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
     grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
     grpc_exec_ctx_finish(&exec_ctx);
-    GPR_ASSERT(num_allocs == start_allocs + 1);
+    assert_counter_becomes(&num_allocs, start_allocs + 1);
   }
 
   {
@@ -807,7 +822,7 @@ static void test_negative_rq_free_pool(void) {
     grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
     grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
     grpc_exec_ctx_finish(&exec_ctx);
-    GPR_ASSERT(num_allocs == start_allocs + 1);
+    assert_counter_becomes(&num_allocs, start_allocs + 1);
   }
 
   grpc_resource_quota_resize(q, 512);
@@ -833,6 +848,8 @@
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
   grpc_init();
+  gpr_mu_init(&g_mu);
+  gpr_cv_init(&g_cv);
   test_no_op();
   test_resize_then_destroy();
   test_resource_user_no_op();
@@ -855,6 +872,8 @@ int main(int argc, char **argv) {
   test_one_slice_deleted_late();
   test_resize_to_zero();
   test_negative_rq_free_pool();
+  gpr_mu_destroy(&g_mu);
+  gpr_cv_destroy(&g_cv);
   grpc_shutdown();
   return 0;
 }
-- cgit v1.2.3
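Here the fix is a classic monitor: inc_int_cb mutates the counter only under g_mu and signals g_cv, while assert_counter_becomes re-checks the predicate in a loop. gpr_cv_wait returns non-zero exactly when the absolute deadline passes, hence the negated assertion. The same monitor repackaged as a self-contained type, a sketch assuming only <grpc/support/sync.h> and the test deadline helpers (the counter struct and function names are illustrative, not from the patch):

    #include <grpc/support/sync.h>
    #include "test/core/util/test_config.h"

    typedef struct {
      gpr_mu mu;
      gpr_cv cv;
      int ctr;
    } counter;

    static void counter_init(counter *c) {
      gpr_mu_init(&c->mu);
      gpr_cv_init(&c->cv);
      c->ctr = 0;
    }

    /* Producer side: mutate only under the mutex, then signal. */
    static void counter_inc(counter *c) {
      gpr_mu_lock(&c->mu);
      c->ctr++;
      gpr_cv_signal(&c->cv);
      gpr_mu_unlock(&c->mu);
    }

    /* Consumer side: re-check the predicate after every wakeup; gpr_cv_wait
       returns non-zero if the absolute deadline passed first. */
    static void counter_await(counter *c, int value) {
      gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
      gpr_mu_lock(&c->mu);
      while (c->ctr != value) {
        GPR_ASSERT(!gpr_cv_wait(&c->cv, &c->mu, deadline));
      }
      gpr_mu_unlock(&c->mu);
    }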
From 26e69f6534f0ce6f122f397b4682d5aff97a7605 Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Wed, 24 May 2017 15:09:23 -0700
Subject: clang-format

---
 .../ext/filters/client_channel/client_channel.c   | 23 +++++++++++++---------
 .../client_channel/resolvers/fake_resolver_test.c | 12 +++++++----
 test/core/iomgr/endpoint_tests.c                  |  6 ++++--
 3 files changed, 26 insertions(+), 15 deletions(-)

(limited to 'test/core')

diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index a0813d00b3..67b011aff3 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -822,11 +822,12 @@ static bool set_call_or_error(call_data *p, call_or_error coe) {
   }
   GPR_ASSERT(existing.subchannel_call == NULL);
   if (coe.error != GRPC_ERROR_NONE) {
-      GPR_ASSERT(coe.subchannel_call == NULL);
-      gpr_atm_rel_store(&p->subchannel_call_or_error, 1|(gpr_atm)coe.error);
+    GPR_ASSERT(coe.subchannel_call == NULL);
+    gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
   } else {
-      GPR_ASSERT(coe.subchannel_call != NULL);
-      gpr_atm_rel_store(&p->subchannel_call_or_error, (gpr_atm)coe.subchannel_call);
+    GPR_ASSERT(coe.subchannel_call != NULL);
+    gpr_atm_rel_store(&p->subchannel_call_or_error,
+                      (gpr_atm)coe.subchannel_call);
   }
   return true;
 }
@@ -973,7 +974,8 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
         .context = calld->subchannel_call_context};
     grpc_error *new_error = grpc_connected_subchannel_create_call(
         exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
-    GPR_ASSERT(set_call_or_error(calld, (call_or_error){.subchannel_call = subchannel_call}));
+    GPR_ASSERT(set_call_or_error(
+        calld, (call_or_error){.subchannel_call = subchannel_call}));
     if (new_error != GRPC_ERROR_NONE) {
       new_error = grpc_error_add_child(new_error, error);
       fail_locked(exec_ctx, calld, new_error);
@@ -1176,7 +1178,7 @@ static void start_transport_stream_op_batch_locked_inner(
       fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
     }
     grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op,
-                                                      GRPC_ERROR_REF(error));
+                                                       GRPC_ERROR_REF(error));
     /* early out */
     return;
   }
@@ -1201,9 +1203,11 @@
     if (calld->connected_subchannel == NULL) {
       grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
           "Call dropped by load balancing policy");
-      set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
+      set_call_or_error(calld,
+                        (call_or_error){.error = GRPC_ERROR_REF(error)});
       fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
-      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, GRPC_ERROR_REF(error));
+      grpc_transport_stream_op_batch_finish_with_failure(
+          exec_ctx, op, GRPC_ERROR_REF(error));
       return;  // Early out.
     }
   } else {
@@ -1223,7 +1227,8 @@ static void start_transport_stream_op_batch_locked_inner(
             .context = calld->subchannel_call_context};
     grpc_error *error = grpc_connected_subchannel_create_call(
         exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
-    GPR_ASSERT(set_call_or_error(calld, (call_or_error){.subchannel_call = subchannel_call}));
+    GPR_ASSERT(set_call_or_error(
+        calld, (call_or_error){.subchannel_call = subchannel_call}));
     if (error != GRPC_ERROR_NONE) {
       fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
       grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
diff --git a/test/core/client_channel/resolvers/fake_resolver_test.c b/test/core/client_channel/resolvers/fake_resolver_test.c
index 88ff4f9b56..934fb1b074 100644
--- a/test/core/client_channel/resolvers/fake_resolver_test.c
+++ b/test/core/client_channel/resolvers/fake_resolver_test.c
@@ -83,7 +83,7 @@ void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
       grpc_lb_addresses_cmp(actual_lb_addresses, expected_lb_addresses) == 0);
   grpc_channel_args_destroy(exec_ctx, res->resolver_result);
   grpc_channel_args_destroy(exec_ctx, res->expected_resolver_result);
-  gpr_event_set(&res->ev, (void*)1);
+  gpr_event_set(&res->ev, (void *)1);
 }
 
 static void test_fake_resolver() {
@@ -126,7 +126,8 @@ static void test_fake_resolver() {
   grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
                             on_resolution);
   grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(gpr_event_wait(&on_res_arg.ev, grpc_timeout_seconds_to_deadline(5)) != NULL);
+  GPR_ASSERT(gpr_event_wait(&on_res_arg.ev,
+                            grpc_timeout_seconds_to_deadline(5)) != NULL);
 
   // Setup update.
   grpc_uri *uris_update[] = {
@@ -161,7 +162,8 @@ static void test_fake_resolver() {
   grpc_resolver_next_locked(&exec_ctx, resolver,
                             &on_res_arg_update.resolver_result, on_resolution);
   grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(gpr_event_wait(&on_res_arg_update.ev, grpc_timeout_seconds_to_deadline(5)) != NULL);
+  GPR_ASSERT(gpr_event_wait(&on_res_arg_update.ev,
+                            grpc_timeout_seconds_to_deadline(5)) != NULL);
 
   // Requesting a new resolution without re-sending the response shouldn't
   // trigger the resolution callback.
@@ -169,7 +171,9 @@ static void test_fake_resolver() {
   grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
                             on_resolution);
   grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(gpr_event_wait(&on_res_arg.ev, grpc_timeout_milliseconds_to_deadline(100)) == NULL);
+  GPR_ASSERT(gpr_event_wait(&on_res_arg.ev,
+                            grpc_timeout_milliseconds_to_deadline(100)) ==
+             NULL);
 
   GRPC_COMBINER_UNREF(&exec_ctx, combiner, "test_fake_resolver");
   GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_fake_resolver");
diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c
index 400a79fd33..6f86d40a29 100644
--- a/test/core/iomgr/endpoint_tests.c
+++ b/test/core/iomgr/endpoint_tests.c
@@ -276,11 +276,13 @@ static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count,
   grpc_exec_ctx_flush(exec_ctx);
   gpr_mu_lock(g_mu);
   gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
-  while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 && *fail_count < want_fail_count) {
+  while (gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0 &&
+         *fail_count < want_fail_count) {
     grpc_pollset_worker *worker = NULL;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "pollset_work",
-        grpc_pollset_work(exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type), deadline)));
+        grpc_pollset_work(exec_ctx, g_pollset, &worker,
+                          gpr_now(deadline.clock_type), deadline)));
     gpr_mu_unlock(g_mu);
     grpc_exec_ctx_flush(exec_ctx);
     gpr_mu_lock(g_mu);
-- cgit v1.2.3
From 0f016bdcf791685eb44cdc6e276b42618f5750f8 Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Thu, 8 Jun 2017 09:26:42 -0700
Subject: Fix test verification

---
 .../bad_client/tests/server_registered_method.c | 26 +++++-----------------
 1 file changed, 5 insertions(+), 21 deletions(-)

(limited to 'test/core')

diff --git a/test/core/bad_client/tests/server_registered_method.c b/test/core/bad_client/tests/server_registered_method.c
index 20cc714cc0..f52350302b 100644
--- a/test/core/bad_client/tests/server_registered_method.c
+++ b/test/core/bad_client/tests/server_registered_method.c
@@ -68,27 +68,11 @@ static void verifier_succeeds(grpc_server *server, grpc_completion_queue *cq,
 
 static void verifier_fails(grpc_server *server, grpc_completion_queue *cq,
                            void *registered_method) {
-  grpc_call_error error;
-  grpc_call *s;
-  cq_verifier *cqv = cq_verifier_create(cq);
-  grpc_metadata_array request_metadata_recv;
-  gpr_timespec deadline;
-  grpc_byte_buffer *payload = NULL;
-
-  grpc_metadata_array_init(&request_metadata_recv);
-
-  error = grpc_server_request_registered_call(server, registered_method, &s,
-                                              &deadline, &request_metadata_recv,
-                                              &payload, cq, cq, tag(101));
-  GPR_ASSERT(GRPC_CALL_OK == error);
-  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
-  cq_verify(cqv);
-
-  GPR_ASSERT(payload == NULL);
-
-  grpc_metadata_array_destroy(&request_metadata_recv);
-  grpc_call_unref(s);
-  cq_verifier_destroy(cqv);
+  while (grpc_server_has_open_connections(server)) {
+    GPR_ASSERT(grpc_completion_queue_next(
+                   cq, grpc_timeout_milliseconds_to_deadline(20), NULL)
+                   .type == GRPC_QUEUE_TIMEOUT);
+  }
 }
 
 int main(int argc, char **argv) {
-- cgit v1.2.3
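The rewritten verifier_fails inverts the check: a malformed request should never surface as a registered call, so rather than requesting a call and waiting for a completion that will never arrive, the loop pumps the completion queue in 20ms slices until the server has dropped the connection, asserting that every wakeup is a timeout rather than a completion. Spinning on grpc_completion_queue_next also keeps a poller active so teardown can make progress. The same "expect nothing" helper in isolation, a sketch using only public API (the function name and wait_condition parameter are illustrative):

    #include <grpc/grpc.h>
    #include "test/core/util/test_config.h"

    /* Assert that no completion appears on cq for as long as
       wait_condition() holds; each call to grpc_completion_queue_next also
       drives polling. */
    static void expect_only_timeouts(grpc_completion_queue *cq,
                                     int (*wait_condition)(void)) {
      while (wait_condition()) {
        grpc_event ev = grpc_completion_queue_next(
            cq, grpc_timeout_milliseconds_to_deadline(20), NULL);
        GPR_ASSERT(ev.type == GRPC_QUEUE_TIMEOUT);
      }
    }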
From 8239b804598c114be892e0c4ee96041d9781521f Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Thu, 8 Jun 2017 21:09:59 +0000
Subject: Ensure a poller exists

---
 test/core/bad_client/bad_client.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'test/core')

diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c
index cfe1ce51f8..1b63bd93cc 100644
--- a/test/core/bad_client/bad_client.c
+++ b/test/core/bad_client/bad_client.c
@@ -169,8 +169,11 @@ void grpc_run_bad_client_test(
     grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming,
                        &read_done_closure);
     grpc_exec_ctx_finish(&exec_ctx);
-    GPR_ASSERT(
-        gpr_event_wait(&args.read_done, grpc_timeout_seconds_to_deadline(5)));
+    gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+    while (!gpr_event_get(&args.read_done)) {
+      GPR_ASSERT(gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0);
+      GPR_ASSERT(grpc_completion_queue_next(a.cq, grpc_timeout_milliseconds_to_deadline(100), NULL).type == GRPC_QUEUE_TIMEOUT);
+    }
     grpc_slice_buffer_destroy_internal(&exec_ctx, &args.incoming);
   }
   // Shutdown.
-- cgit v1.2.3

From ae6083674ad9fef86223968c5ffe12bc5a133d83 Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Thu, 8 Jun 2017 14:10:49 -0700
Subject: clang-format

---
 src/core/ext/filters/client_channel/client_channel.c |  3 +--
 src/core/lib/iomgr/tcp_windows.c                     | 13 ++++---------
 test/core/bad_client/bad_client.c                    |  4 +++-
 3 files changed, 8 insertions(+), 12 deletions(-)

(limited to 'test/core')

diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index 0ce26aff37..3ec539884b 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -1222,8 +1222,7 @@ static void start_transport_stream_op_batch_locked_inner(
       set_call_or_error(calld,
                         (call_or_error){.error = GRPC_ERROR_REF(error)});
       fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
-      grpc_transport_stream_op_batch_finish_with_failure(
-          exec_ctx, op, error);
+      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
       return;  // Early out.
     }
   } else {
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 3f41e7a7fb..dbae42e936 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -402,15 +402,10 @@ static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
 
 static int win_get_fd(grpc_endpoint *ep) { return -1; }
 
-static grpc_endpoint_vtable vtable = {win_read,
-                                      win_write,
-                                      win_add_to_pollset,
-                                      win_add_to_pollset_set,
-                                      win_shutdown,
-                                      win_destroy,
-                                      win_get_resource_user,
-                                      win_get_peer,
-                                      win_get_fd};
+static grpc_endpoint_vtable vtable = {
+    win_read,     win_write,   win_add_to_pollset,    win_add_to_pollset_set,
+    win_shutdown, win_destroy, win_get_resource_user, win_get_peer,
+    win_get_fd};
 
 grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
                                grpc_channel_args *channel_args,
diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c
index 1b63bd93cc..4f8e428278 100644
--- a/test/core/bad_client/bad_client.c
+++ b/test/core/bad_client/bad_client.c
@@ -172,7 +172,9 @@ void grpc_run_bad_client_test(
     gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
     while (!gpr_event_get(&args.read_done)) {
       GPR_ASSERT(gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0);
-      GPR_ASSERT(grpc_completion_queue_next(a.cq, grpc_timeout_milliseconds_to_deadline(100), NULL).type == GRPC_QUEUE_TIMEOUT);
+      GPR_ASSERT(grpc_completion_queue_next(
+                     a.cq, grpc_timeout_milliseconds_to_deadline(100), NULL)
+                     .type == GRPC_QUEUE_TIMEOUT);
     }
     grpc_slice_buffer_destroy_internal(&exec_ctx, &args.incoming);
   }
-- cgit v1.2.3
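The "Ensure a poller exists" change is the same lesson as the verifier fix: after this refactoring nothing polls in the background, so a bare gpr_event_wait can block forever while the read that would set the event sits undelivered. The loop instead alternates short completion-queue waits, which drive polling, with checks of the event and an overall deadline. As a reusable sketch under the same assumptions (pump_until_event is a hypothetical helper name; cq is the test's completion queue):

    #include <grpc/grpc.h>
    #include <grpc/support/sync.h>
    #include "test/core/util/test_config.h"

    /* Wait for ev while pumping cq so I/O keeps making progress; fail if
       the overall deadline passes first. */
    static void pump_until_event(grpc_completion_queue *cq, gpr_event *ev,
                                 gpr_timespec deadline) {
      while (!gpr_event_get(ev)) {
        GPR_ASSERT(gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0);
        GPR_ASSERT(grpc_completion_queue_next(
                       cq, grpc_timeout_milliseconds_to_deadline(100), NULL)
                       .type == GRPC_QUEUE_TIMEOUT);
      }
    }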