Diffstat (limited to 'src/core/lib/iomgr/ev_poll_and_epoll_posix.c')
-rw-r--r--    src/core/lib/iomgr/ev_poll_and_epoll_posix.c    273
1 file changed, 190 insertions, 83 deletions
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
index 3c8127e1a8..9e306af5fa 100644
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
@@ -126,6 +126,9 @@ struct grpc_fd {
grpc_closure *on_done_closure;
grpc_iomgr_object iomgr_object;
+
+ /* The pollset that last noticed and notified that the fd is readable */
+ grpc_pollset *read_notifier_pollset;
};
/* Begin polling on an fd.
@@ -147,7 +150,8 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
if got_read or got_write are 1, also does the become_{readable,writable} as
appropriate. */
static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
- int got_read, int got_write);
+ int got_read, int got_write,
+ grpc_pollset *read_notifier_pollset);
/* Return 1 if this fd is orphaned, 0 otherwise */
static bool fd_is_orphaned(grpc_fd *fd);
@@ -217,9 +221,10 @@ struct grpc_pollset {
struct grpc_pollset_vtable {
void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd, int and_unlock_pollset);
- void (*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec deadline, gpr_timespec now);
+ grpc_error *(*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now);
void (*finish_shutdown)(grpc_pollset *pollset);
void (*destroy)(grpc_pollset *pollset);
};
@@ -247,9 +252,9 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
-static void pollset_kick_ext(grpc_pollset *p,
- grpc_pollset_worker *specific_worker,
- uint32_t flags);
+static grpc_error *pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ uint32_t flags) GRPC_MUST_USE_RESULT;
/* turn a pollset into a multipoller: platform specific */
typedef void (*platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
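Note: the hunk above changes pollset_kick_ext from void to grpc_error * and tags it GRPC_MUST_USE_RESULT so callers cannot silently drop a kick failure. A minimal, self-contained sketch of that compiler-enforced pattern is below; it is a simplified stand-in (toy_error, kick_wakeup_fd), not gRPC's real macro or error type, which live elsewhere in the tree.

/* --- illustrative sketch (not part of the diff) ---
   Simplified stand-in for the GRPC_MUST_USE_RESULT pattern. */
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

typedef struct { const char *msg; } toy_error; /* stand-in for grpc_error* */

/* A "kick" that can fail; callers that drop the result get a warning. */
static toy_error kick_wakeup_fd(int wakeup_fd) MUST_USE_RESULT;

static toy_error kick_wakeup_fd(int wakeup_fd) {
  if (wakeup_fd < 0) {
    toy_error e = {"wakeup fd is closed"};
    return e;
  }
  toy_error none = {NULL};
  return none;
}

int main(void) {
  toy_error err = kick_wakeup_fd(-1); /* result must be consumed */
  if (err.msg != NULL) fprintf(stderr, "kick failed: %s\n", err.msg);
  return 0;
}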
@@ -342,6 +347,7 @@ static grpc_fd *alloc_fd(int fd) {
r->on_done_closure = NULL;
r->closed = 0;
r->released = 0;
+ r->read_notifier_pollset = NULL;
gpr_mu_unlock(&r->mu);
return r;
}
@@ -415,12 +421,13 @@ static bool fd_is_orphaned(grpc_fd *fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
-static void pollset_kick_locked(grpc_fd_watcher *watcher) {
+static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
- pollset_kick_ext(watcher->pollset, watcher->worker,
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
+ return err;
}
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
@@ -459,7 +466,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
} else {
remove_fd_from_all_epoll_sets(fd->fd);
}
- grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, true, NULL);
+ grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
}
static int fd_wrapped_fd(grpc_fd *fd) {
@@ -508,15 +515,27 @@ static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
+static grpc_error *fd_shutdown_error(bool shutdown) {
+ if (!shutdown) {
+ return GRPC_ERROR_NONE;
+ } else {
+ return GRPC_ERROR_CREATE("FD shutdown");
+ }
+}
+
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
- if (*st == CLOSURE_NOT_READY) {
+ if (fd->shutdown) {
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
+ NULL);
+ } else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
- grpc_exec_ctx_enqueue(exec_ctx, closure, !fd->shutdown, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
+ NULL);
maybe_wake_one_watcher_locked(fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
@@ -539,19 +558,35 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
return 0;
} else {
/* waiting ==> queue closure */
- grpc_exec_ctx_enqueue(exec_ctx, *st, !fd->shutdown, NULL);
+ grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
*st = CLOSURE_NOT_READY;
return 1;
}
}
+static void set_read_notifier_pollset_locked(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) {
+ fd->read_notifier_pollset = read_notifier_pollset;
+}
+
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
- GPR_ASSERT(!fd->shutdown);
- fd->shutdown = 1;
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ /* only shutdown once */
+ if (!fd->shutdown) {
+ fd->shutdown = 1;
+ /* signal read/write closed to OS so that future operations fail */
+ shutdown(fd->fd, SHUT_RDWR);
+ set_ready_locked(exec_ctx, fd, &fd->read_closure);
+ set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ }
+ gpr_mu_unlock(&fd->mu);
+}
+
+static bool fd_is_shutdown(grpc_fd *fd) {
+ gpr_mu_lock(&fd->mu);
+ bool r = fd->shutdown;
gpr_mu_unlock(&fd->mu);
+ return r;
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
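Note: the hunk above makes fd_shutdown idempotent (the old GPR_ASSERT(!fd->shutdown) is gone) and adds a shutdown(2) call so that in-flight and future reads/writes fail promptly; fd_is_shutdown reads the flag under the same mutex. A hedged, self-contained analogue of that pattern on a plain POSIX socket (tracked_fd is an invented stand-in, not gRPC's grpc_fd):

/* --- illustrative sketch (not part of the diff) ---
   Idempotent, mutex-guarded socket shutdown. Compile with -pthread. */
#include <pthread.h>
#include <stdbool.h>
#include <sys/socket.h>
#include <unistd.h>

typedef struct {
  pthread_mutex_t mu;
  int fd;
  bool is_shutdown;
} tracked_fd;

static void tracked_fd_shutdown(tracked_fd *t) {
  pthread_mutex_lock(&t->mu);
  if (!t->is_shutdown) {           /* only shut down once */
    t->is_shutdown = true;
    shutdown(t->fd, SHUT_RDWR);    /* make pending/future I/O fail promptly */
    /* the real code also flushes the pending read/write closures here */
  }
  pthread_mutex_unlock(&t->mu);
}

static bool tracked_fd_is_shutdown(tracked_fd *t) {
  pthread_mutex_lock(&t->mu);
  bool r = t->is_shutdown;
  pthread_mutex_unlock(&t->mu);
  return r;
}

int main(void) {
  int sv[2];
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0) return 1;
  tracked_fd t;
  pthread_mutex_init(&t.mu, NULL);
  t.fd = sv[0];
  t.is_shutdown = false;
  tracked_fd_shutdown(&t);
  tracked_fd_shutdown(&t);         /* second call is a harmless no-op */
  close(sv[0]);
  close(sv[1]);
  return tracked_fd_is_shutdown(&t) ? 0 : 1;
}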
@@ -568,6 +603,18 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_mu_unlock(&fd->mu);
}
+/* Return the read-notifier pollset */
+static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
+ grpc_pollset *notifier = NULL;
+
+ gpr_mu_lock(&fd->mu);
+ notifier = fd->read_notifier_pollset;
+ gpr_mu_unlock(&fd->mu);
+
+ return notifier;
+}
+
static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_pollset_worker *worker, uint32_t read_mask,
uint32_t write_mask, grpc_fd_watcher *watcher) {
@@ -620,7 +667,8 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
}
static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
- int got_read, int got_write) {
+ int got_read, int got_write,
+ grpc_pollset *read_notifier_pollset) {
int was_polling = 0;
int kick = 0;
grpc_fd *fd = watcher->fd;
@@ -656,6 +704,10 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
kick = 1;
}
+
+ if (read_notifier_pollset != NULL) {
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+ }
}
if (got_write) {
if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
@@ -717,10 +769,19 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next->prev = worker;
}
-static void pollset_kick_ext(grpc_pollset *p,
- grpc_pollset_worker *specific_worker,
- uint32_t flags) {
+static void kick_append_error(grpc_error **composite, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (*composite == GRPC_ERROR_NONE) {
+ *composite = GRPC_ERROR_CREATE("Kick Failure");
+ }
+ *composite = grpc_error_add_child(*composite, error);
+}
+
+static grpc_error *pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
+ grpc_error *error = GRPC_ERROR_NONE;
/* pollset->mu already held */
if (specific_worker != NULL) {
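Note: kick_append_error above (and work_combine_error later in the diff) lazily creates a composite "Kick Failure" error the first time a real failure appears, then attaches every subsequent failure as a child of that composite. A self-contained sketch of that accumulation pattern, using a toy linked-list error type in place of grpc_error / grpc_error_add_child:

/* --- illustrative sketch (not part of the diff) ---
   Lazy composite-error accumulation with a toy error type. */
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_error {
  char msg[64];
  struct toy_error *children; /* singly linked list of child errors */
  struct toy_error *next;
} toy_error;

#define TOY_ERROR_NONE NULL

static toy_error *toy_error_create(const char *msg) {
  toy_error *e = calloc(1, sizeof(*e));
  snprintf(e->msg, sizeof(e->msg), "%s", msg);
  return e;
}

/* Mirrors kick_append_error: do nothing for "no error"; create the composite
   only when the first real error arrives; then chain children onto it. */
static void append_error(toy_error **composite, toy_error *error,
                         const char *composite_msg) {
  if (error == TOY_ERROR_NONE) return;
  if (*composite == TOY_ERROR_NONE) {
    *composite = toy_error_create(composite_msg);
  }
  error->next = (*composite)->children;
  (*composite)->children = error;
}

int main(void) {
  toy_error *err = TOY_ERROR_NONE;
  append_error(&err, TOY_ERROR_NONE, "Kick Failure");  /* still no error */
  append_error(&err, toy_error_create("wakeup write failed"), "Kick Failure");
  append_error(&err, toy_error_create("second wakeup failed"), "Kick Failure");
  for (toy_error *c = err ? err->children : NULL; c; c = c->next) {
    printf("%s: %s\n", err->msg, c->msg);
  }
  /* (freeing omitted for brevity) */
  return 0;
}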
@@ -730,25 +791,28 @@ static void pollset_kick_ext(grpc_pollset *p,
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ kick_append_error(
+ &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
}
- p->kicked_without_pollers = 1;
+ p->kicked_without_pollers = true;
GPR_TIMER_END("pollset_kick_ext.broadcast", 0);
} else if (gpr_tls_get(&g_current_thread_worker) !=
(intptr_t)specific_worker) {
GPR_TIMER_MARK("different_thread_worker", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
- specific_worker->reevaluate_polling_on_wakeup = 1;
+ specific_worker->reevaluate_polling_on_wakeup = true;
}
- specific_worker->kicked_specifically = 1;
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ specific_worker->kicked_specifically = true;
+ kick_append_error(&error,
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
} else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
GPR_TIMER_MARK("kick_yoself", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
- specific_worker->reevaluate_polling_on_wakeup = 1;
+ specific_worker->reevaluate_polling_on_wakeup = true;
}
- specific_worker->kicked_specifically = 1;
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ specific_worker->kicked_specifically = true;
+ kick_append_error(&error,
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
}
} else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
@@ -769,39 +833,41 @@ static void pollset_kick_ext(grpc_pollset *p,
if (specific_worker != NULL) {
GPR_TIMER_MARK("finally_kick", 0);
push_back_worker(p, specific_worker);
- grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
+ kick_append_error(
+ &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
}
} else {
GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = 1;
+ p->kicked_without_pollers = true;
}
}
GPR_TIMER_END("pollset_kick_ext", 0);
+ return error;
}
-static void pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- pollset_kick_ext(p, specific_worker, 0);
+static grpc_error *pollset_kick(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker) {
+ return pollset_kick_ext(p, specific_worker, 0);
}
/* global state management */
-static void pollset_global_init(void) {
+static grpc_error *pollset_global_init(void) {
gpr_tls_init(&g_current_thread_poller);
gpr_tls_init(&g_current_thread_worker);
- grpc_wakeup_fd_global_init();
- grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
+ return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
}
static void pollset_global_shutdown(void) {
grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
- grpc_wakeup_fd_global_destroy();
}
-static void kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
+static grpc_error *kick_poller(void) {
+ return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
+}
/* main interface */
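Note: pollset_global_init now returns whatever error grpc_wakeup_fd_init reports, and kick_poller forwards the result of writing to the global wakeup fd. Conceptually a wakeup fd is a descriptor that poll()/epoll_wait() watches so another thread can interrupt the wait; the sketch below uses the classic self-pipe form, which is an assumed simplification (gRPC may use eventfd or other mechanisms) with invented names (wakeup_fd_init, wakeup_fd_wakeup, wakeup_fd_consume):

/* --- illustrative sketch (not part of the diff) ---
   Self-pipe wakeup: init/kick/consume report errno instead of logging. */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

typedef struct { int read_fd, write_fd; } wakeup_fd;

static int wakeup_fd_init(wakeup_fd *w) {
  int fds[2];
  if (pipe(fds) != 0) return errno;     /* report failure to the caller */
  fcntl(fds[0], F_SETFL, O_NONBLOCK);
  fcntl(fds[1], F_SETFL, O_NONBLOCK);
  w->read_fd = fds[0];
  w->write_fd = fds[1];
  return 0;
}

static int wakeup_fd_wakeup(wakeup_fd *w) {          /* the "kick" */
  char c = 0;
  if (write(w->write_fd, &c, 1) < 0 && errno != EAGAIN) return errno;
  return 0;
}

static int wakeup_fd_consume(wakeup_fd *w) {         /* drain after poll() */
  char buf[64];
  for (;;) {
    ssize_t n = read(w->read_fd, buf, sizeof(buf));
    if (n > 0) continue;
    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) return 0;
    if (n < 0 && errno == EINTR) continue;
    return n < 0 ? errno : 0;           /* n == 0: writer closed, nothing left */
  }
}

int main(void) {
  wakeup_fd w;
  if (wakeup_fd_init(&w) != 0) return 1;
  wakeup_fd_wakeup(&w);                              /* kick before polling */
  struct pollfd pfd = {.fd = w.read_fd, .events = POLLIN};
  int r = poll(&pfd, 1, 1000);                       /* returns immediately */
  printf("poll returned %d\n", r);
  wakeup_fd_consume(&w);
  close(w.read_fd);
  close(w.write_fd);
  return 0;
}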
@@ -864,14 +930,23 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
pollset->vtable->finish_shutdown(pollset);
- grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, true, NULL);
+ grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+}
+
+static void work_combine_error(grpc_error **composite, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (*composite == GRPC_ERROR_NONE) {
+ *composite = GRPC_ERROR_CREATE("pollset_work");
+ }
+ *composite = grpc_error_add_child(*composite, error);
}
-static void pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl, gpr_timespec now,
- gpr_timespec deadline) {
+static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl,
+ gpr_timespec now, gpr_timespec deadline) {
grpc_pollset_worker worker;
*worker_hdl = &worker;
+ grpc_error *error = GRPC_ERROR_NONE;
/* pollset->mu already held */
int added_worker = 0;
@@ -887,7 +962,10 @@ static void pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
- grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
+ error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
+ if (error != GRPC_ERROR_NONE) {
+ return error;
+ }
}
worker.kicked_specifically = 0;
/* If there's work waiting for the pollset to be idle, and the
@@ -924,8 +1002,9 @@ static void pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
- pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, &worker,
- deadline, now);
+ work_combine_error(&error,
+ pollset->vtable->maybe_work_and_unlock(
+ exec_ctx, pollset, &worker, deadline, now));
GPR_TIMER_END("maybe_work_and_unlock", 0);
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
@@ -987,6 +1066,7 @@ static void pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
*worker_hdl = NULL;
GPR_TIMER_END("pollset_work", 0);
+ return error;
}
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -1035,7 +1115,7 @@ typedef struct grpc_unary_promote_args {
} grpc_unary_promote_args;
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
- bool success) {
+ grpc_error *error) {
grpc_unary_promote_args *up_args = args;
const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
grpc_pollset *pollset = up_args->pollset;
@@ -1137,7 +1217,8 @@ static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
up_args->promotion_closure.cb = basic_do_promote;
up_args->promotion_closure.cb_arg = up_args;
- grpc_closure_list_add(&pollset->idle_jobs, &up_args->promotion_closure, 1);
+ grpc_closure_list_append(&pollset->idle_jobs, &up_args->promotion_closure,
+ GRPC_ERROR_NONE);
pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
exit:
@@ -1146,11 +1227,9 @@ exit:
}
}
-static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec deadline,
- gpr_timespec now) {
+static grpc_error *basic_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
@@ -1160,6 +1239,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
int timeout;
int r;
nfds_t nfds;
+ grpc_error *error = GRPC_ERROR_NONE;
fd = pollset->data.ptr;
if (fd && fd_is_orphaned(fd)) {
@@ -1200,33 +1280,37 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
if (r < 0) {
if (errno != EINTR) {
- gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
}
} else if (r == 0) {
if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
}
} else {
if (pfd[0].revents & POLLIN_CHECK) {
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
}
if (pfd[1].revents & POLLIN_CHECK) {
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
}
if (nfds > 2) {
fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
- pfd[2].revents & POLLOUT_CHECK);
+ pfd[2].revents & POLLOUT_CHECK, pollset);
} else if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
}
}
if (fd) {
GRPC_FD_UNREF(fd, "basicpoll_begin");
}
+
+ return error;
}
static void basic_pollset_destroy(grpc_pollset *pollset) {
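Note: in the hunk above, a failed poll() no longer just logs "poll() failed" via gpr_log; the errno is wrapped with GRPC_OS_ERROR(errno, "poll") and folded into the error returned to the caller. A self-contained sketch of capturing errno from poll() into a value the caller can inspect; os_error_t and os_error() are hypothetical stand-ins, while the real GRPC_OS_ERROR records the errno, a message, and the syscall name on a grpc_error:

/* --- illustrative sketch (not part of the diff) ---
   Turn a failed poll() into an error value instead of only logging it. */
#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  int err;             /* saved errno, 0 == success */
  const char *syscall; /* which call failed */
} os_error_t;

static os_error_t os_error(int err, const char *syscall_name) {
  os_error_t e = {err, syscall_name};
  return e;
}

static os_error_t poll_once(struct pollfd *pfds, nfds_t nfds, int timeout_ms) {
  int r = poll(pfds, nfds, timeout_ms);
  if (r < 0 && errno != EINTR) {
    return os_error(errno, "poll"); /* propagate instead of gpr_log */
  }
  return os_error(0, NULL);
}

int main(void) {
  struct pollfd pfd = {-1, POLLIN, 0};  /* a negative fd is simply ignored, */
  os_error_t e = poll_once(&pfd, 1, 0); /* so this call normally succeeds */
  if (e.err != 0) {
    fprintf(stderr, "%s failed: %s\n", e.syscall, strerror(e.err));
  }
  return 0;
}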
@@ -1287,7 +1371,7 @@ exit:
}
}
-static void multipoll_with_poll_pollset_maybe_work_and_unlock(
+static grpc_error *multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
@@ -1301,6 +1385,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
/* TODO(ctiller): inline some elements to avoid an allocation */
grpc_fd_watcher *watchers;
struct pollfd *pfds;
+ grpc_error *error = GRPC_ERROR_NONE;
h = pollset->data.ptr;
timeout = poll_deadline_to_millis_timeout(deadline, now);
@@ -1353,34 +1438,38 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
if (r < 0) {
if (errno != EINTR) {
- gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
}
if (pfds[1].revents & POLLIN_CHECK) {
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
continue;
}
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
- pfds[i].revents & POLLOUT_CHECK);
+ pfds[i].revents & POLLOUT_CHECK, pollset);
}
}
gpr_free(pfds);
gpr_free(watchers);
+
+ return error;
}
static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
@@ -1451,20 +1540,31 @@ static void poll_become_multipoller(grpc_exec_ctx *exec_ctx,
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
-static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st) {
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st,
+ grpc_pollset *read_notifier_pollset) {
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
gpr_mu_lock(&fd->mu);
set_ready_locked(exec_ctx, fd, st);
+
+ /* A non-NULL read_notifier_pollset means that the fd is readable. */
+ if (read_notifier_pollset != NULL) {
+ /* Note: Since the fd might be a part of multiple pollsets, this might be
+ * called multiple times (for each time the fd becomes readable) and it is
+ * okay to set the fd's read-notifier pollset to any one of these pollsets */
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+ }
+
gpr_mu_unlock(&fd->mu);
}
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->read_closure);
+static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_pollset *notifier_pollset) {
+ set_ready(exec_ctx, fd, &fd->read_closure, notifier_pollset);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->write_closure);
+ set_ready(exec_ctx, fd, &fd->write_closure, NULL);
}
struct epoll_fd_list {
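Note: the comment in the hunk above explains that an fd can belong to several pollsets, so set_ready may record a different read-notifier pollset each time the fd becomes readable; last-writer-wins under fd->mu is acceptable because callers only need some pollset that recently saw the fd readable. A hedged sketch of that "record the last notifier under a lock" idea with plain pthreads (fake_fd, pollset, and the helpers are simplified stand-ins, not gRPC's types):

/* --- illustrative sketch (not part of the diff) ---
   Poller threads race to record themselves as the read notifier; last writer
   wins under the mutex, and any recent notifier is acceptable.
   Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>

typedef struct { int id; } pollset;

typedef struct {
  pthread_mutex_t mu;
  pollset *read_notifier; /* last pollset that saw this fd readable */
} fake_fd;

static void set_read_notifier(fake_fd *fd, pollset *notifier) {
  pthread_mutex_lock(&fd->mu);
  fd->read_notifier = notifier; /* overwriting an earlier value is intentional */
  pthread_mutex_unlock(&fd->mu);
}

static pollset *get_read_notifier(fake_fd *fd) {
  pthread_mutex_lock(&fd->mu);
  pollset *p = fd->read_notifier;
  pthread_mutex_unlock(&fd->mu);
  return p;
}

static fake_fd g_fd;

static void *poller_thread(void *arg) {
  set_read_notifier(&g_fd, (pollset *)arg); /* "I noticed the fd was readable" */
  return NULL;
}

int main(void) {
  pollset a = {1}, b = {2};
  pthread_mutex_init(&g_fd.mu, NULL);
  pthread_t t1, t2;
  pthread_create(&t1, NULL, poller_thread, &a);
  pthread_create(&t2, NULL, poller_thread, &b);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  printf("read notifier is pollset %d\n", get_read_notifier(&g_fd)->id);
  return 0;
}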
@@ -1556,11 +1656,11 @@ static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
}
- fd_end_poll(exec_ctx, &watcher, 0, 0);
+ fd_end_poll(exec_ctx, &watcher, 0, 0, NULL);
}
static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
- bool iomgr_status) {
+ grpc_error *error) {
delayed_add *da = arg;
if (!fd_is_orphaned(da->fd)) {
@@ -1573,7 +1673,8 @@ static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
/* We don't care about this pollset anymore. */
if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
da->pollset->called_shutdown = 1;
- grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, true, NULL);
+ grpc_exec_ctx_sched(exec_ctx, da->pollset->shutdown_done, GRPC_ERROR_NONE,
+ NULL);
}
}
gpr_mu_unlock(&da->pollset->mu);
@@ -1597,14 +1698,14 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
GRPC_FD_REF(fd, "delayed_add");
grpc_closure_init(&da->closure, perform_delayed_add, da);
pollset->in_flight_cbs++;
- grpc_exec_ctx_enqueue(exec_ctx, &da->closure, true, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &da->closure, GRPC_ERROR_NONE, NULL);
}
}
/* TODO(klempner): We probably want to turn this down a bit */
#define GRPC_EPOLL_MAX_EVENTS 1000
-static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
+static grpc_error *multipoll_with_epoll_pollset_maybe_work_and_unlock(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now) {
struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
@@ -1613,6 +1714,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
epoll_hdr *h = pollset->data.ptr;
int timeout_ms;
struct pollfd pfds[2];
+ grpc_error *error = GRPC_ERROR_NONE;
/* If you want to ignore epoll's ability to sanely handle parallel pollers,
* for a more apples-to-apples performance comparison with poll, add a
@@ -1641,13 +1743,14 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
if (poll_rv < 0) {
if (errno != EINTR) {
- gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
} else if (poll_rv == 0) {
/* do nothing */
} else {
if (pfds[0].revents) {
- grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
}
if (pfds[1].revents) {
do {
@@ -1655,7 +1758,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
if (ep_rv < 0) {
if (errno != EINTR) {
- gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "epoll_wait"));
}
} else {
int i;
@@ -1667,10 +1770,11 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
if (fd == NULL) {
- grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
+ work_combine_error(&error, grpc_wakeup_fd_consume_wakeup(
+ &grpc_global_wakeup_fd));
} else {
if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd);
+ fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
@@ -1681,6 +1785,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
} while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
}
}
+ return error;
}
static void multipoll_with_epoll_pollset_finish_shutdown(
@@ -1897,8 +2002,10 @@ static const grpc_event_engine_vtable vtable = {
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
+ .fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
+ .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,