Diffstat (limited to 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c')
-rw-r--r--  src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c  292
1 file changed, 54 insertions(+), 238 deletions(-)
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
index d23bf6c06c..2c91ad357c 100644
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2017, Google Inc.
- * All rights reserved.
+ * Copyright 2017 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -61,7 +46,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/env.h"
@@ -163,8 +147,7 @@ struct grpc_fd {
};
/* Reference counting for fds */
-// #define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -184,18 +167,18 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
+#else
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
+#endif
typedef struct worker_node {
struct worker_node *next;
@@ -204,8 +187,6 @@ typedef struct worker_node {
/* This is also used as grpc_workqueue (by directly casing it) */
typedef struct polling_island {
- grpc_closure_scheduler workqueue_scheduler;
-
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
the refcount.
@@ -226,15 +207,6 @@ typedef struct polling_island {
/* Number of threads currently polling on this island */
gpr_atm poller_count;
- /* Mutex guarding the read end of the workqueue (must be held to pop from
- * workqueue_items) */
- gpr_mu workqueue_read_mu;
- /* Queue of closures to be executed */
- gpr_mpscq workqueue_items;
- /* Count of items in workqueue_items */
- gpr_atm workqueue_item_count;
- /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
- grpc_wakeup_fd workqueue_wakeup_fd;
/* The list of workers waiting to do polling on this polling island */
gpr_mu worker_list_mu;
@@ -323,8 +295,6 @@ static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -337,57 +307,30 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
- workqueue_enqueue, workqueue_enqueue, "workqueue"};
-
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, old_cnt + 1, reason, file, line);
+ }
pi_add_ref(pi);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
- pi_unref(exec_ctx, pi);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
-}
-
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- if (workqueue != NULL) {
- pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue != NULL) {
- pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
- }
-}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- pi_add_ref((polling_island *)workqueue);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- pi_unref(exec_ctx, (polling_island *)workqueue);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, (old_cnt - 1), reason, file, line);
}
+ pi_unref(exec_ctx, pi);
}
#endif
@@ -592,17 +535,12 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
- pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
- gpr_mu_init(&pi->workqueue_read_mu);
- gpr_mpscq_init(&pi->workqueue_items);
- gpr_atm_rel_store(&pi->workqueue_item_count, 0);
-
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
@@ -610,11 +548,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&pi->worker_list_mu);
worker_node_init(&pi->worker_list_head);
- if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
- err_desc)) {
- goto done;
- }
-
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@@ -622,8 +555,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
- polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
-
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
@@ -642,11 +573,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
- GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
- gpr_mu_destroy(&pi->workqueue_read_mu);
- gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
- grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_mu_destroy(&pi->worker_list_mu);
GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head));
@@ -794,45 +721,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
-static void workqueue_maybe_wakeup(polling_island *pi) {
- /* If this thread is the current poller, then it may be that it's about to
- decrement the current poller count, so we need to look past this thread */
- bool is_current_poller = (g_current_thread_polling_island == pi);
- gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
- gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
- /* Only issue a wakeup if it's likely that some poller could come in and take
- it right now. Note that since we do an anticipatory mpscq_pop every poll
- loop, it's ok if we miss the wakeup here, as we'll get the work item when
- the next poller enters anyway. */
- if (current_pollers > min_current_pollers_for_wakeup) {
- GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
- grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
- }
-}
-
-static void workqueue_move_items_to_parent(polling_island *q) {
- polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
- if (p == NULL) {
- return;
- }
- gpr_mu_lock(&q->workqueue_read_mu);
- int num_added = 0;
- while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
- if (n != NULL) {
- gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
- gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
- gpr_mpscq_push(&p->workqueue_items, n);
- num_added++;
- }
- }
- gpr_mu_unlock(&q->workqueue_read_mu);
- if (num_added > 0) {
- workqueue_maybe_wakeup(p);
- }
- workqueue_move_items_to_parent(p);
-}
-
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@@ -857,8 +745,6 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
-
- workqueue_move_items_to_parent(p);
}
/* else if p == q, nothing needs to be done */
@@ -869,32 +755,6 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
- /* take a ref to the workqueue: otherwise it can happen that whatever events
- * this kicks off ends up destroying the workqueue before this function
- * completes */
- GRPC_WORKQUEUE_REF(workqueue, "enqueue");
- polling_island *pi = (polling_island *)workqueue;
- gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
- closure->error_data.error = error;
- gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
- if (last == 0) {
- workqueue_maybe_wakeup(pi);
- }
- workqueue_move_items_to_parent(pi);
- GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
- GPR_TIMER_END("workqueue.enqueue", 0);
-}
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- polling_island *pi = (polling_island *)workqueue;
- return workqueue == NULL ? grpc_schedule_on_exec_ctx
- : &pi->workqueue_scheduler;
-}
-
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -935,14 +795,17 @@ static void polling_island_global_shutdown() {
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -951,18 +814,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
/* Add the fd to the freelist */
gpr_mu_lock(&fd_freelist_mu);
@@ -980,7 +844,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
/* Increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
@@ -1048,8 +912,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
return new_fd;
@@ -1108,7 +974,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
fd->po.pi = NULL;
}
- grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->po.mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1153,14 +1019,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
- gpr_mu_lock(&fd->po.mu);
- grpc_workqueue *workqueue =
- GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
- gpr_mu_unlock(&fd->po.mu);
- return workqueue;
-}
-
/*******************************************************************************
* Pollset Definitions
*/
@@ -1401,7 +1259,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
/* Release the ref and set pollset->po.pi to NULL */
pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->po.mu lock must be held by the caller before calling this */
@@ -1432,33 +1290,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->po.mu);
}
-static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
- polling_island *pi) {
- if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
- gpr_mu_unlock(&pi->workqueue_read_mu);
- if (n != NULL) {
- if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
- workqueue_maybe_wakeup(pi);
- }
- grpc_closure *c = (grpc_closure *)n;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- return true;
- } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
- /* n == NULL might mean there's work but it's not available to be popped
- * yet - try to ensure another workqueue wakes up to check shortly if so
- */
- workqueue_maybe_wakeup(pi);
- }
- }
- return false;
-}
-
/* NOTE: This function may modify 'now' */
static bool acquire_polling_lease(grpc_pollset_worker *worker,
polling_island *pi, gpr_timespec deadline,
@@ -1594,12 +1425,7 @@ static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &pi->workqueue_wakeup_fd) {
- append_error(error,
- grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
- err_desc);
- maybe_do_workqueue_work(exec_ctx, pi);
- } else if (data_ptr == &polling_island_wakeup_fd) {
+ if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
"%d) got merged",
@@ -1675,15 +1501,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->po.mu);
- /* If we get some workqueue work to do, it might end up completing an item on
- the completion queue, so there's no need to poll... so we skip that and
- redo the complete loop to verify */
- if (!maybe_do_workqueue_work(exec_ctx, pi)) {
- g_current_thread_polling_island = pi;
- pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now,
- deadline, sig_mask, error);
- g_current_thread_polling_island = NULL;
- }
+ g_current_thread_polling_island = pi;
+ pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline,
+ sig_mask, error);
+ g_current_thread_polling_island = NULL;
GPR_ASSERT(pi != NULL);
@@ -2036,7 +1857,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -2054,10 +1874,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
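
The recurring pattern in this patch is that debug-only refcount logging, previously gated by ad-hoc macros such as GRPC_FD_REF_COUNT_DEBUG and GRPC_WORKQUEUE_REFCOUNT_DEBUG, is now compiled under #ifndef NDEBUG and only emits output when the relevant runtime tracer (grpc_polling_trace, grpc_trace_fd_refcount) is enabled. A minimal, self-contained sketch of that pattern follows; the names obj_t, g_trace_refcount, OBJ_REF and obj_ref_dbg are hypothetical stand-ins for gRPC's gpr_atm/gpr_log/GRPC_TRACER_ON machinery, not the library's API.

/* Sketch only: debug-gated, tracer-gated refcount logging.
   Hypothetical names; the real code uses gRPC primitives. */

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  atomic_long refs;
} obj_t;

static bool g_trace_refcount = false; /* runtime trace flag (assumed) */

static void obj_ref(obj_t *o) { atomic_fetch_add(&o->refs, 1); }

#ifndef NDEBUG
/* Debug build: the macro forwards file/line so the trace shows the call
   site, mirroring how PI_ADD_REF/REF_BY pass __FILE__ and __LINE__. */
#define OBJ_REF(o, reason) obj_ref_dbg((o), (reason), __FILE__, __LINE__)
static void obj_ref_dbg(obj_t *o, const char *reason, const char *file,
                        int line) {
  if (g_trace_refcount) {
    long old = atomic_load(&o->refs);
    fprintf(stderr, "ref %p old:%ld -> new:%ld (%s) - (%s, %d)\n", (void *)o,
            old, old + 1, reason, file, line);
  }
  obj_ref(o);
}
#else
/* Release build: the reason string and call site are compiled away. */
#define OBJ_REF(o, reason) obj_ref((o))
#endif

int main(void) {
  obj_t o;
  atomic_init(&o.refs, 1);
  g_trace_refcount = true;
  OBJ_REF(&o, "example");
  return 0;
}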