author    Craig Tiller <ctiller@google.com>  2017-05-12 10:56:03 -0700
committer Craig Tiller <ctiller@google.com>  2017-05-12 10:56:03 -0700
commit    ee4b14521380f8c387c27f4cd351565d0afa3d61 (patch)
tree      fb3c9263bc12f0c52e52775162de1e6b0887c998 /src/core/lib/iomgr
parent    bc6a9cb24aa3c1fcc1817962169773a44a1d50ee (diff)
Remove workqueue, covered_by_poller as concepts, get Mac build up
Diffstat (limited to 'src/core/lib/iomgr')
-rw-r--r--  src/core/lib/iomgr/combiner.c           | 213
-rw-r--r--  src/core/lib/iomgr/combiner.h           |   8
-rw-r--r--  src/core/lib/iomgr/endpoint.c           |   4
-rw-r--r--  src/core/lib/iomgr/endpoint.h           |   4
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c      |  31
-rw-r--r--  src/core/lib/iomgr/ev_posix.c           |  26
-rw-r--r--  src/core/lib/iomgr/ev_posix.h           |  16
-rw-r--r--  src/core/lib/iomgr/exec_ctx.c           |   1
-rw-r--r--  src/core/lib/iomgr/resource_quota.c     |  31
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c          |  19
-rw-r--r--  src/core/lib/iomgr/workqueue.h          |  87
-rw-r--r--  src/core/lib/iomgr/workqueue_uv.c       |  65
-rw-r--r--  src/core/lib/iomgr/workqueue_uv.h       |  37
-rw-r--r--  src/core/lib/iomgr/workqueue_windows.c  |  63
-rw-r--r--  src/core/lib/iomgr/workqueue_windows.h  |  37
15 files changed, 77 insertions, 565 deletions
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 863f22c614..aa7a8c1c70 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -39,7 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include "src/core/lib/iomgr/workqueue.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
@@ -56,93 +56,42 @@ grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
- grpc_workqueue *optional_workqueue;
- grpc_closure_scheduler uncovered_scheduler;
- grpc_closure_scheduler covered_scheduler;
- grpc_closure_scheduler uncovered_finally_scheduler;
- grpc_closure_scheduler covered_finally_scheduler;
+ grpc_closure_scheduler scheduler;
+ grpc_closure_scheduler finally_scheduler;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned (STATE_UNORPHANED)
// other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
- // number of elements in the list that are covered by a poller: if >0, we can
- // offload safely
- gpr_atm elements_covered_by_poller;
bool time_to_execute_final_list;
- bool final_list_covered_by_poller;
grpc_closure_list final_list;
grpc_closure offload;
gpr_refcount refs;
};
-static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_error *error);
-static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error);
-static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure,
- grpc_error *error);
-static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure,
- grpc_error *error);
-
-static const grpc_closure_scheduler_vtable scheduler_uncovered = {
- combiner_exec_uncovered, combiner_exec_uncovered,
- "combiner:immediately:uncovered"};
-static const grpc_closure_scheduler_vtable scheduler_covered = {
- combiner_exec_covered, combiner_exec_covered,
- "combiner:immediately:covered"};
-static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
- combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
- "combiner:finally:uncovered"};
-static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
- combiner_finally_exec_covered, combiner_finally_exec_covered,
- "combiner:finally:covered"};
-static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-
-typedef struct {
- grpc_error *error;
- bool covered_by_poller;
-} error_data;
+static const grpc_closure_scheduler_vtable scheduler = {
+ combiner_exec, combiner_exec, "combiner:immediately"};
+static const grpc_closure_scheduler_vtable finally_scheduler = {
+ combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
-static uintptr_t pack_error_data(error_data d) {
- return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
-}
-
-static error_data unpack_error_data(uintptr_t p) {
- return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
-}
-
-static bool is_covered_by_poller(grpc_combiner *lock) {
- return lock->final_list_covered_by_poller ||
- gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
-}
-
-#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
-#define IS_COVERED_BY_POLLER_ARGS(lock) \
- (lock)->final_list_covered_by_poller, \
- gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
- is_covered_by_poller((lock))
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
+grpc_combiner *grpc_combiner_create(void) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
- lock->optional_workqueue = optional_workqueue;
- lock->final_list_covered_by_poller = false;
- lock->uncovered_scheduler.vtable = &scheduler_uncovered;
- lock->covered_scheduler.vtable = &scheduler_covered;
- lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
- lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
+ lock->scheduler.vtable = &scheduler;
+ lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
- gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- grpc_closure_init(&lock->offload, offload, lock,
- grpc_workqueue_scheduler(lock->optional_workqueue));
+ grpc_closure_init(&lock->offload, offload, lock, grpc_executor_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -151,7 +100,6 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
- GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
gpr_free(lock);
}
@@ -208,21 +156,21 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
}
}
-static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *cl, grpc_error *error,
- bool covered_by_poller) {
+#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
+ ((grpc_combiner *)(((char *)((closure)->scheduler)) - \
+ offsetof(grpc_combiner, scheduler_name)))
+
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+ grpc_error *error) {
GPR_TIMER_BEGIN("combiner.execute", 0);
+ grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
- GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
- cl, covered_by_poller, last));
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
+ lock, cl, last));
GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
assert(cl->cb);
- cl->error_data.scratch =
- pack_error_data((error_data){error, covered_by_poller});
- if (covered_by_poller) {
- gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
- }
+ cl->error_data.error = error;
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
if (last == 1) {
// first element on this list: add it to the list of combiner locks
@@ -232,24 +180,6 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
GPR_TIMER_END("combiner.execute", 0);
}
-#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
- ((grpc_combiner *)(((char *)((closure)->scheduler)) - \
- offsetof(grpc_combiner, scheduler_name)))
-
-static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
- grpc_error *error) {
- combiner_exec(exec_ctx,
- COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
- error, false);
-}
-
-static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
- grpc_error *error) {
- combiner_exec(exec_ctx,
- COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
- error, true);
-}
-
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@@ -265,8 +195,7 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
- lock->optional_workqueue));
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
@@ -278,18 +207,14 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
return false;
}
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG,
- "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
- "is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
- " exec_ctx_ready_to_finish=%d "
- "time_to_execute_final_list=%d",
- lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
- grpc_exec_ctx_ready_to_finish(exec_ctx),
- lock->time_to_execute_final_list));
-
- if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
- grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_continue_exec_ctx "
+ "exec_ctx_ready_to_finish=%d "
+ "time_to_execute_final_list=%d",
+ lock, grpc_exec_ctx_ready_to_finish(exec_ctx),
+ lock->time_to_execute_final_list));
+
+ if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on, and we have a workqueue (and
// so can help the execution context out): schedule remaining work to be
@@ -310,29 +235,23 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
- if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
- queue_offload(exec_ctx, lock);
- }
+ queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
- error_data err = unpack_error_data(cl->error_data.scratch);
+ grpc_error *cl_err = cl->error_data.error;
#ifndef NDEBUG
cl->scheduled = false;
#endif
- cl->cb(exec_ctx, cl->cb_arg, err.error);
- if (err.covered_by_poller) {
- gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
- }
- GRPC_ERROR_UNREF(err.error);
+ cl->cb(exec_ctx, cl->cb_arg, cl_err);
+ GRPC_ERROR_UNREF(cl_err);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
- lock->final_list_covered_by_poller = false;
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
@@ -398,20 +317,20 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error);
-static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
- grpc_combiner *lock, grpc_closure *closure,
- grpc_error *error,
- bool covered_by_poller) {
- GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
- closure, exec_ctx->active_combiner, covered_by_poller));
+static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_error *error) {
+ grpc_combiner *lock =
+ COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_execute_finally c=%p; ac=%p",
+ lock, closure, exec_ctx->active_combiner));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
- grpc_closure_sched(
- exec_ctx, grpc_closure_create(enqueue_finally, closure,
- grpc_combiner_scheduler(lock, false)),
- error);
+ grpc_closure_sched(exec_ctx,
+ grpc_closure_create(enqueue_finally, closure,
+ grpc_combiner_scheduler(lock)),
+ error);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
@@ -419,42 +338,20 @@ static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
}
- if (covered_by_poller) {
- lock->final_list_covered_by_poller = true;
- }
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
- combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
- GRPC_ERROR_REF(error), false);
-}
-
-static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
- grpc_closure *cl,
- grpc_error *error) {
- combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
- cl, uncovered_finally_scheduler),
- cl, error, false);
-}
-
-static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
- grpc_closure *cl, grpc_error *error) {
- combiner_execute_finally(
- exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
- cl, error, true);
+ combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
}
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
- bool covered_by_poller) {
- return covered_by_poller ? &combiner->covered_scheduler
- : &combiner->uncovered_scheduler;
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
+ return &combiner->scheduler;
}
grpc_closure_scheduler *grpc_combiner_finally_scheduler(
- grpc_combiner *combiner, bool covered_by_poller) {
- return covered_by_poller ? &combiner->covered_finally_scheduler
- : &combiner->uncovered_finally_scheduler;
+ grpc_combiner *combiner) {
+ return &combiner->finally_scheduler;
}
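
Note: the rewritten combiner_exec and combiner_finally_exec no longer take the lock as an argument; they recover the owning grpc_combiner from the closure's scheduler pointer via the COMBINER_FROM_CLOSURE_SCHEDULER macro, a container-of computation over the scheduler members embedded in the struct. A minimal, self-contained sketch of that pattern, using hypothetical combiner/scheduler types rather than the gRPC ones:

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for grpc_closure_scheduler / grpc_combiner. */
typedef struct scheduler {
  const char *name;
} scheduler;

typedef struct combiner {
  int id;
  scheduler sched;          /* embedded by value, like lock->scheduler */
  scheduler finally_sched;  /* embedded by value, like lock->finally_scheduler */
} combiner;

/* Recover the containing combiner from a pointer to one of its embedded
   scheduler members, mirroring COMBINER_FROM_CLOSURE_SCHEDULER. */
#define COMBINER_FROM_SCHEDULER(ptr, member) \
  ((combiner *)((char *)(ptr)-offsetof(combiner, member)))

int main(void) {
  combiner c = {.id = 42,
                .sched = {"combiner:immediately"},
                .finally_sched = {"combiner:finally"}};
  scheduler *s = &c.finally_sched; /* what a scheduled closure would carry */
  combiner *owner = COMBINER_FROM_SCHEDULER(s, finally_sched);
  printf("recovered combiner %d via %s\n", owner->id, s->name);
  return 0;
}
```

This only works because the scheduler and finally_scheduler members are embedded by value, so each sits at a fixed offset from the start of the combiner.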
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 6ab7a2b26b..5aa8443667 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -48,7 +48,7 @@
// Initialize the lock, with an optional workqueue to shift load to when
// necessary
-grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
+grpc_combiner *grpc_combiner_create(void);
//#define GRPC_COMBINER_REFCOUNT_DEBUG
#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
@@ -71,11 +71,9 @@ grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
// Fetch a scheduler to schedule closures against
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
- bool covered_by_poller);
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock);
// Scheduler to execute \a action within the lock just prior to unlocking.
-grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
- bool covered_by_poller);
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
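
Note: the gpr_atm state field documented in the combiner struct near the top of this diff packs an "unorphaned" flag into the low bit and the queued-element count into the remaining bits, which is why enqueueing adds STATE_ELEM_COUNT_LOW_BIT and why last == 1 means "first element on an otherwise idle, live lock". A rough stand-alone illustration using C11 atomics (the real code uses gpr_atm; the constants here are assumed to match the ones in combiner.c):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants mirroring STATE_UNORPHANED and
   STATE_ELEM_COUNT_LOW_BIT as used by combiner.c. */
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2

int main(void) {
  /* A live, empty combiner: unorphaned flag set, zero queued elements. */
  atomic_uintptr_t state = STATE_UNORPHANED;

  /* Enqueue two items: each adds one to the element count (bit 1 upward). */
  atomic_fetch_add(&state, STATE_ELEM_COUNT_LOW_BIT);
  uintptr_t last = atomic_fetch_add(&state, STATE_ELEM_COUNT_LOW_BIT);

  bool unorphaned = last & STATE_UNORPHANED; /* low bit: lock still owned   */
  uintptr_t elems = last >> 1;               /* other bits: elements queued */
  printf("unorphaned=%d elements_before_add=%lu\n", unorphaned,
         (unsigned long)elems);

  /* Orphaning clears the low bit; the lock is destroyed once the count
     drains back to zero. */
  atomic_fetch_sub(&state, STATE_UNORPHANED);
  return 0;
}
```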
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index bf6e98146a..60b8410a45 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -69,10 +69,6 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
-grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
- return ep->vtable->get_workqueue(ep);
-}
-
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
return ep->vtable->get_resource_user(ep);
}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 740357ecc5..f8cee0158b 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -52,7 +52,6 @@ struct grpc_endpoint_vtable {
grpc_slice_buffer *slices, grpc_closure *cb);
void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_slice_buffer *slices, grpc_closure *cb);
- grpc_workqueue *(*get_workqueue)(grpc_endpoint *ep);
void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset);
void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -78,9 +77,6 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep);
*/
int grpc_endpoint_get_fd(grpc_endpoint *ep);
-/* Retrieve a reference to the workqueue associated with this endpoint */
-grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
-
/* Write slices out to the socket.
If the connection is ready for more data after the end of the call, it
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 3a7648ac32..acf425751d 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -648,8 +648,6 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
GRPC_FD_UNREF(fd, "poll");
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
-
/*******************************************************************************
* pollset_posix.c
*/
@@ -1289,30 +1287,6 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
}
/*******************************************************************************
- * workqueue stubs
- */
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- return workqueue;
-}
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {}
-#endif
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- return grpc_schedule_on_exec_ctx;
-}
-
-/*******************************************************************************
* Condition Variable polling extensions
*/
@@ -1529,7 +1503,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -1547,10 +1520,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index c4d2f23e29..c633aec5ac 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -171,10 +171,6 @@ grpc_fd *grpc_fd_create(int fd, const char *name) {
return g_event_engine->fd_create(fd, name);
}
-grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd) {
- return g_event_engine->fd_get_workqueue(fd);
-}
-
int grpc_fd_wrapped_fd(grpc_fd *fd) {
return g_event_engine->fd_wrapped_fd(fd);
}
@@ -276,26 +272,4 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
}
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason) {
- return g_event_engine->workqueue_ref(workqueue, file, line, reason);
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
-}
-#else
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
- return g_event_engine->workqueue_ref(workqueue);
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- g_event_engine->workqueue_unref(exec_ctx, workqueue);
-}
-#endif
-
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
- return g_event_engine->workqueue_scheduler(workqueue);
-}
-
#endif // GRPC_POSIX_SOCKET
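
Note: ev_posix.c forwards every one of these calls through the process-wide g_event_engine vtable, so dropping the workqueue hooks amounts to deleting three slots and their forwarders. A toy sketch of that dispatch pattern, with invented vtable fields (the real table is declared in ev_posix.h and selected by grpc_event_engine_init):

```c
#include <stdio.h>

/* A cut-down, hypothetical event-engine vtable; the real
   grpc_event_engine_vtable has many more entries. */
typedef struct {
  const char *name;
  int (*fd_create)(int fd);
} event_engine_vtable;

static int poll_fd_create(int fd) {
  printf("poll engine wrapping fd %d\n", fd);
  return fd;
}

static const event_engine_vtable poll_engine = {"poll", poll_fd_create};

/* Selected once at startup; every public entry point then forwards
   through this pointer, just like g_event_engine->fd_create(...) above. */
static const event_engine_vtable *g_event_engine = &poll_engine;

static int engine_fd_create(int fd) { return g_event_engine->fd_create(fd); }

int main(void) {
  engine_fd_create(3);
  return 0;
}
```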
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 80619aab5f..cbe5862372 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -41,7 +41,6 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */
@@ -60,7 +59,6 @@ typedef struct grpc_event_engine_vtable {
void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
bool (*fd_is_shutdown)(grpc_fd *fd);
- grpc_workqueue *(*fd_get_workqueue)(grpc_fd *fd);
grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
grpc_fd *fd);
@@ -97,17 +95,6 @@ typedef struct grpc_event_engine_vtable {
grpc_pollset_set *pollset_set, grpc_fd *fd);
void (*shutdown_engine)(void);
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
- grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason);
- void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason);
-#else
- grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
- void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
-#endif
- grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);
@@ -121,9 +108,6 @@ const char *grpc_get_poll_strategy_name();
This takes ownership of closing fd. */
grpc_fd *grpc_fd_create(int fd, const char *name);
-/* Get a workqueue that's associated with this fd */
-grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd);
-
/* Return the wrapped fd, or -1 if it has been released or closed. */
int grpc_fd_wrapped_fd(grpc_fd *fd);
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 318bb2b713..44e2676c3d 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -38,7 +38,6 @@
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index 6b2b85cce0..3c6a6ea1a5 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -581,7 +581,7 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
- resource_quota->combiner = grpc_combiner_create(NULL);
+ resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
@@ -594,12 +594,11 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
(intptr_t)resource_quota);
}
- grpc_closure_init(
- &resource_quota->rq_step_closure, rq_step, resource_quota,
- grpc_combiner_finally_scheduler(resource_quota->combiner, true));
+ grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota,
+ grpc_combiner_finally_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
rq_reclamation_done, resource_quota,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_quota->roots[i] = NULL;
}
@@ -704,18 +703,18 @@ grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota_ref_internal(resource_quota);
grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->add_to_free_pool_closure,
&ru_add_to_free_pool, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->post_reclaimer_closure[0],
&ru_post_benign_reclaimer, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->post_reclaimer_closure[1],
&ru_post_destructive_reclaimer, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
gpr_mu_init(&resource_user->mu);
gpr_atm_rel_store(&resource_user->refs, 1);
gpr_atm_rel_store(&resource_user->shutdown, 0);
@@ -772,12 +771,12 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
- grpc_closure_sched(exec_ctx,
- grpc_closure_create(
- ru_shutdown, resource_user,
- grpc_combiner_scheduler(
- resource_user->resource_quota->combiner, false)),
- GRPC_ERROR_NONE);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_create(
+ ru_shutdown, resource_user,
+ grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
+ GRPC_ERROR_NONE);
}
}
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 5d360b0b80..f066881237 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -558,26 +558,15 @@ static int tcp_get_fd(grpc_endpoint *ep) {
return tcp->fd;
}
-static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
- return grpc_fd_get_workqueue(tcp->em_fd);
-}
-
static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return tcp->resource_user;
}
-static const grpc_endpoint_vtable vtable = {tcp_read,
- tcp_write,
- tcp_get_workqueue,
- tcp_add_to_pollset,
- tcp_add_to_pollset_set,
- tcp_shutdown,
- tcp_destroy,
- tcp_get_resource_user,
- tcp_get_peer,
- tcp_get_fd};
+static const grpc_endpoint_vtable vtable = {
+ tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
+ tcp_shutdown, tcp_destroy, tcp_get_resource_user, tcp_get_peer,
+ tcp_get_fd};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
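
Note: removing tcp_get_workqueue from the middle of the endpoint vtable forces the positional initializer above to be rewritten wholesale. C99 designated initializers make that kind of removal mechanical and rule out silently shifted slots; a hypothetical cut-down sketch (not the actual grpc_endpoint_vtable, which uses positional initialization):

```c
#include <stdio.h>

/* A small, made-up endpoint vtable for illustration only. */
typedef struct {
  void (*read)(void);
  void (*write)(void);
  void (*shutdown)(void);
  int (*get_fd)(void);
} endpoint_vtable;

static void my_read(void) { puts("read"); }
static void my_write(void) { puts("write"); }
static void my_shutdown(void) { puts("shutdown"); }
static int my_get_fd(void) { return -1; }

/* Each function is bound to a named member, so deleting a member (as this
   commit deletes get_workqueue) cannot shift the remaining entries the way
   a positional initializer could. */
static const endpoint_vtable vtable = {
    .read = my_read,
    .write = my_write,
    .shutdown = my_shutdown,
    .get_fd = my_get_fd,
};

int main(void) {
  vtable.read();
  printf("fd=%d\n", vtable.get_fd());
  return 0;
}
```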
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
deleted file mode 100644
index 371b0f55dc..0000000000
--- a/src/core/lib/iomgr/workqueue.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_H
-
-#include "src/core/lib/iomgr/closure.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/iomgr.h"
-#include "src/core/lib/iomgr/pollset.h"
-#include "src/core/lib/iomgr/pollset_set.h"
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GPR_WINDOWS
-#include "src/core/lib/iomgr/workqueue_windows.h"
-#endif
-
-/* grpc_workqueue is forward declared in exec_ctx.h */
-
-/* Reference counting functions. Use the macro's always
- (GRPC_WORKQUEUE_{REF,UNREF}).
-
- Pass in a descriptive reason string for reffing/unreffing as the last
- argument to each macro. When GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined, that
- string will be printed alongside the refcount. When it is not defined, the
- string will be discarded at compilation time. */
-
-/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-#define GRPC_WORKQUEUE_REF(p, r) \
- grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
- grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason);
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason);
-#else
-#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
-#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
-#endif
-
-/** Fetch the workqueue closure scheduler. Items added to a work queue will be
- started in approximately the order they were enqueued, on some thread that
- may or may not be the current thread. Successive closures enqueued onto a
- workqueue MAY be executed concurrently.
-
- It is generally more expensive to add a closure to a workqueue than to the
- execution context, both in terms of CPU work and in execution latency.
-
- Use work queues when it's important that other threads be given a chance to
- tackle some workload. */
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue);
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
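
Note: the deleted header above documents the debug-refcount macro convention: callers always pass a reason string, which is logged with file and line when GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined and discarded at compile time otherwise. A small, self-contained version of that idiom (toy wq type and hypothetical macro names, not the gRPC ones):

```c
#include <stdio.h>

/* Toy object standing in for grpc_workqueue. */
typedef struct {
  int refs;
} wq;

/* Debug-refcount macro pattern: the reason string costs nothing unless the
   debug flag is defined, in which case every ref is logged with file/line. */
/* #define WQ_REFCOUNT_DEBUG */
#ifdef WQ_REFCOUNT_DEBUG
#define WQ_REF(p, r) wq_ref_dbg((p), __FILE__, __LINE__, (r))
static void wq_ref_dbg(wq *q, const char *file, int line, const char *reason) {
  q->refs++;
  printf("%s:%d ref %p -> %d (%s)\n", file, line, (void *)q, q->refs, reason);
}
#else
#define WQ_REF(p, r) wq_ref((p))
static void wq_ref(wq *q) { q->refs++; }
#endif

int main(void) {
  wq q = {1};
  WQ_REF(&q, "combiner"); /* reason string compiles away when debug is off */
  printf("refs=%d\n", q.refs);
  return 0;
}
```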
diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c
deleted file mode 100644
index 4d61b40912..0000000000
--- a/src/core/lib/iomgr/workqueue_uv.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_UV
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-// Minimal implementation of grpc_workqueue for libuv
-// Works by directly enqueuing workqueue items onto the current execution
-// context, which is at least correct, if not performant or in the spirit of
-// workqueues.
-
-void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
-#endif
-
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
- return grpc_schedule_on_exec_ctx;
-}
-
-#endif /* GPR_UV */
diff --git a/src/core/lib/iomgr/workqueue_uv.h b/src/core/lib/iomgr/workqueue_uv.h
deleted file mode 100644
index be3f8e4d93..0000000000
--- a/src/core/lib/iomgr/workqueue_uv.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
deleted file mode 100644
index 234b47cdf5..0000000000
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_WINDOWS
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-// Minimal implementation of grpc_workqueue for Windows
-// Works by directly enqueuing workqueue items onto the current execution
-// context, which is at least correct, if not performant or in the spirit of
-// workqueues.
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
-#endif
-
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
- return grpc_schedule_on_exec_ctx;
-}
-
-#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/iomgr/workqueue_windows.h b/src/core/lib/iomgr/workqueue_windows.h
deleted file mode 100644
index e5d59130bb..0000000000
--- a/src/core/lib/iomgr/workqueue_windows.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H */