Diffstat (limited to 'src/core')
-rw-r--r--   src/core/lib/iomgr/ev_poll_and_epoll_posix.c   76
-rw-r--r--   src/core/lib/iomgr/ev_posix.c                    5
-rw-r--r--   src/core/lib/iomgr/ev_posix.h                    6
-rw-r--r--   src/core/lib/iomgr/tcp_server_posix.c           16
-rw-r--r--   src/core/lib/surface/completion_queue.c         13
-rw-r--r--   src/core/lib/surface/completion_queue.h          4
-rw-r--r--   src/core/lib/surface/server.c                   53
7 files changed, 130 insertions, 43 deletions
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
index aeb6e28665..943c404f91 100644
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
@@ -126,6 +126,9 @@ struct grpc_fd {
grpc_closure *on_done_closure;
grpc_iomgr_object iomgr_object;
+
+ /* The pollset that last noticed and notified that the fd is readable */
+ grpc_pollset *read_notifier_pollset;
};
/* Begin polling on an fd.
@@ -147,7 +150,8 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
if got_read or got_write are 1, also does the become_{readable,writable} as
appropriate. */
static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
- int got_read, int got_write);
+ int got_read, int got_write,
+ grpc_pollset *read_notifier_pollset);
/* Return 1 if this fd is orphaned, 0 otherwise */
static bool fd_is_orphaned(grpc_fd *fd);
@@ -342,6 +346,7 @@ static grpc_fd *alloc_fd(int fd) {
r->on_done_closure = NULL;
r->closed = 0;
r->released = 0;
+ r->read_notifier_pollset = NULL;
gpr_mu_unlock(&r->mu);
return r;
}
@@ -545,6 +550,11 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
}
+static void set_read_notifier_pollset_locked(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) {
+ fd->read_notifier_pollset = read_notifier_pollset;
+}
+
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
GPR_ASSERT(!fd->shutdown);
@@ -568,6 +578,18 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_mu_unlock(&fd->mu);
}
+/* Return the read-notifier pollset */
+static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
+ grpc_pollset *notifier = NULL;
+
+ gpr_mu_lock(&fd->mu);
+ notifier = fd->read_notifier_pollset;
+ gpr_mu_unlock(&fd->mu);
+
+ return notifier;
+}
+
static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
grpc_pollset_worker *worker, uint32_t read_mask,
uint32_t write_mask, grpc_fd_watcher *watcher) {
@@ -620,7 +642,8 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
}
static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
- int got_read, int got_write) {
+ int got_read, int got_write,
+ grpc_pollset *read_notifier_pollset) {
int was_polling = 0;
int kick = 0;
grpc_fd *fd = watcher->fd;
@@ -656,6 +679,10 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
kick = 1;
}
+
+ if (read_notifier_pollset != NULL) {
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+ }
}
if (got_write) {
if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
@@ -756,9 +783,14 @@ static void pollset_kick_ext(grpc_pollset *p,
specific_worker = pop_front_worker(p);
if (specific_worker != NULL) {
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
+ /* Prefer not to kick self. Push the worker to the end of the list and
+ * pop the one from the front. */
GPR_TIMER_MARK("kick_anonymous_not_self", 0);
push_back_worker(p, specific_worker);
specific_worker = pop_front_worker(p);
+ /* If there was only one worker on the pollset, we would get back the same
+ * worker we just pushed (the one stored in the current thread-local). If so,
+ * kick it only if the GRPC_POLLSET_CAN_KICK_SELF flag is set. */
if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
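
The rotation above prefers to kick some other worker rather than the calling thread's own. The toy model below is not gRPC code (the real worker list in this file has its own representation and helpers); it only illustrates which worker ends up selected when the caller's own worker sits at the front of the queue:

#include <assert.h>
#include <stddef.h>

typedef struct toy_worker { struct toy_worker *next; } toy_worker;

static toy_worker *toy_pop_front(toy_worker **head) {
  toy_worker *w = *head;
  if (w != NULL) *head = w->next;
  return w;
}

static void toy_push_back(toy_worker **head, toy_worker *w) {
  toy_worker **p = head;
  while (*p != NULL) p = &(*p)->next;
  w->next = NULL;
  *p = w;
}

int main(void) {
  toy_worker other = {NULL};
  toy_worker self = {&other};                   /* queue: [self, other]; "self" is the caller */
  toy_worker *head = &self;

  toy_worker *candidate = toy_pop_front(&head); /* yields self */
  toy_push_back(&head, candidate);              /* rotate self to the back */
  candidate = toy_pop_front(&head);             /* yields other: this is the worker to kick */

  assert(candidate == &other);
  return 0;
}

With only one worker queued, the same rotation hands the caller back its own worker, which is why the hunk above only kicks it when GRPC_POLLSET_CAN_KICK_SELF is set.
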
@@ -1201,11 +1233,11 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
}
if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
}
} else if (r == 0) {
if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
}
} else {
if (pfd[0].revents & POLLIN_CHECK) {
@@ -1216,9 +1248,9 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
}
if (nfds > 2) {
fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
- pfd[2].revents & POLLOUT_CHECK);
+ pfd[2].revents & POLLOUT_CHECK, pollset);
} else if (fd) {
- fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
}
}
@@ -1354,11 +1386,11 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
}
for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
@@ -1369,11 +1401,11 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
continue;
}
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
- pfds[i].revents & POLLOUT_CHECK);
+ pfds[i].revents & POLLOUT_CHECK, pollset);
}
}
@@ -1449,20 +1481,31 @@ static void poll_become_multipoller(grpc_exec_ctx *exec_ctx,
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
-static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st) {
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st,
+ grpc_pollset *read_notifier_pollset) {
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
gpr_mu_lock(&fd->mu);
set_ready_locked(exec_ctx, fd, st);
+
+ /* A non-NULL read_notifier_pollset means that the fd is readable. */
+ if (read_notifier_pollset != NULL) {
+ /* Note: Since the fd might be part of multiple pollsets, this might be
+ * called multiple times (once each time the fd becomes readable), and it is
+ * okay to set the fd's read-notifier pollset to any one of these pollsets. */
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+ }
+
gpr_mu_unlock(&fd->mu);
}
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->read_closure);
+static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_pollset *notifier_pollset) {
+ set_ready(exec_ctx, fd, &fd->read_closure, notifier_pollset);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->write_closure);
+ set_ready(exec_ctx, fd, &fd->write_closure, NULL);
}
struct epoll_fd_list {
@@ -1554,7 +1597,7 @@ static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
}
- fd_end_poll(exec_ctx, &watcher, 0, 0);
+ fd_end_poll(exec_ctx, &watcher, 0, 0, NULL);
}
static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1668,7 +1711,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
} else {
if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd);
+ fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
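
The epoll-based multipoller is the other consumer of fd_become_readable: only the read (or cancel) path passes the draining pollset along, while writes never carry a notifier. A sketch of that dispatch, pulled into a hypothetical helper (dispatch_epoll_event is an invented name; fd_become_readable, fd_become_writable, and the EPOLL* classification are the ones used in this file):

/* Sketch only: dispatch one epoll event for `fd` observed by `pollset`. */
static void dispatch_epoll_event(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                                 grpc_pollset *pollset,
                                 const struct epoll_event *ep_ev) {
  int cancel = ep_ev->events & (EPOLLERR | EPOLLHUP);
  int read_ev = ep_ev->events & (EPOLLIN | EPOLLPRI);
  int write_ev = ep_ev->events & EPOLLOUT;
  if (read_ev || cancel) {
    /* Records `pollset` as fd->read_notifier_pollset under fd->mu. */
    fd_become_readable(exec_ctx, fd, pollset);
  }
  if (write_ev || cancel) {
    /* Write readiness never updates the read-notifier pollset. */
    fd_become_writable(exec_ctx, fd);
  }
}
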
@@ -1897,6 +1940,7 @@ static const grpc_event_engine_vtable vtable = {
.fd_shutdown = fd_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
+ .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index a7dfc9552d..6477b05dcd 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -163,6 +163,11 @@ void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
g_event_engine->fd_notify_on_write(exec_ctx, fd, closure);
}
+grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
+ return g_event_engine->fd_get_read_notifier_pollset(exec_ctx, fd);
+}
+
size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
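
Callers reach the new engine hook through this wrapper. A usage sketch, assuming a read callback on a listening fd that wants to bind a freshly accepted fd to the same pollset that noticed the readiness (listener_fd, accepted_fd, and the "conn" label are illustrative names; grpc_fd_create and grpc_pollset_add_fd are the existing iomgr calls, as used by the tcp_server_posix.c change below):

/* Sketch: bind a new fd to whichever pollset last saw the listener readable. */
grpc_pollset *notifier =
    grpc_fd_get_read_notifier_pollset(exec_ctx, listener_fd);
if (notifier != NULL) {
  grpc_fd *conn = grpc_fd_create(accepted_fd, "conn");
  grpc_pollset_add_fd(exec_ctx, notifier, conn);
} else {
  /* Engines may return NULL; callers must handle that case. */
}
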
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 1fa9f5ef2d..344bf63438 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -55,6 +55,8 @@ typedef struct grpc_event_engine_vtable {
grpc_closure *closure);
void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
+ grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd);
void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu);
void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -137,6 +139,10 @@ void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
+/* Return the read notifier pollset from the fd */
+grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd);
+
/* pollset_posix functions */
/* Add an fd to a pollset */
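
Every event engine vtable must now provide fd_get_read_notifier_pollset. Below is a hypothetical stub for an engine that does not track read notifiers (the stub names are invented for illustration); returning NULL is legal, which is why the tcp_server_posix.c change further down checks the result before using it:

/* Hypothetical engine member: no read-notifier tracking, so always NULL. */
static grpc_pollset *stub_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                       grpc_fd *fd) {
  (void)exec_ctx;
  (void)fd;
  return NULL;
}

static const grpc_event_engine_vtable stub_vtable = {
    /* ... other vtable members elided in this sketch ... */
    .fd_get_read_notifier_pollset = stub_fd_get_read_notifier_pollset,
};
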
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index aaeb384f6e..97c945b834 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -310,13 +310,15 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
grpc_tcp_listener *sp = arg;
grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
sp->fd_index};
+ grpc_pollset *read_notifier_pollset = NULL;
grpc_fd *fdobj;
- size_t i;
if (!success) {
goto error;
}
+ read_notifier_pollset = grpc_fd_get_read_notifier_pollset(exec_ctx, sp->emfd);
+
/* loop until accept4 returns EAGAIN, and then re-arm notification */
for (;;) {
struct sockaddr_storage addr;
@@ -349,12 +351,14 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
}
fdobj = grpc_fd_create(fd, name);
- /* TODO(ctiller): revise this when we have server-side sharding
- of channels -- we certainly should not be automatically adding every
- incoming channel to every pollset owned by the server */
- for (i = 0; i < sp->server->pollset_count; i++) {
- grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
+
+ if (read_notifier_pollset == NULL) {
+ gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd");
+ goto error;
}
+
+ grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
+
sp->server->on_accept_cb(
exec_ctx, sp->server->on_accept_cb_arg,
grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 1f82c3bad2..ae78f8f616 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ struct grpc_completion_queue {
int shutdown;
int shutdown_called;
int is_server_cq;
+ /** Set if this server cq must not accept incoming channels (non-listening) */
+ int is_non_listening_server_cq;
int num_pluckers;
plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
grpc_closure pollset_shutdown_done;
@@ -149,6 +151,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
cc->shutdown = 0;
cc->shutdown_called = 0;
cc->is_server_cq = 0;
+ cc->is_non_listening_server_cq = 0;
cc->num_pluckers = 0;
#ifndef NDEBUG
cc->outstanding_tag_count = 0;
@@ -511,6 +514,14 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
return POLLSET_FROM_CQ(cc);
}
+void grpc_cq_mark_non_listening_server_cq(grpc_completion_queue *cc) {
+ cc->is_non_listening_server_cq = 1;
+}
+
+bool grpc_cq_is_non_listening_server_cq(grpc_completion_queue *cc) {
+ return (cc->is_non_listening_server_cq == 1);
+}
+
void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }
int grpc_cq_is_server_cq(grpc_completion_queue *cc) { return cc->is_server_cq; }
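
The new flag lives beside is_server_cq and, like it, is only ever set, never cleared. A minimal sketch of the mark/query pair, using only calls that appear in this file plus the public create/destroy API (the assertions just spell out the intended semantics):

grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
grpc_cq_mark_server_cq(cq);
grpc_cq_mark_non_listening_server_cq(cq);
GPR_ASSERT(grpc_cq_is_server_cq(cq));
GPR_ASSERT(grpc_cq_is_non_listening_server_cq(cq));
grpc_completion_queue_destroy(cq);
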
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index eef82cf014..1528ca4ad8 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,6 +82,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
+void grpc_cq_mark_non_listening_server_cq(grpc_completion_queue *cc);
+bool grpc_cq_is_non_listening_server_cq(grpc_completion_queue *cc);
void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
int grpc_cq_is_server_cq(grpc_completion_queue *cc);
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 2db95b9cdf..c9b458faf2 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -895,23 +895,45 @@ const grpc_channel_filter grpc_server_top_filter = {
"server",
};
-void grpc_server_register_completion_queue(grpc_server *server,
- grpc_completion_queue *cq,
- void *reserved) {
+static void register_completion_queue(grpc_server *server,
+ grpc_completion_queue *cq,
+ bool is_non_listening, void *reserved) {
size_t i, n;
- GRPC_API_TRACE(
- "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
- (server, cq, reserved));
GPR_ASSERT(!reserved);
for (i = 0; i < server->cq_count; i++) {
if (server->cqs[i] == cq) return;
}
- GRPC_CQ_INTERNAL_REF(cq, "server");
+
grpc_cq_mark_server_cq(cq);
- n = server->cq_count++;
- server->cqs = gpr_realloc(server->cqs,
- server->cq_count * sizeof(grpc_completion_queue *));
- server->cqs[n] = cq;
+
+ /* Non-listening completion queues are not added to server->cqs */
+ if (is_non_listening) {
+ grpc_cq_mark_non_listening_server_cq(cq);
+ } else {
+ GRPC_CQ_INTERNAL_REF(cq, "server");
+ n = server->cq_count++;
+ server->cqs = gpr_realloc(
+ server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
+ server->cqs[n] = cq;
+ }
+}
+
+void grpc_server_register_completion_queue(grpc_server *server,
+ grpc_completion_queue *cq,
+ void *reserved) {
+ GRPC_API_TRACE(
+ "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
+ (server, cq, reserved));
+ register_completion_queue(server, cq, false, reserved);
+}
+
+void grpc_server_register_non_listening_completion_queue(
+ grpc_server *server, grpc_completion_queue *cq, void *reserved) {
+ GRPC_API_TRACE(
+ "grpc_server_register_non_listening_completion_queue(server=%p, cq=%p, "
+ "reserved=%p)",
+ 3, (server, cq, reserved));
+ register_completion_queue(server, cq, true, reserved);
}
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
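
Both public entry points now funnel into register_completion_queue. A usage sketch of the two registration paths, with NULL channel args and reserved pointers as the simplest valid arguments (only calls that appear in this diff or in the public API are used):

grpc_server *server = grpc_server_create(NULL, NULL);
grpc_completion_queue *listening_cq = grpc_completion_queue_create(NULL);
grpc_completion_queue *non_listening_cq = grpc_completion_queue_create(NULL);

/* Added to server->cqs and internally ref'd. */
grpc_server_register_completion_queue(server, listening_cq, NULL);

/* Only marked via grpc_cq_mark_non_listening_server_cq(); never added to
   server->cqs, so the server does not use its pollset for incoming channels. */
grpc_server_register_non_listening_completion_queue(server, non_listening_cq, NULL);

The trailing reserved argument must be NULL in both cases; register_completion_queue asserts on it.
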
@@ -1018,7 +1040,6 @@ void grpc_server_start(grpc_server *server) {
void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
grpc_transport *transport,
const grpc_channel_args *args) {
- size_t i;
size_t num_registered_methods;
size_t alloc;
registered_method *rm;
@@ -1033,12 +1054,6 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
uint32_t max_probes = 0;
grpc_transport_op op;
- for (i = 0; i < s->cq_count; i++) {
- memset(&op, 0, sizeof(op));
- op.bind_pollset = grpc_cq_pollset(s->cqs[i]);
- grpc_transport_perform_op(exec_ctx, transport, &op);
- }
-
channel =
grpc_channel_create(exec_ctx, NULL, args, GRPC_SERVER_CHANNEL, transport);
chand = (channel_data *)grpc_channel_stack_element(