Diffstat (limited to 'src/core/lib')
 src/core/lib/iomgr/ev_epoll1_linux.cc                 |  36
 src/core/lib/iomgr/ev_epollex_linux.cc                |  35
 src/core/lib/iomgr/ev_epollsig_linux.cc               |  37
 src/core/lib/iomgr/ev_poll_posix.cc                   |   1
 src/core/lib/iomgr/ev_posix.cc                        |   4
 src/core/lib/iomgr/lockfree_event.cc                  | 130
 src/core/lib/iomgr/lockfree_event.h                   |  44
 src/core/lib/iomgr/network_status_tracker.h           |   8
 src/core/lib/iomgr/pollset_uv.cc                      |  20
 src/core/lib/iomgr/timer_generic.cc                   |  28
 src/core/lib/security/transport/client_auth_filter.cc |   2
 src/core/lib/support/cmdline.cc                       |   2
 src/core/lib/support/mpscq.cc                         |  37
 src/core/lib/support/mpscq.h                          |  30
 src/core/lib/surface/completion_queue.cc              |   3
 src/core/lib/surface/server.cc                        | 181
 16 files changed, 317 insertions(+), 281 deletions(-)
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 504c659874..61da996781 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/support/log.h>
+
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
@@ -34,7 +36,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
@@ -46,6 +47,7 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
@@ -111,8 +113,8 @@ static void epoll_set_shutdown() {
struct grpc_fd {
int fd;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
@@ -264,8 +266,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
}
new_fd->fd = fd;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -297,12 +299,11 @@ static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
* shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_error* why, bool releasing_fd) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
if (!releasing_fd) {
shutdown(fd->fd, SHUT_RDWR);
}
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
@@ -318,7 +319,7 @@ static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_error* error = GRPC_ERROR_NONE;
bool is_release_fd = (release_fd != NULL);
- if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
+ if (!fd->read_closure->IsShutdown()) {
fd_shutdown_internal(exec_ctx, fd,
GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
is_release_fd);
@@ -335,8 +336,8 @@ static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
@@ -351,28 +352,28 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
/*******************************************************************************
@@ -1230,6 +1231,7 @@ static const grpc_event_engine_vtable vtable = {
* support is available */
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
return NULL;
}
@@ -1254,6 +1256,8 @@ const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
+ gpr_log(GPR_ERROR,
+ "Skipping epoll1 becuase GRPC_LINUX_EPOLL is not defined.");
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
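
The epoll1 changes above swap the raw gpr_atm event words for grpc_core::ManualConstructor<grpc_core::LockfreeEvent>, decoupling the LockfreeEvent's lifetime from the enclosing grpc_fd (which is recycled through a freelist): Init() runs the constructor when an fd is created and Destroy() runs the destructor when it is orphaned. A minimal sketch of the idiom, assuming only standard C++ (the real template lives in src/core/lib/support/manual_constructor.h and is more complete):

    // Sketch of the ManualConstructor idiom, for illustration only.
    #include <new>      // placement new
    #include <utility>  // std::forward

    template <typename T>
    class ManualConstructorSketch {
     public:
      // Begin T's lifetime in-place, independent of when the enclosing
      // struct's storage was obtained (e.g. off a freelist).
      template <typename... Args>
      void Init(Args&&... args) {
        new (&space_) T(std::forward<Args>(args)...);
      }
      // End T's lifetime explicitly before the storage is recycled.
      void Destroy() { get()->~T(); }
      T* get() { return reinterpret_cast<T*>(&space_); }
      T* operator->() { return get(); }

     private:
      alignas(T) unsigned char space_[sizeof(T)];
    };
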
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index aafdd690c7..caaee76b8c 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/support/log.h>
+
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
@@ -34,7 +36,6 @@
#include <unistd.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
@@ -48,6 +49,7 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/spinlock.h"
// debug aid: create workers on the heap (allows asan to spot
@@ -153,8 +155,8 @@ struct grpc_fd {
gpr_mu pollable_mu;
pollable* pollable_obj;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
@@ -286,8 +288,8 @@ static void fd_destroy(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
fd->freelist_next = fd_freelist;
fd_freelist = fd;
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_unlock(&fd_freelist_mu);
}
@@ -347,8 +349,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
new_fd->pollable_obj = NULL;
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -411,27 +413,26 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -702,7 +703,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -714,7 +715,7 @@ static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
@@ -1451,10 +1452,12 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
}
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd.");
return NULL;
}
if (!grpc_is_epollexclusive_available()) {
+ gpr_log(GPR_INFO, "Skipping epollex because it is not supported.");
return NULL;
}
@@ -1480,6 +1483,8 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
* NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) {
+ gpr_log(GPR_ERROR,
+ "Skipping epollex becuase GRPC_LINUX_EPOLL is not defined.");
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index d5f3122abc..42806e9d14 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -19,6 +19,7 @@
#include "src/core/lib/iomgr/port.h"
#include <grpc/grpc_posix.h>
+#include <grpc/support/log.h>
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
@@ -37,7 +38,6 @@
#include <unistd.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
@@ -50,6 +50,7 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
@@ -127,8 +128,8 @@ struct grpc_fd {
valid */
bool orphaned;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
@@ -766,8 +767,8 @@ static void unref_by(grpc_fd* fd, int n) {
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_unlock(&fd_freelist_mu);
} else {
@@ -832,8 +833,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -924,27 +925,26 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -1108,7 +1108,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -1120,7 +1120,7 @@ static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
static void pollset_release_polling_island(grpc_exec_ctx* exec_ctx,
@@ -1711,14 +1711,17 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
/* If use of signals is disabled, we cannot use the epoll engine */
if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
+ gpr_log(GPR_ERROR, "Skipping epollsig because use of signals is disabled.");
return NULL;
}
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping epollsig because of no wakeup fd.");
return NULL;
}
if (!is_epoll_available()) {
+ gpr_log(GPR_ERROR, "Skipping epollsig because epoll is unavailable.");
return NULL;
}
@@ -1726,6 +1729,8 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
if (explicit_request) {
grpc_use_signal(SIGRTMIN + 6);
} else {
+ gpr_log(GPR_ERROR,
+ "Skipping epollsig because uninitialized wakeup signal.");
return NULL;
}
}
@@ -1751,6 +1756,8 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
* NULL */
const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
+ gpr_log(GPR_ERROR,
+ "Skipping epollsig becuase GRPC_LINUX_EPOLL is not defined.");
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 554a438e6a..5745a2ae5b 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -1712,6 +1712,7 @@ static const grpc_event_engine_vtable vtable = {
const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping poll because of no wakeup fd.");
return NULL;
}
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index f72f5088f0..a05279a4aa 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -172,12 +172,12 @@ void grpc_event_engine_init(void) {
gpr_free(strings[i]);
}
gpr_free(strings);
- gpr_free(s);
if (g_event_engine == NULL) {
- gpr_log(GPR_ERROR, "No event engine could be initialized");
+ gpr_log(GPR_ERROR, "No event engine could be initialized from %s", s);
abort();
}
+ gpr_free(s);
}
void grpc_event_engine_shutdown(void) {
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index 443a8375b2..40e2ed6219 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -26,92 +26,96 @@ extern grpc_tracer_flag grpc_polling_trace;
/* 'state' holds the closure to call when the fd is readable or writable
It can contain one of the following values:
- CLOSURE_READY : The fd has an I/O event of interest but there is no
+ kClosureReady : The fd has an I/O event of interest but there is no
closure yet to execute
- CLOSURE_NOT_READY : The fd has no I/O event of interest
+ kClosureNotReady : The fd has no I/O event of interest
closure ptr : The closure to be executed when the fd has an I/O
event of interest
- shutdown_error | FD_SHUTDOWN_BIT :
- 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+ shutdown_error | kShutdownBit :
+ 'shutdown_error' field ORed with kShutdownBit.
This indicates that the fd is shutdown. Since all
memory allocations are word-aligned, the lower two
bits of the shutdown_error pointer are always 0. So
- it is safe to OR these with FD_SHUTDOWN_BIT
+ it is safe to OR these with kShutdownBit
Valid state transitions:
- <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+ <closure ptr> <-----3------ kClosureNotReady -----1-------> kClosureReady
| | ^ | ^ | |
| | | | | | |
| +--------------4----------+ 6 +---------2---------------+ |
| | |
| v |
- +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+ +-----5-------> [shutdown_error | kShutdownBit] <-------7---------+
- For 1, 4 : See grpc_lfev_set_ready() function
- For 2, 3 : See grpc_lfev_notify_on() function
- For 5,6,7: See grpc_lfev_set_shutdown() function */
+ For 1, 4 : See SetReady() function
+ For 2, 3 : See NotifyOn() function
+ For 5,6,7: See SetShutdown() function */
-#define CLOSURE_NOT_READY ((gpr_atm)0)
-#define CLOSURE_READY ((gpr_atm)2)
+namespace grpc_core {
-#define FD_SHUTDOWN_BIT ((gpr_atm)1)
+LockfreeEvent::LockfreeEvent() {
+ /* Perform an atomic store to start the state machine.
-void grpc_lfev_init(gpr_atm* state) {
- gpr_atm_no_barrier_store(state, CLOSURE_NOT_READY);
+ Note carefully that LockfreeEvent *MAY* be used whilst in a destroyed
+ state, while a file descriptor is on a freelist. In such a state it may
+ be SetReady'd, and so we need to perform an atomic operation here to
+ ensure no races */
+ gpr_atm_no_barrier_store(&state_, kClosureNotReady);
}
-void grpc_lfev_destroy(gpr_atm* state) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- if (curr & FD_SHUTDOWN_BIT) {
- GRPC_ERROR_UNREF((grpc_error*)(curr & ~FD_SHUTDOWN_BIT));
- } else {
- GPR_ASSERT(curr == CLOSURE_NOT_READY || curr == CLOSURE_READY);
- }
-}
-
-bool grpc_lfev_is_shutdown(gpr_atm* state) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- return (curr & FD_SHUTDOWN_BIT) != 0;
+LockfreeEvent::~LockfreeEvent() {
+ gpr_atm curr;
+ do {
+ curr = gpr_atm_no_barrier_load(&state_);
+ if (curr & kShutdownBit) {
+ GRPC_ERROR_UNREF((grpc_error*)(curr & ~kShutdownBit));
+ } else {
+ GPR_ASSERT(curr == kClosureNotReady || curr == kClosureReady);
+ }
+ /* we CAS in a shutdown, no error value here. If this event is interacted
+ with post-deletion (see the note in the constructor) we want the bit
+ pattern to prevent error retention in a deleted object */
+ } while (!gpr_atm_no_barrier_cas(&state_, curr,
+ kShutdownBit /* shutdown, no error */));
}
-void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_closure* closure, const char* variable) {
+void LockfreeEvent::NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
- state, (void*)curr, closure);
+ gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
+ (void*)curr, closure);
}
switch (curr) {
- case CLOSURE_NOT_READY: {
- /* CLOSURE_NOT_READY -> <closure>.
+ case kClosureNotReady: {
+ /* kClosureNotReady -> <closure>.
We're guaranteed by API that there's an acquire barrier before here,
so there's no need to double-dip and this can be a release-only.
The release itself pairs with the acquire half of a set_ready full
barrier. */
- if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+ if (gpr_atm_rel_cas(&state_, kClosureNotReady, (gpr_atm)closure)) {
return; /* Successful. Return */
}
break; /* retry */
}
- case CLOSURE_READY: {
- /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+ case kClosureReady: {
+ /* Change the state to kClosureNotReady. Schedule the closure if
successful. If not, the state most likely transitioned to shutdown.
We should retry.
This can be a no-barrier cas since the state is being transitioned to
- CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+ kClosureNotReady; set_ready and set_shutdown do not schedule any
closure when transitioning out of CLOSURE_NOT_READY state (i.e. there
is no other code that needs to 'happen-after' this) */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+ if (gpr_atm_no_barrier_cas(&state_, kClosureReady, kClosureNotReady)) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
return; /* Successful. Return */
}
@@ -123,8 +127,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* 'curr' is either a closure or the fd is shutdown(in which case 'curr'
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
- grpc_error* shutdown_err = (grpc_error*)(curr & ~FD_SHUTDOWN_BIT);
+ if ((curr & kShutdownBit) > 0) {
+ grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -133,7 +137,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* There is already a closure! This indicates a bug in the code */
gpr_log(GPR_ERROR,
- "notify_on called with a previous callback still pending");
+ "LockfreeEvent::NotifyOn: notify_on called with a previous "
+ "callback still pending");
abort();
}
}
@@ -142,22 +147,22 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
GPR_UNREACHABLE_CODE(return );
}
-bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_error* shutdown_err) {
- gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+bool LockfreeEvent::SetShutdown(grpc_exec_ctx* exec_ctx,
+ grpc_error* shutdown_err) {
+ gpr_atm new_state = (gpr_atm)shutdown_err | kShutdownBit;
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
- (void*)curr, grpc_error_string(shutdown_err));
+ gpr_log(GPR_ERROR, "LockfreeEvent::SetShutdown: %p curr=%p err=%s",
+ &state_, (void*)curr, grpc_error_string(shutdown_err));
}
switch (curr) {
- case CLOSURE_READY:
- case CLOSURE_NOT_READY:
+ case kClosureReady:
+ case kClosureNotReady:
/* Need a full barrier here so that the initial load in notify_on
doesn't need a barrier */
- if (gpr_atm_full_cas(state, curr, new_state)) {
+ if (gpr_atm_full_cas(&state_, curr, new_state)) {
return true; /* early out */
}
break; /* retry */
@@ -166,7 +171,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* 'curr' is either a closure or the fd is already shutdown */
/* If fd is already shutdown, we are done */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ if ((curr & kShutdownBit) > 0) {
GRPC_ERROR_UNREF(shutdown_err);
return false;
}
@@ -176,7 +181,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
Needs an acquire to pair with setting the closure (and get a
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
- if (gpr_atm_full_cas(state, curr, new_state)) {
+ if (gpr_atm_full_cas(&state_, curr, new_state)) {
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -193,26 +198,25 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
GPR_UNREACHABLE_CODE(return false);
}
-void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- const char* variable) {
+void LockfreeEvent::SetReady(grpc_exec_ctx* exec_ctx) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
+ gpr_log(GPR_ERROR, "LockfreeEvent::SetReady: %p curr=%p", &state_,
(void*)curr);
}
switch (curr) {
- case CLOSURE_READY: {
+ case kClosureReady: {
/* Already ready. We are done here */
return;
}
- case CLOSURE_NOT_READY: {
+ case kClosureNotReady: {
/* No barrier required as we're transitioning to a state that does not
involve a closure */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+ if (gpr_atm_no_barrier_cas(&state_, kClosureNotReady, kClosureReady)) {
return; /* early out */
}
break; /* retry */
@@ -220,14 +224,14 @@ void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
default: {
/* 'curr' is either a closure or the fd is shutdown */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ if ((curr & kShutdownBit) > 0) {
/* The fd is shutdown. Do nothing */
return;
}
/* Full cas: acquire pairs with this cas' release in the event of a
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
- else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
+ else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr, GRPC_ERROR_NONE);
return;
}
@@ -239,3 +243,5 @@ void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
}
}
}
+
+} // namespace grpc_core
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index 75526d6b9f..c667dcd3bc 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -25,24 +25,30 @@
#include "src/core/lib/iomgr/exec_ctx.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void grpc_lfev_init(gpr_atm* state);
-void grpc_lfev_destroy(gpr_atm* state);
-bool grpc_lfev_is_shutdown(gpr_atm* state);
-
-void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_closure* closure, const char* variable);
-/* Returns true on first successful shutdown */
-bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_error* shutdown_err);
-void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- const char* variable);
-
-#ifdef __cplusplus
-}
-#endif
+namespace grpc_core {
+
+class LockfreeEvent {
+ public:
+ LockfreeEvent();
+ ~LockfreeEvent();
+
+ LockfreeEvent(const LockfreeEvent&) = delete;
+ LockfreeEvent& operator=(const LockfreeEvent&) = delete;
+
+ bool IsShutdown() const {
+ return (gpr_atm_no_barrier_load(&state_) & kShutdownBit) != 0;
+ }
+
+ void NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure);
+ bool SetShutdown(grpc_exec_ctx* exec_ctx, grpc_error* error);
+ void SetReady(grpc_exec_ctx* exec_ctx);
+
+ private:
+ enum State { kClosureNotReady = 0, kClosureReady = 2, kShutdownBit = 1 };
+
+ gpr_atm state_;
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */
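
The three operations encode the poller/consumer handshake from the state diagram in lockfree_event.cc: NotifyOn arms a closure, SetReady fires it (or latches kClosureReady if nothing is armed yet), and SetShutdown wins at most once. A hedged usage sketch against the API declared above; ev, closure and exec_ctx are assumed to be supplied by the surrounding event engine, not code from this change:

    // Usage sketch (assumed caller-supplied arguments): how the
    // LockfreeEvent operations interact.
    static void lockfree_event_usage(grpc_exec_ctx* exec_ctx,
                                     grpc_core::LockfreeEvent* ev,
                                     grpc_closure* closure) {
      // Consumer: arm the event. If SetReady already fired, 'closure' is
      // scheduled immediately; otherwise it is parked in state_.
      ev->NotifyOn(exec_ctx, closure);

      // Poller (possibly another thread): schedules the parked closure, or
      // latches kClosureReady so the next NotifyOn returns at once.
      ev->SetReady(exec_ctx);

      // Shutdown: only the first caller sees 'true'; any armed or future
      // closure is scheduled with an "FD Shutdown" error wrapping this one.
      if (ev->SetShutdown(exec_ctx, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                        "shutdown"))) {
        // first shutdown won the race
      }
    }
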
diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h
index 3033e0a833..32244d9b77 100644
--- a/src/core/lib/iomgr/network_status_tracker.h
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -20,10 +20,6 @@
#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
#include "src/core/lib/iomgr/endpoint.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
void grpc_network_status_init(void);
void grpc_network_status_shutdown(void);
@@ -31,8 +27,4 @@ void grpc_network_status_register_endpoint(grpc_endpoint* ep);
void grpc_network_status_unregister_endpoint(grpc_endpoint* ep);
void grpc_network_status_shutdown_all_endpoints();
-#ifdef __cplusplus
-}
-#endif
-
#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index 6b9c53c01c..1d54942c1d 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -40,7 +40,7 @@ grpc_tracer_flag grpc_trace_fd_refcount =
#endif
struct grpc_pollset {
- uv_timer_t timer;
+ uv_timer_t* timer;
int shutting_down;
};
@@ -78,12 +78,16 @@ void grpc_pollset_global_shutdown(void) {
static void timer_run_cb(uv_timer_t* timer) {}
-static void timer_close_cb(uv_handle_t* handle) { handle->data = (void*)1; }
+static void timer_close_cb(uv_handle_t* handle) {
+ handle->data = (void*)1;
+ gpr_free(handle);
+}
void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
GRPC_UV_ASSERT_SAME_THREAD();
*mu = &grpc_polling_mu;
- uv_timer_init(uv_default_loop(), &pollset->timer);
+ pollset->timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
+ uv_timer_init(uv_default_loop(), pollset->timer);
pollset->shutting_down = 0;
}
@@ -104,11 +108,11 @@ void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GRPC_UV_ASSERT_SAME_THREAD();
- uv_close((uv_handle_t*)&pollset->timer, timer_close_cb);
+ uv_close((uv_handle_t*)pollset->timer, timer_close_cb);
// timer.data is a boolean indicating that the timer has finished closing
- pollset->timer.data = (void*)0;
+ pollset->timer->data = (void*)0;
if (grpc_pollset_work_run_loop) {
- while (!pollset->timer.data) {
+ while (!pollset->timer->data) {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
}
@@ -130,11 +134,11 @@ grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
/* We special-case timeout=0 so that we don't bother with the timer when
the loop won't block anyway */
if (timeout > 0) {
- uv_timer_start(&pollset->timer, timer_run_cb, timeout, 0);
+ uv_timer_start(pollset->timer, timer_run_cb, timeout, 0);
/* Run until there is some I/O activity or the timer triggers. It doesn't
matter which happens */
uv_run(uv_default_loop(), UV_RUN_ONCE);
- uv_timer_stop(&pollset->timer);
+ uv_timer_stop(pollset->timer);
} else {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
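
The pollset now heap-allocates its uv_timer_t and frees it in timer_close_cb because uv_close() is asynchronous: libuv keeps using the handle until the close callback runs on the loop, so the memory must be able to outlive grpc_pollset_destroy. A minimal standalone sketch of that libuv ownership pattern, independent of gRPC:

    // Standalone sketch of the libuv pattern adopted above: a handle given
    // to uv_close() must stay valid until its close callback runs, so it is
    // heap-allocated and freed only in the callback.
    #include <stdlib.h>
    #include <uv.h>

    static void on_timer(uv_timer_t* timer) { /* timer fired */ }

    static void on_close(uv_handle_t* handle) {
      free(handle);  // only now is the handle memory safe to release
    }

    int main(void) {
      uv_timer_t* timer = (uv_timer_t*)malloc(sizeof(*timer));
      uv_timer_init(uv_default_loop(), timer);
      uv_timer_start(timer, on_timer, 10 /* ms */, 0 /* no repeat */);
      uv_close((uv_handle_t*)timer, on_close);    // async close request
      uv_run(uv_default_loop(), UV_RUN_DEFAULT);  // drives on_close
      return 0;
    }
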
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 2333f180d4..38ac66ea2f 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -25,6 +25,7 @@
#include "src/core/lib/iomgr/timer.h"
#include <grpc/support/alloc.h>
+#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
@@ -37,8 +38,6 @@
#define INVALID_HEAP_INDEX 0xffffffffu
-#define LOG2_NUM_SHARDS 5
-#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
#define ADD_DEADLINE_SCALE 0.33
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1
@@ -74,14 +73,16 @@ typedef struct {
grpc_timer list;
} timer_shard;
+static size_t g_num_shards;
+
/* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address
* is hashed to select the timer shard to add the timer to */
-static timer_shard g_shards[NUM_SHARDS];
+static timer_shard* g_shards;
/* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e
* the deadline of the next timer in each shard).
* Access to this is protected by g_shared_mutables.mu */
-static timer_shard* g_shard_queue[NUM_SHARDS];
+static timer_shard** g_shard_queue;
#ifndef NDEBUG
@@ -241,6 +242,11 @@ static gpr_atm compute_min_deadline(timer_shard* shard) {
void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
uint32_t i;
+ g_num_shards = GPR_MAX(1, 2 * gpr_cpu_num_cores());
+ g_shards = (timer_shard*)gpr_zalloc(g_num_shards * sizeof(*g_shards));
+ g_shard_queue =
+ (timer_shard**)gpr_zalloc(g_num_shards * sizeof(*g_shard_queue));
+
g_shared_mutables.initialized = true;
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
@@ -250,7 +256,7 @@ void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
grpc_register_tracer(&grpc_timer_trace);
grpc_register_tracer(&grpc_timer_check_trace);
- for (i = 0; i < NUM_SHARDS; i++) {
+ for (i = 0; i < g_num_shards; i++) {
timer_shard* shard = &g_shards[i];
gpr_mu_init(&shard->mu);
grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
@@ -267,17 +273,19 @@ void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
}
void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {
- int i;
+ size_t i;
run_some_expired_timers(
exec_ctx, GPR_ATM_MAX, NULL,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
- for (i = 0; i < NUM_SHARDS; i++) {
+ for (i = 0; i < g_num_shards; i++) {
timer_shard* shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
grpc_timer_heap_destroy(&shard->heap);
}
gpr_mu_destroy(&g_shared_mutables.mu);
gpr_tls_destroy(&g_last_seen_min_timer);
+ gpr_free(g_shards);
+ gpr_free(g_shard_queue);
g_shared_mutables.initialized = false;
}
@@ -311,7 +319,7 @@ static void note_deadline_change(timer_shard* shard) {
g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
}
- while (shard->shard_queue_index < NUM_SHARDS - 1 &&
+ while (shard->shard_queue_index < g_num_shards - 1 &&
shard->min_deadline >
g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
swap_adjacent_shards_in_queue(shard->shard_queue_index);
@@ -323,7 +331,7 @@ void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; }
void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
grpc_millis deadline, grpc_closure* closure) {
int is_first_timer = 0;
- timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
+ timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
timer->closure = closure;
timer->deadline = deadline;
@@ -417,7 +425,7 @@ void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
return;
}
- timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
+ timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
gpr_mu_lock(&shard->mu);
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index 8f7530ed27..11f5a13ccc 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -139,7 +139,7 @@ void grpc_auth_metadata_context_build(
method_name = gpr_strdup(last_slash + 1);
}
char* host_and_port = grpc_slice_to_c_string(call_host);
- if (strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) {
+ if (url_scheme != NULL && strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) {
/* Remove the port if it is 443. */
char* port_delimiter = strrchr(host_and_port, ':');
if (port_delimiter != NULL && strcmp(port_delimiter + 1, "443") == 0) {
diff --git a/src/core/lib/support/cmdline.cc b/src/core/lib/support/cmdline.cc
index 49b34194c3..d2785d2f30 100644
--- a/src/core/lib/support/cmdline.cc
+++ b/src/core/lib/support/cmdline.cc
@@ -105,7 +105,7 @@ void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
}
void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
- char** value) {
+ const char** value) {
add_arg(cl, name, help, ARGTYPE_STRING, value);
}
diff --git a/src/core/lib/support/mpscq.cc b/src/core/lib/support/mpscq.cc
index db25f24264..b270777d5c 100644
--- a/src/core/lib/support/mpscq.cc
+++ b/src/core/lib/support/mpscq.cc
@@ -31,11 +31,12 @@ void gpr_mpscq_destroy(gpr_mpscq* q) {
GPR_ASSERT(q->tail == &q->stub);
}
-void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
+bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL);
gpr_mpscq_node* prev =
(gpr_mpscq_node*)gpr_atm_full_xchg(&q->head, (gpr_atm)n);
gpr_atm_rel_store(&prev->next, (gpr_atm)n);
+ return prev == &q->stub;
}
gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q) {
@@ -77,3 +78,37 @@ gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty) {
*empty = false;
return NULL;
}
+
+void gpr_locked_mpscq_init(gpr_locked_mpscq* q) {
+ gpr_mpscq_init(&q->queue);
+ gpr_mu_init(&q->mu);
+}
+
+void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q) {
+ gpr_mpscq_destroy(&q->queue);
+ gpr_mu_destroy(&q->mu);
+}
+
+bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n) {
+ return gpr_mpscq_push(&q->queue, n);
+}
+
+gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q) {
+ if (gpr_mu_trylock(&q->mu)) {
+ gpr_mpscq_node* n = gpr_mpscq_pop(&q->queue);
+ gpr_mu_unlock(&q->mu);
+ return n;
+ }
+ return NULL;
+}
+
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q) {
+ gpr_mu_lock(&q->mu);
+ bool empty = false;
+ gpr_mpscq_node* n;
+ do {
+ n = gpr_mpscq_pop_and_check_end(&q->queue, &empty);
+ } while (n == NULL && !empty);
+ gpr_mu_unlock(&q->mu);
+ return n;
+}
diff --git a/src/core/lib/support/mpscq.h b/src/core/lib/support/mpscq.h
index 1cc9d89feb..fb22742050 100644
--- a/src/core/lib/support/mpscq.h
+++ b/src/core/lib/support/mpscq.h
@@ -20,6 +20,7 @@
#define GRPC_CORE_LIB_SUPPORT_MPSCQ_H
#include <grpc/support/atm.h>
+#include <grpc/support/sync.h>
#include <stdbool.h>
#include <stddef.h>
@@ -49,13 +50,40 @@ typedef struct gpr_mpscq {
void gpr_mpscq_init(gpr_mpscq* q);
void gpr_mpscq_destroy(gpr_mpscq* q);
// Push a node
-void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
+// Thread safe - can be called from multiple threads concurrently
+// Returns true if this was possibly the first node (may return true
+// sporadically, will not return false sporadically)
+bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
// Pop a node (returns NULL if no node is ready - which doesn't indicate that
// the queue is empty!!)
+// Thread compatible - can only be called from one thread at a time
gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q);
// Pop a node; sets *empty to true if the queue is empty, or false if it is not
gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty);
+// An mpscq with a lock: it's safe to pop from multiple threads concurrently,
+// but only one popper at a time will succeed
+typedef struct gpr_locked_mpscq {
+ gpr_mpscq queue;
+ gpr_mu mu;
+} gpr_locked_mpscq;
+
+void gpr_locked_mpscq_init(gpr_locked_mpscq* q);
+void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q);
+// Push a node
+// Thread safe - can be called from multiple threads concurrently
+// Returns true if this was possibly the first node (may return true
+// sporadically, will not return false sporadically)
+bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n);
+
+// Pop a node (returns NULL if no node is ready - which doesn't indicate that
+// the queue is empty!!)
+// Thread safe - can be called from multiple threads concurrently
+gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q);
+
+// Pop a node. Returns NULL only if the queue was empty at some point after
+// calling this function
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q);
#ifdef __cplusplus
}
#endif
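
The push contract ("possibly the first node") is the hook the server uses below: only a queue that may have gone from empty to non-empty forces the pusher to take the matching lock. On the consumer side, try_pop is the contended fast path and pop the authoritative slow path. A hypothetical consumer sketch against the API above (drain_one is not part of this change):

    // Hypothetical consumer sketch combining the two pop variants.
    static gpr_mpscq_node* drain_one(gpr_locked_mpscq* q) {
      // Fast path: may return NULL spuriously if another popper holds the
      // lock, even when the queue is non-empty.
      gpr_mpscq_node* n = gpr_locked_mpscq_try_pop(q);
      if (n == NULL) {
        // Slow path: a NULL here really means the queue was empty at some
        // point during the call.
        n = gpr_locked_mpscq_pop(q);
      }
      return n;
    }
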
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 9dabe76510..922df923ae 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -127,6 +127,7 @@ static grpc_error* non_polling_poller_work(grpc_exec_ctx* exec_ctx,
while (!npp->shutdown && !w.kicked &&
!gpr_cv_wait(&w.cv, &npp->mu, deadline_ts))
;
+ grpc_exec_ctx_invalidate_now(exec_ctx);
if (&w == npp->root) {
npp->root = w.next;
if (&w == npp->root) {
@@ -375,8 +376,8 @@ int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
(grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
*tag = storage->tag;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- storage->done(&exec_ctx, storage->done_arg, storage);
*ok = (storage->next & (uintptr_t)(1)) == 1;
+ storage->done(&exec_ctx, storage->done_arg, storage);
ret = 1;
cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
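
Reordering the *ok read before storage->done matters because the done callback may free or recycle the storage. The general shape of the fix, as a hedged sketch with hypothetical types:

    // Hedged sketch (hypothetical types): copy what you need out of an
    // object before invoking a callback that may free it.
    #include <cstdint>

    struct completion {
      void (*done)(void* arg);
      void* arg;
      uintptr_t next;  // low bit carries the 'ok' flag, as above
    };

    static bool finish(completion* c) {
      bool ok = (c->next & (uintptr_t)1) == 1;  // read first...
      c->done(c->arg);  // ...then run the callback; 'c' may now be gone
      return ok;
    }
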
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index eb7a4e2d30..da7ae17bef 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -33,7 +33,8 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/support/stack_lockfree.h"
+#include "src/core/lib/support/mpscq.h"
+#include "src/core/lib/support/spinlock.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -63,6 +64,7 @@ grpc_tracer_flag grpc_server_channel_trace =
GRPC_TRACER_INITIALIZER(false, "server_channel");
typedef struct requested_call {
+ gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
void* tag;
@@ -128,10 +130,7 @@ typedef struct request_matcher request_matcher;
struct call_data {
grpc_call* call;
- /** protects state */
- gpr_mu mu_state;
- /** the current state of a call - see call_state */
- call_state state;
+ gpr_atm state;
bool path_set;
bool host_set;
@@ -162,7 +161,7 @@ struct request_matcher {
grpc_server* server;
call_data* pending_head;
call_data* pending_tail;
- gpr_stack_lockfree** requests_per_cq;
+ gpr_locked_mpscq* requests_per_cq;
};
struct registered_method {
@@ -207,11 +206,6 @@ struct grpc_server {
registered_method* registered_methods;
/** one request matcher for unregistered methods */
request_matcher unregistered_request_matcher;
- /** free list of available requested_calls_per_cq indices */
- gpr_stack_lockfree** request_freelist_per_cq;
- /** requested call backing data */
- requested_call** requested_calls_per_cq;
- int max_requested_calls_per_cq;
gpr_atm shutdown_flag;
uint8_t shutdown_published;
@@ -313,21 +307,20 @@ static void channel_broadcaster_shutdown(grpc_exec_ctx* exec_ctx,
* request_matcher
*/
-static void request_matcher_init(request_matcher* rm, size_t entries,
- grpc_server* server) {
+static void request_matcher_init(request_matcher* rm, grpc_server* server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq = (gpr_stack_lockfree**)gpr_malloc(
+ rm->requests_per_cq = (gpr_locked_mpscq*)gpr_malloc(
sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
- rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
+ gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
}
}
static void request_matcher_destroy(request_matcher* rm) {
for (size_t i = 0; i < rm->server->cq_count; i++) {
- GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests_per_cq[i]) == -1);
- gpr_stack_lockfree_destroy(rm->requests_per_cq[i]);
+ GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == NULL);
+ gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
}
gpr_free(rm->requests_per_cq);
}
@@ -342,9 +335,7 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx* exec_ctx,
while (rm->pending_head) {
call_data* calld = rm->pending_head;
rm->pending_head = calld->pending_next;
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -357,13 +348,17 @@ static void request_matcher_kill_requests(grpc_exec_ctx* exec_ctx,
grpc_server* server,
request_matcher* rm,
grpc_error* error) {
- int request_id;
+ requested_call* rc;
for (size_t i = 0; i < server->cq_count; i++) {
- while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) !=
- -1) {
- fail_call(exec_ctx, server, i,
- &server->requested_calls_per_cq[i][request_id],
- GRPC_ERROR_REF(error));
+ /* Here we know:
+ 1. no requests are being added (since the server is shut down)
+ 2. no other threads are pulling (since the shut down process is single
+ threaded)
+ So, we can ignore the queue lock and just pop, with the guarantee that a
+ NULL returned here truly means that the queue is empty */
+ while ((rc = (requested_call*)gpr_mpscq_pop(
+ &rm->requests_per_cq[i].queue)) != NULL) {
+ fail_call(exec_ctx, server, i, rc, GRPC_ERROR_REF(error));
}
}
GRPC_ERROR_UNREF(error);
@@ -398,13 +393,7 @@ static void server_delete(grpc_exec_ctx* exec_ctx, grpc_server* server) {
}
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(exec_ctx, server->cqs[i], "server");
- if (server->started) {
- gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]);
- gpr_free(server->requested_calls_per_cq[i]);
- }
}
- gpr_free(server->request_freelist_per_cq);
- gpr_free(server->requested_calls_per_cq);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
@@ -462,21 +451,7 @@ static void destroy_channel(grpc_exec_ctx* exec_ctx, channel_data* chand,
static void done_request_event(grpc_exec_ctx* exec_ctx, void* req,
grpc_cq_completion* c) {
- requested_call* rc = (requested_call*)req;
- grpc_server* server = rc->server;
-
- if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
- rc < server->requested_calls_per_cq[rc->cq_idx] +
- server->max_requested_calls_per_cq) {
- GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX);
- gpr_stack_lockfree_push(
- server->request_freelist_per_cq[rc->cq_idx],
- (int)(rc - server->requested_calls_per_cq[rc->cq_idx]));
- } else {
- gpr_free(req);
- }
-
- server_unref(exec_ctx, server);
+ gpr_free(req);
}
static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
@@ -508,10 +483,6 @@ static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
GPR_UNREACHABLE_CODE(return );
}
- grpc_call_element* elem =
- grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data* chand = (channel_data*)elem->channel_data;
- server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
}
@@ -525,9 +496,7 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
grpc_server* server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -539,16 +508,14 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
for (size_t i = 0; i < server->cq_count; i++) {
size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
- int request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
- if (request_id == -1) {
+ requested_call* rc =
+ (requested_call*)gpr_locked_mpscq_try_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) {
continue;
} else {
GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
- gpr_mu_lock(&calld->mu_state);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&calld->mu_state);
- publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls_per_cq[cq_idx][request_id]);
+ gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
return; /* early out */
}
}
@@ -556,9 +523,27 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
/* no cq to take the request found: queue it on the slow list */
GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
- gpr_mu_lock(&calld->mu_state);
- calld->state = PENDING;
- gpr_mu_unlock(&calld->mu_state);
+
+ // We need to ensure that all the queues are empty. We do this under
+ // the server mu_call lock to ensure that if something is added to
+ // an empty request queue, it will block until the call is actually
+ // added to the pending list.
+ for (size_t i = 0; i < server->cq_count; i++) {
+ size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
+ requested_call* rc =
+ (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) {
+ continue;
+ } else {
+ gpr_mu_unlock(&server->mu_call);
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i + server->cq_count);
+ gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
+ return; /* early out */
+ }
+ }
+
+ gpr_atm_no_barrier_store(&calld->state, PENDING);
if (rm->pending_head == NULL) {
rm->pending_tail = rm->pending_head = calld;
} else {
@@ -576,9 +561,7 @@ static void finish_start_new_rpc(
call_data* calld = (call_data*)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
@@ -807,21 +790,14 @@ static void got_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) {
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
- } else if (calld->state == PENDING) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) {
/* zombied call will be destroyed when it's removed from the pending
queue... later */
- } else {
- gpr_mu_unlock(&calld->mu_state);
}
}
}
@@ -885,7 +861,6 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
memset(calld, 0, sizeof(call_data));
calld->deadline = GRPC_MILLIS_INF_FUTURE;
calld->call = grpc_call_from_top_element(elem);
- gpr_mu_init(&calld->mu_state);
GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem,
@@ -912,8 +887,6 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_metadata_array_destroy(&calld->initial_metadata);
grpc_byte_buffer_destroy(calld->payload);
- gpr_mu_destroy(&calld->mu_state);
-
server_unref(exec_ctx, chand->server);
}
@@ -1020,8 +993,6 @@ grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
server->root_channel_data.next = server->root_channel_data.prev =
&server->root_channel_data;
- /* TODO(ctiller): expose a channel_arg for this */
- server->max_requested_calls_per_cq = 32768;
server->channel_args = grpc_channel_args_copy(args);
return server;
@@ -1095,29 +1066,15 @@ void grpc_server_start(grpc_server* server) {
server->pollset_count = 0;
server->pollsets =
(grpc_pollset**)gpr_malloc(sizeof(grpc_pollset*) * server->cq_count);
- server->request_freelist_per_cq = (gpr_stack_lockfree**)gpr_malloc(
- sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq = (requested_call**)gpr_malloc(
- sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
server->pollsets[server->pollset_count++] =
grpc_cq_pollset(server->cqs[i]);
}
- server->request_freelist_per_cq[i] =
- gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq);
- for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
- gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
- }
- server->requested_calls_per_cq[i] =
- (requested_call*)gpr_malloc((size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
}
- request_matcher_init(&server->unregistered_request_matcher,
- (size_t)server->max_requested_calls_per_cq, server);
+ request_matcher_init(&server->unregistered_request_matcher, server);
for (registered_method* rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->matcher,
- (size_t)server->max_requested_calls_per_cq, server);
+ request_matcher_init(&rm->matcher, server);
}
server_ref(server);
@@ -1373,21 +1330,11 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
requested_call* rc) {
call_data* calld = NULL;
request_matcher* rm = NULL;
- int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(exec_ctx, server, cq_idx, rc,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
return GRPC_CALL_OK;
}
- request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
- if (request_id == -1) {
- /* out of request ids: just fail this one */
- fail_call(exec_ctx, server, cq_idx, rc,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Out of request ids"),
- GRPC_ERROR_INT_LIMIT, server->max_requested_calls_per_cq));
- return GRPC_CALL_OK;
- }
switch (rc->type) {
case BATCH_CALL:
rm = &server->unregistered_request_matcher;
@@ -1396,20 +1343,17 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
rm = &rc->data.registered.method->matcher;
break;
}
- server->requested_calls_per_cq[cq_idx][request_id] = *rc;
- gpr_free(rc);
- if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) {
+ if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
/* this was the first queued request: we need to lock and start
matching calls */
gpr_mu_lock(&server->mu_call);
while ((calld = rm->pending_head) != NULL) {
- request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
- if (request_id == -1) break;
+ rc = (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) break;
rm->pending_head = calld->pending_next;
gpr_mu_unlock(&server->mu_call);
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == ZOMBIED) {
- gpr_mu_unlock(&calld->mu_state);
+ if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
+ // Zombied Call
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -1417,11 +1361,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else {
- GPR_ASSERT(calld->state == PENDING);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&calld->mu_state);
- publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls_per_cq[cq_idx][request_id]);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
}
gpr_mu_lock(&server->mu_call);
}
@@ -1540,7 +1480,6 @@ static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
rc->initial_metadata->count = 0;
GPR_ASSERT(error != GRPC_ERROR_NONE);
- server_ref(server);
grpc_cq_end_op(exec_ctx, server->cqs[cq_idx], rc->tag, error,
done_request_event, rc, &rc->completion);
}
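
Overall, the server drops the fixed-size freelist/array of requested_calls and instead threads each heap-allocated requested_call through the queues by its intrusive request_link, which must be the struct's first member so that the gpr_mpscq_node* the queue returns and the requested_call* the matcher wants are the same address. A hedged sketch of that layout trick, with simplified stand-in types:

    // Sketch of the intrusive-link layout relied on above (stand-in types).
    struct node { node* next; };
    struct request {
      node link;    // must be first: (request*)&r.link == &r
      int payload;  // stand-in for the real requested_call fields
    };

    static request* from_link(node* n) {
      // Valid precisely because 'link' is the first member.
      return reinterpret_cast<request*>(n);
    }
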