author    Sree Kuchibhotla <sreek@google.com>  2017-11-08 10:21:59 -0800
committer Sree Kuchibhotla <sreek@google.com>  2017-11-08 10:21:59 -0800
commit    52f7039dc4319804a902cc35df4d0cd08ba50421 (patch)
tree      cbd9c05c471ac7685aed1b4e095d620ed7e546f2 /src/core/lib
parent    9a0db888855e0905946ab476892ba697a6a9200d (diff)
parent    99dcb6953e3a01e2a2795ba3f09e6b6f64114d95 (diff)
Merge branch 'master' into cc-tsan-1
Diffstat (limited to 'src/core/lib')
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc                  |  30
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc                 |  28
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc                |  28
-rw-r--r--  src/core/lib/iomgr/lockfree_event.cc                   | 115
-rw-r--r--  src/core/lib/iomgr/lockfree_event.h                    |  44
-rw-r--r--  src/core/lib/iomgr/network_status_tracker.h            |   8
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.cc  |   2
-rw-r--r--  src/core/lib/support/cmdline.cc                        |   2
-rw-r--r--  src/core/lib/support/mpscq.cc                          |  37
-rw-r--r--  src/core/lib/support/mpscq.h                           |  30
-rw-r--r--  src/core/lib/surface/completion_queue.cc               |   2
-rw-r--r--  src/core/lib/surface/server.cc                         | 181
12 files changed, 248 insertions(+), 259 deletions(-)
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 504c659874..9e3643fa28 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -46,6 +46,7 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
@@ -111,8 +112,8 @@ static void epoll_set_shutdown() {
struct grpc_fd {
int fd;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
@@ -264,8 +265,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
}
new_fd->fd = fd;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -297,12 +298,11 @@ static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
* shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_error* why, bool releasing_fd) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
if (!releasing_fd) {
shutdown(fd->fd, SHUT_RDWR);
}
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
@@ -318,7 +318,7 @@ static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_error* error = GRPC_ERROR_NONE;
bool is_release_fd = (release_fd != NULL);
- if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
+ if (!fd->read_closure->IsShutdown()) {
fd_shutdown_internal(exec_ctx, fd,
GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
is_release_fd);
@@ -335,8 +335,8 @@ static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
@@ -351,28 +351,28 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
/*******************************************************************************
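
Note: the epoll1 changes above replace raw `gpr_atm` fields with `grpc_core::ManualConstructor<grpc_core::LockfreeEvent>`. Storage lives inline in `grpc_fd`, but construction and destruction happen only at the explicit `Init()`/`Destroy()` calls, which lets fds sit on the freelist with no live event objects. A minimal sketch of that pattern, assuming nothing beyond standard C++ (an illustration, not the actual gRPC header):

```cpp
#include <new>      // placement new
#include <utility>  // std::forward

// Sketch: inline storage whose contained T is constructed/destroyed
// explicitly, so the enclosing object can be pooled without live members.
template <typename T>
class ManualConstructor {
 public:
  template <typename... Args>
  void Init(Args&&... args) {
    new (&space_) T(std::forward<Args>(args)...);  // construct in place
  }
  void Destroy() { get()->~T(); }  // destroy without releasing the storage

  T* get() { return reinterpret_cast<T*>(&space_); }
  T* operator->() { return get(); }

 private:
  alignas(T) unsigned char space_[sizeof(T)];  // raw, suitably aligned bytes
};
```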
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index aafdd690c7..26ed1f6747 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -48,6 +48,7 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/spinlock.h"
// debug aid: create workers on the heap (allows asan to spot
@@ -153,8 +154,8 @@ struct grpc_fd {
gpr_mu pollable_mu;
pollable* pollable_obj;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
@@ -286,8 +287,8 @@ static void fd_destroy(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
fd->freelist_next = fd_freelist;
fd_freelist = fd;
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_unlock(&fd_freelist_mu);
}
@@ -347,8 +348,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
new_fd->pollable_obj = NULL;
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -411,27 +412,26 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -702,7 +702,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -714,7 +714,7 @@ static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index d5f3122abc..9a127806fa 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -50,6 +50,7 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
@@ -127,8 +128,8 @@ struct grpc_fd {
valid */
bool orphaned;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
@@ -766,8 +767,8 @@ static void unref_by(grpc_fd* fd, int n) {
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_unlock(&fd_freelist_mu);
} else {
@@ -832,8 +833,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -924,27 +925,26 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -1108,7 +1108,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -1120,7 +1120,7 @@ static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
static void pollset_release_polling_island(grpc_exec_ctx* exec_ctx,
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index 443a8375b2..98e19f89df 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -26,92 +26,79 @@ extern grpc_tracer_flag grpc_polling_trace;
/* 'state' holds the closure to call when the fd is readable or writable, respectively.
It can contain one of the following values:
- CLOSURE_READY : The fd has an I/O event of interest but there is no
+ kClosureReady : The fd has an I/O event of interest but there is no
closure yet to execute
- CLOSURE_NOT_READY : The fd has no I/O event of interest
+ kClosureNotReady : The fd has no I/O event of interest
closure ptr : The closure to be executed when the fd has an I/O
event of interest
- shutdown_error | FD_SHUTDOWN_BIT :
- 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+ shutdown_error | kShutdownBit :
+ 'shutdown_error' field ORed with kShutdownBit.
This indicates that the fd is shutdown. Since all
memory allocations are word-aligned, the lower two
bits of the shutdown_error pointer are always 0. So
- it is safe to OR these with FD_SHUTDOWN_BIT
+ it is safe to OR these with kShutdownBit
Valid state transitions:
- <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+ <closure ptr> <-----3------ kClosureNotReady -----1-------> kClosureReady
  |  |                         ^   |    ^                         |  |
  |  |                         |   |    |                         |  |
  |  +--------------4----------+   6    +---------2---------------+  |
  |                                |                                 |
  |                                v                                 |
- +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+ +-----5-------> [shutdown_error | kShutdownBit] <-------7---------+
- For 1, 4 : See grpc_lfev_set_ready() function
- For 2, 3 : See grpc_lfev_notify_on() function
- For 5,6,7: See grpc_lfev_set_shutdown() function */
+ For 1, 4 : See SetReady() function
+ For 2, 3 : See NotifyOn() function
+ For 5,6,7: See SetShutdown() function */
-#define CLOSURE_NOT_READY ((gpr_atm)0)
-#define CLOSURE_READY ((gpr_atm)2)
+namespace grpc_core {
-#define FD_SHUTDOWN_BIT ((gpr_atm)1)
-
-void grpc_lfev_init(gpr_atm* state) {
- gpr_atm_no_barrier_store(state, CLOSURE_NOT_READY);
-}
-
-void grpc_lfev_destroy(gpr_atm* state) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- if (curr & FD_SHUTDOWN_BIT) {
- GRPC_ERROR_UNREF((grpc_error*)(curr & ~FD_SHUTDOWN_BIT));
+LockfreeEvent::~LockfreeEvent() {
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
+ if (curr & kShutdownBit) {
+ GRPC_ERROR_UNREF((grpc_error*)(curr & ~kShutdownBit));
} else {
- GPR_ASSERT(curr == CLOSURE_NOT_READY || curr == CLOSURE_READY);
+ GPR_ASSERT(curr == kClosureNotReady || curr == kClosureReady);
}
}
-bool grpc_lfev_is_shutdown(gpr_atm* state) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- return (curr & FD_SHUTDOWN_BIT) != 0;
-}
-
-void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_closure* closure, const char* variable) {
+void LockfreeEvent::NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
- state, (void*)curr, closure);
+ gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
+ (void*)curr, closure);
}
switch (curr) {
- case CLOSURE_NOT_READY: {
- /* CLOSURE_NOT_READY -> <closure>.
+ case kClosureNotReady: {
+ /* kClosureNotReady -> <closure>.
We're guaranteed by API that there's an acquire barrier before here,
so there's no need to double-dip and this can be a release-only.
The release itself pairs with the acquire half of a set_ready full
barrier. */
- if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+ if (gpr_atm_rel_cas(&state_, kClosureNotReady, (gpr_atm)closure)) {
return; /* Successful. Return */
}
break; /* retry */
}
- case CLOSURE_READY: {
- /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+ case kClosureReady: {
+ /* Change the state to kClosureNotReady. Schedule the closure if
successful. If not, the state most likely transitioned to shutdown.
We should retry.
This can be a no-barrier cas since the state is being transitioned to
- CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+ kClosureNotReady; set_ready and set_shutdown do not schedule any
closure when transitioning out of kClosureNotReady state (i.e. there
is no other code that needs to 'happen-after' this) */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+ if (gpr_atm_no_barrier_cas(&state_, kClosureReady, kClosureNotReady)) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
return; /* Successful. Return */
}
@@ -123,8 +110,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* 'curr' is either a closure or the fd is shutdown(in which case 'curr'
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
- grpc_error* shutdown_err = (grpc_error*)(curr & ~FD_SHUTDOWN_BIT);
+ if ((curr & kShutdownBit) > 0) {
+ grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -133,7 +120,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* There is already a closure! This indicates a bug in the code */
gpr_log(GPR_ERROR,
- "notify_on called with a previous callback still pending");
+ "LockfreeEvent::NotifyOn: notify_on called with a previous "
+ "callback still pending");
abort();
}
}
@@ -142,22 +130,22 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
GPR_UNREACHABLE_CODE(return );
}
-bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_error* shutdown_err) {
- gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+bool LockfreeEvent::SetShutdown(grpc_exec_ctx* exec_ctx,
+ grpc_error* shutdown_err) {
+ gpr_atm new_state = (gpr_atm)shutdown_err | kShutdownBit;
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
- (void*)curr, grpc_error_string(shutdown_err));
+ gpr_log(GPR_ERROR, "LockfreeEvent::SetShutdown: %p curr=%p err=%s",
+ &state_, (void*)curr, grpc_error_string(shutdown_err));
}
switch (curr) {
- case CLOSURE_READY:
- case CLOSURE_NOT_READY:
+ case kClosureReady:
+ case kClosureNotReady:
/* Need a full barrier here so that the initial load in notify_on
doesn't need a barrier */
- if (gpr_atm_full_cas(state, curr, new_state)) {
+ if (gpr_atm_full_cas(&state_, curr, new_state)) {
return true; /* early out */
}
break; /* retry */
@@ -166,7 +154,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* 'curr' is either a closure or the fd is already shutdown */
/* If fd is already shutdown, we are done */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ if ((curr & kShutdownBit) > 0) {
GRPC_ERROR_UNREF(shutdown_err);
return false;
}
@@ -176,7 +164,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
Needs an acquire to pair with setting the closure (and get a
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
- if (gpr_atm_full_cas(state, curr, new_state)) {
+ if (gpr_atm_full_cas(&state_, curr, new_state)) {
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -193,26 +181,25 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
GPR_UNREACHABLE_CODE(return false);
}
-void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- const char* variable) {
+void LockfreeEvent::SetReady(grpc_exec_ctx* exec_ctx) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
+ gpr_log(GPR_ERROR, "LockfreeEvent::SetReady: %p curr=%p", &state_,
(void*)curr);
}
switch (curr) {
- case CLOSURE_READY: {
+ case kClosureReady: {
/* Already ready. We are done here */
return;
}
- case CLOSURE_NOT_READY: {
+ case kClosureNotReady: {
/* No barrier required as we're transitioning to a state that does not
involve a closure */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+ if (gpr_atm_no_barrier_cas(&state_, kClosureNotReady, kClosureReady)) {
return; /* early out */
}
break; /* retry */
@@ -220,14 +207,14 @@ void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
default: {
/* 'curr' is either a closure or the fd is shutdown */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ if ((curr & kShutdownBit) > 0) {
/* The fd is shutdown. Do nothing */
return;
}
/* Full cas: acquire pairs with this cas' release in the event of a
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
- else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
+ else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr, GRPC_ERROR_NONE);
return;
}
@@ -239,3 +226,5 @@ void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
}
}
}
+
+} // namespace grpc_core
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index 75526d6b9f..47d0089c01 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -25,24 +25,30 @@
#include "src/core/lib/iomgr/exec_ctx.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void grpc_lfev_init(gpr_atm* state);
-void grpc_lfev_destroy(gpr_atm* state);
-bool grpc_lfev_is_shutdown(gpr_atm* state);
-
-void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_closure* closure, const char* variable);
-/* Returns true on first successful shutdown */
-bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_error* shutdown_err);
-void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- const char* variable);
-
-#ifdef __cplusplus
-}
-#endif
+namespace grpc_core {
+
+class LockfreeEvent {
+ public:
+ LockfreeEvent() = default;
+ ~LockfreeEvent();
+
+ LockfreeEvent(const LockfreeEvent&) = delete;
+ LockfreeEvent& operator=(const LockfreeEvent&) = delete;
+
+ bool IsShutdown() const {
+ return (gpr_atm_no_barrier_load(&state_) & kShutdownBit) != 0;
+ }
+
+ void NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure);
+ bool SetShutdown(grpc_exec_ctx* exec_ctx, grpc_error* error);
+ void SetReady(grpc_exec_ctx* exec_ctx);
+
+ private:
+ enum State { kClosureNotReady = 0, kClosureReady = 2, kShutdownBit = 1 };
+
+ gpr_atm state_ = kClosureNotReady;
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */
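
The class above is a thin wrapper over the CAS-driven state machine documented in lockfree_event.cc. A self-contained sketch of the same transitions (numbered as in the diagram) using `std::atomic` in place of `gpr_atm` — hypothetical names, with the shutdown transitions 5-7 omitted for brevity:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <functional>

// Sketch, not gRPC source: state_ is kNotReady, kReady, or a parked closure.
class TinyLockfreeEvent {
 public:
  using Closure = std::function<void()>;

  // Transition 3 (park a closure) or 2 (consume readiness, run it now).
  void NotifyOn(Closure* c) {
    while (true) {
      uintptr_t curr = state_.load(std::memory_order_acquire);
      if (curr == kNotReady) {
        if (state_.compare_exchange_weak(curr, reinterpret_cast<uintptr_t>(c),
                                         std::memory_order_release))
          return;  // parked; SetReady() will run it
      } else if (curr == kReady) {
        if (state_.compare_exchange_weak(curr, kNotReady,
                                         std::memory_order_relaxed)) {
          (*c)();  // readiness already arrived: run immediately
          return;
        }
      } else {
        abort();  // two NotifyOn() calls in flight: caller bug
      }
    }
  }

  // Transition 1 (mark ready) or 4 (hand a parked closure to the caller).
  void SetReady() {
    while (true) {
      uintptr_t curr = state_.load(std::memory_order_acquire);
      if (curr == kReady) return;  // already ready: nothing to do
      if (curr == kNotReady) {
        if (state_.compare_exchange_weak(curr, kReady,
                                         std::memory_order_relaxed))
          return;
      } else if (state_.compare_exchange_weak(curr, kNotReady,
                                              std::memory_order_acq_rel)) {
        (*reinterpret_cast<Closure*>(curr))();  // run the parked closure
        return;
      }
    }
  }

 private:
  static constexpr uintptr_t kNotReady = 0;
  static constexpr uintptr_t kReady = 2;  // low bit reserved, as in the diff
  std::atomic<uintptr_t> state_{kNotReady};
};
```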
diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h
index 3033e0a833..32244d9b77 100644
--- a/src/core/lib/iomgr/network_status_tracker.h
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -20,10 +20,6 @@
#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
#include "src/core/lib/iomgr/endpoint.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
void grpc_network_status_init(void);
void grpc_network_status_shutdown(void);
@@ -31,8 +27,4 @@ void grpc_network_status_register_endpoint(grpc_endpoint* ep);
void grpc_network_status_unregister_endpoint(grpc_endpoint* ep);
void grpc_network_status_shutdown_all_endpoints();
-#ifdef __cplusplus
-}
-#endif
-
#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index 8f7530ed27..11f5a13ccc 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -139,7 +139,7 @@ void grpc_auth_metadata_context_build(
method_name = gpr_strdup(last_slash + 1);
}
char* host_and_port = grpc_slice_to_c_string(call_host);
- if (strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) {
+ if (url_scheme != NULL && strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) {
/* Remove the port if it is 443. */
char* port_delimiter = strrchr(host_and_port, ':');
if (port_delimiter != NULL && strcmp(port_delimiter + 1, "443") == 0) {
diff --git a/src/core/lib/support/cmdline.cc b/src/core/lib/support/cmdline.cc
index 49b34194c3..d2785d2f30 100644
--- a/src/core/lib/support/cmdline.cc
+++ b/src/core/lib/support/cmdline.cc
@@ -105,7 +105,7 @@ void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
}
void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
- char** value) {
+ const char** value) {
add_arg(cl, name, help, ARGTYPE_STRING, value);
}
diff --git a/src/core/lib/support/mpscq.cc b/src/core/lib/support/mpscq.cc
index db25f24264..b270777d5c 100644
--- a/src/core/lib/support/mpscq.cc
+++ b/src/core/lib/support/mpscq.cc
@@ -31,11 +31,12 @@ void gpr_mpscq_destroy(gpr_mpscq* q) {
GPR_ASSERT(q->tail == &q->stub);
}
-void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
+bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL);
gpr_mpscq_node* prev =
(gpr_mpscq_node*)gpr_atm_full_xchg(&q->head, (gpr_atm)n);
gpr_atm_rel_store(&prev->next, (gpr_atm)n);
+ return prev == &q->stub;
}
gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q) {
@@ -77,3 +78,37 @@ gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty) {
*empty = false;
return NULL;
}
+
+void gpr_locked_mpscq_init(gpr_locked_mpscq* q) {
+ gpr_mpscq_init(&q->queue);
+ gpr_mu_init(&q->mu);
+}
+
+void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q) {
+ gpr_mpscq_destroy(&q->queue);
+ gpr_mu_destroy(&q->mu);
+}
+
+bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n) {
+ return gpr_mpscq_push(&q->queue, n);
+}
+
+gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q) {
+ if (gpr_mu_trylock(&q->mu)) {
+ gpr_mpscq_node* n = gpr_mpscq_pop(&q->queue);
+ gpr_mu_unlock(&q->mu);
+ return n;
+ }
+ return NULL;
+}
+
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q) {
+ gpr_mu_lock(&q->mu);
+ bool empty = false;
+ gpr_mpscq_node* n;
+ do {
+ n = gpr_mpscq_pop_and_check_end(&q->queue, &empty);
+ } while (n == NULL && !empty);
+ gpr_mu_unlock(&q->mu);
+ return n;
+}
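
Note the asymmetry between the two poppers added above: `gpr_locked_mpscq_try_pop` sheds contention with `gpr_mu_trylock` (a NULL result may just mean another thread holds the lock), while `gpr_locked_mpscq_pop` blocks on the mutex and loops past transient NULLs from `gpr_mpscq_pop_and_check_end` until it either gets a node or proves the queue empty. A hedged usage sketch (function names are from this diff; surrounding plumbing elided):

```cpp
// Fast path: opportunistic pop; acceptable to miss under contention.
gpr_mpscq_node* n = gpr_locked_mpscq_try_pop(&q);
if (n == NULL) {
  // Slow path: when a definitive empty/non-empty answer is required,
  // take the lock and spin past transient NULLs from racing pushers.
  n = gpr_locked_mpscq_pop(&q);
}
```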
diff --git a/src/core/lib/support/mpscq.h b/src/core/lib/support/mpscq.h
index 1cc9d89feb..fb22742050 100644
--- a/src/core/lib/support/mpscq.h
+++ b/src/core/lib/support/mpscq.h
@@ -20,6 +20,7 @@
#define GRPC_CORE_LIB_SUPPORT_MPSCQ_H
#include <grpc/support/atm.h>
+#include <grpc/support/sync.h>
#include <stdbool.h>
#include <stddef.h>
@@ -49,13 +50,40 @@ typedef struct gpr_mpscq {
void gpr_mpscq_init(gpr_mpscq* q);
void gpr_mpscq_destroy(gpr_mpscq* q);
// Push a node
-void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
+// Thread safe - can be called from multiple threads concurrently
+// Returns true if this was possibly the first node (may return true
+// sporadically, will not return false sporadically)
+bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
// Pop a node (returns NULL if no node is ready - which doesn't indicate that
// the queue is empty!!)
+// Thread compatible - can only be called from one thread at a time
gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q);
// Pop a node; sets *empty to true if the queue is empty, or false if it is not
gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty);
+// An mpscq with a lock: it's safe to pop from multiple threads, but the
+// lock serializes poppers so only one pop can succeed at a time
+typedef struct gpr_locked_mpscq {
+ gpr_mpscq queue;
+ gpr_mu mu;
+} gpr_locked_mpscq;
+
+void gpr_locked_mpscq_init(gpr_locked_mpscq* q);
+void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q);
+// Push a node
+// Thread safe - can be called from multiple threads concurrently
+// Returns true if this was possibly the first node (may return true
+// sporadically, will not return false sporadically)
+bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n);
+
+// Pop a node (returns NULL if no node is ready - which doesn't indicate that
+// the queue is empty!!)
+// Thread safe - can be called from multiple threads concurrently
+gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q);
+
+// Pop a node. Returns NULL only if the queue was observed empty at some
+// point after this call began
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q);
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 14054e82e7..922df923ae 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -376,8 +376,8 @@ int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
(grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
*tag = storage->tag;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- storage->done(&exec_ctx, storage->done_arg, storage);
*ok = (storage->next & (uintptr_t)(1)) == 1;
+ storage->done(&exec_ctx, storage->done_arg, storage);
ret = 1;
cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
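
The two-line swap in this hunk is a use-after-free fix: `storage->done` may recycle or free `storage` (indeed, `done_request_event` in server.cc below now just calls `gpr_free`), so `storage->next` must be read before the callback runs. A minimal self-contained illustration of the hazard, with hypothetical types:

```cpp
#include <cstdint>
#include <cstdlib>

// Sketch, not gRPC source.
struct Completion {
  void (*done)(Completion* self);  // completion callback; may free `self`
  std::uintptr_t next;             // low bit carries the `ok` flag
};

void Recycle(Completion* self) { std::free(self); }  // a freeing callback

int Flush(Completion* storage) {
  int ok = (storage->next & 1) == 1;  // read every field first...
  storage->done(storage);             // ...because this may free `storage`
  return ok;                          // safe: no access after the callback
}
```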
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index eb7a4e2d30..da7ae17bef 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -33,7 +33,8 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/support/stack_lockfree.h"
+#include "src/core/lib/support/mpscq.h"
+#include "src/core/lib/support/spinlock.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -63,6 +64,7 @@ grpc_tracer_flag grpc_server_channel_trace =
GRPC_TRACER_INITIALIZER(false, "server_channel");
typedef struct requested_call {
+ gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
void* tag;
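
The `/* must be first */` constraint matters because the matcher code below casts `gpr_mpscq_node*` results from the queue straight back to `requested_call*` (see `queue_call_request`); with the link at offset zero that cast is a no-op. A compile-time guard one could add (illustrative, not part of this patch):

```cpp
#include <cstddef>

// Sketch with hypothetical names mirroring the layout requirement.
struct Node { Node* next; };

struct RequestedCall {
  Node request_link;  // must be first: a Node* popped from the queue
  int type;           // is reinterpret_cast back to RequestedCall*
};

static_assert(offsetof(RequestedCall, request_link) == 0,
              "request_link must be the first member of requested_call");

RequestedCall* FromNode(Node* n) {
  return reinterpret_cast<RequestedCall*>(n);  // valid only at offset 0
}
```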
@@ -128,10 +130,7 @@ typedef struct request_matcher request_matcher;
struct call_data {
grpc_call* call;
- /** protects state */
- gpr_mu mu_state;
- /** the current state of a call - see call_state */
- call_state state;
+ gpr_atm state;
bool path_set;
bool host_set;
@@ -162,7 +161,7 @@ struct request_matcher {
grpc_server* server;
call_data* pending_head;
call_data* pending_tail;
- gpr_stack_lockfree** requests_per_cq;
+ gpr_locked_mpscq* requests_per_cq;
};
struct registered_method {
@@ -207,11 +206,6 @@ struct grpc_server {
registered_method* registered_methods;
/** one request matcher for unregistered methods */
request_matcher unregistered_request_matcher;
- /** free list of available requested_calls_per_cq indices */
- gpr_stack_lockfree** request_freelist_per_cq;
- /** requested call backing data */
- requested_call** requested_calls_per_cq;
- int max_requested_calls_per_cq;
gpr_atm shutdown_flag;
uint8_t shutdown_published;
@@ -313,21 +307,20 @@ static void channel_broadcaster_shutdown(grpc_exec_ctx* exec_ctx,
* request_matcher
*/
-static void request_matcher_init(request_matcher* rm, size_t entries,
- grpc_server* server) {
+static void request_matcher_init(request_matcher* rm, grpc_server* server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq = (gpr_stack_lockfree**)gpr_malloc(
+ rm->requests_per_cq = (gpr_locked_mpscq*)gpr_malloc(
sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
- rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
+ gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
}
}
static void request_matcher_destroy(request_matcher* rm) {
for (size_t i = 0; i < rm->server->cq_count; i++) {
- GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests_per_cq[i]) == -1);
- gpr_stack_lockfree_destroy(rm->requests_per_cq[i]);
+ GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == NULL);
+ gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
}
gpr_free(rm->requests_per_cq);
}
@@ -342,9 +335,7 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx* exec_ctx,
while (rm->pending_head) {
call_data* calld = rm->pending_head;
rm->pending_head = calld->pending_next;
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -357,13 +348,17 @@ static void request_matcher_kill_requests(grpc_exec_ctx* exec_ctx,
grpc_server* server,
request_matcher* rm,
grpc_error* error) {
- int request_id;
+ requested_call* rc;
for (size_t i = 0; i < server->cq_count; i++) {
- while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) !=
- -1) {
- fail_call(exec_ctx, server, i,
- &server->requested_calls_per_cq[i][request_id],
- GRPC_ERROR_REF(error));
+ /* Here we know:
+ 1. no requests are being added (since the server is shut down)
+ 2. no other threads are pulling (since the shutdown process is
+ single-threaded)
+ So, we can ignore the queue lock and just pop, with the guarantee that a
+ NULL returned here truly means that the queue is empty */
+ while ((rc = (requested_call*)gpr_mpscq_pop(
+ &rm->requests_per_cq[i].queue)) != NULL) {
+ fail_call(exec_ctx, server, i, rc, GRPC_ERROR_REF(error));
}
}
GRPC_ERROR_UNREF(error);
@@ -398,13 +393,7 @@ static void server_delete(grpc_exec_ctx* exec_ctx, grpc_server* server) {
}
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(exec_ctx, server->cqs[i], "server");
- if (server->started) {
- gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]);
- gpr_free(server->requested_calls_per_cq[i]);
- }
}
- gpr_free(server->request_freelist_per_cq);
- gpr_free(server->requested_calls_per_cq);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
@@ -462,21 +451,7 @@ static void destroy_channel(grpc_exec_ctx* exec_ctx, channel_data* chand,
static void done_request_event(grpc_exec_ctx* exec_ctx, void* req,
grpc_cq_completion* c) {
- requested_call* rc = (requested_call*)req;
- grpc_server* server = rc->server;
-
- if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
- rc < server->requested_calls_per_cq[rc->cq_idx] +
- server->max_requested_calls_per_cq) {
- GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX);
- gpr_stack_lockfree_push(
- server->request_freelist_per_cq[rc->cq_idx],
- (int)(rc - server->requested_calls_per_cq[rc->cq_idx]));
- } else {
- gpr_free(req);
- }
-
- server_unref(exec_ctx, server);
+ gpr_free(req);
}
static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
@@ -508,10 +483,6 @@ static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
GPR_UNREACHABLE_CODE(return );
}
- grpc_call_element* elem =
- grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data* chand = (channel_data*)elem->channel_data;
- server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
}
@@ -525,9 +496,7 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
grpc_server* server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -539,16 +508,14 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
for (size_t i = 0; i < server->cq_count; i++) {
size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
- int request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
- if (request_id == -1) {
+ requested_call* rc =
+ (requested_call*)gpr_locked_mpscq_try_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) {
continue;
} else {
GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
- gpr_mu_lock(&calld->mu_state);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&calld->mu_state);
- publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls_per_cq[cq_idx][request_id]);
+ gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
return; /* early out */
}
}
@@ -556,9 +523,27 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
/* found no cq to take the request: queue it on the slow list */
GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
- gpr_mu_lock(&calld->mu_state);
- calld->state = PENDING;
- gpr_mu_unlock(&calld->mu_state);
+
+ // We need to ensure that all the queues are empty. We do this under
+ // the server mu_call lock to ensure that if something is added to
+ // an empty request queue, it will block until the call is actually
+ // added to the pending list.
+ for (size_t i = 0; i < server->cq_count; i++) {
+ size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
+ requested_call* rc =
+ (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) {
+ continue;
+ } else {
+ gpr_mu_unlock(&server->mu_call);
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i + server->cq_count);
+ gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
+ return; /* early out */
+ }
+ }
+
+ gpr_atm_no_barrier_store(&calld->state, PENDING);
if (rm->pending_head == NULL) {
rm->pending_tail = rm->pending_head = calld;
} else {
@@ -576,9 +561,7 @@ static void finish_start_new_rpc(
call_data* calld = (call_data*)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
@@ -807,21 +790,14 @@ static void got_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) {
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
- } else if (calld->state == PENDING) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) {
/* zombied call will be destroyed when it's removed from the pending
queue... later */
- } else {
- gpr_mu_unlock(&calld->mu_state);
}
}
}
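
With `calld->state` now a bare `gpr_atm`, the mutex-guarded "inspect then assign" above collapses into compare-and-swap transitions; a failed CAS means another transition already won the race. A self-contained sketch of that reading, with `std::atomic` standing in for `gpr_atm` and hypothetical helper names:

```cpp
#include <atomic>

enum CallState { NOT_STARTED, PENDING, ACTIVATED, ZOMBIED };

// One CAS per transition replaces lock / inspect / assign / unlock.
bool TryTransition(std::atomic<CallState>& state, CallState from,
                   CallState to) {
  CallState expected = from;
  return state.compare_exchange_strong(expected, to);
}

// Mirrors got_initial_metadata above: zombify if the call never started,
// else zombify only if it is still pending; otherwise leave it alone.
void OnError(std::atomic<CallState>& state) {
  if (TryTransition(state, NOT_STARTED, ZOMBIED)) {
    // schedule the kill_zombie closure here
  } else if (TryTransition(state, PENDING, ZOMBIED)) {
    // the pending queue will reap the zombied call later
  }
}
```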
@@ -885,7 +861,6 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
memset(calld, 0, sizeof(call_data));
calld->deadline = GRPC_MILLIS_INF_FUTURE;
calld->call = grpc_call_from_top_element(elem);
- gpr_mu_init(&calld->mu_state);
GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem,
@@ -912,8 +887,6 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_metadata_array_destroy(&calld->initial_metadata);
grpc_byte_buffer_destroy(calld->payload);
- gpr_mu_destroy(&calld->mu_state);
-
server_unref(exec_ctx, chand->server);
}
@@ -1020,8 +993,6 @@ grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
server->root_channel_data.next = server->root_channel_data.prev =
&server->root_channel_data;
- /* TODO(ctiller): expose a channel_arg for this */
- server->max_requested_calls_per_cq = 32768;
server->channel_args = grpc_channel_args_copy(args);
return server;
@@ -1095,29 +1066,15 @@ void grpc_server_start(grpc_server* server) {
server->pollset_count = 0;
server->pollsets =
(grpc_pollset**)gpr_malloc(sizeof(grpc_pollset*) * server->cq_count);
- server->request_freelist_per_cq = (gpr_stack_lockfree**)gpr_malloc(
- sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq = (requested_call**)gpr_malloc(
- sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
server->pollsets[server->pollset_count++] =
grpc_cq_pollset(server->cqs[i]);
}
- server->request_freelist_per_cq[i] =
- gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq);
- for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
- gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
- }
- server->requested_calls_per_cq[i] =
- (requested_call*)gpr_malloc((size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
}
- request_matcher_init(&server->unregistered_request_matcher,
- (size_t)server->max_requested_calls_per_cq, server);
+ request_matcher_init(&server->unregistered_request_matcher, server);
for (registered_method* rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->matcher,
- (size_t)server->max_requested_calls_per_cq, server);
+ request_matcher_init(&rm->matcher, server);
}
server_ref(server);
@@ -1373,21 +1330,11 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
requested_call* rc) {
call_data* calld = NULL;
request_matcher* rm = NULL;
- int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(exec_ctx, server, cq_idx, rc,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
return GRPC_CALL_OK;
}
- request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
- if (request_id == -1) {
- /* out of request ids: just fail this one */
- fail_call(exec_ctx, server, cq_idx, rc,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Out of request ids"),
- GRPC_ERROR_INT_LIMIT, server->max_requested_calls_per_cq));
- return GRPC_CALL_OK;
- }
switch (rc->type) {
case BATCH_CALL:
rm = &server->unregistered_request_matcher;
@@ -1396,20 +1343,17 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
rm = &rc->data.registered.method->matcher;
break;
}
- server->requested_calls_per_cq[cq_idx][request_id] = *rc;
- gpr_free(rc);
- if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) {
+ if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
/* this was the first queued request: we need to lock and start
matching calls */
gpr_mu_lock(&server->mu_call);
while ((calld = rm->pending_head) != NULL) {
- request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
- if (request_id == -1) break;
+ rc = (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) break;
rm->pending_head = calld->pending_next;
gpr_mu_unlock(&server->mu_call);
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == ZOMBIED) {
- gpr_mu_unlock(&calld->mu_state);
+ if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
+ // Zombied Call
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -1417,11 +1361,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else {
- GPR_ASSERT(calld->state == PENDING);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&calld->mu_state);
- publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls_per_cq[cq_idx][request_id]);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
}
gpr_mu_lock(&server->mu_call);
}
@@ -1540,7 +1480,6 @@ static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
rc->initial_metadata->count = 0;
GPR_ASSERT(error != GRPC_ERROR_NONE);
- server_ref(server);
grpc_cq_end_op(exec_ctx, server->cqs[cq_idx], rc->tag, error,
done_request_event, rc, &rc->completion);
}