Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc         |   9
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.cc            |   4
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.cc  |  31
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h           |  12
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.cc           |   6
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc                        |  36
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc                       |  35
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc                      |  37
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc                          |   1
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc                               |   4
-rw-r--r--  src/core/lib/iomgr/lockfree_event.cc                         | 130
-rw-r--r--  src/core/lib/iomgr/lockfree_event.h                          |  44
-rw-r--r--  src/core/lib/iomgr/network_status_tracker.h                  |   8
-rw-r--r--  src/core/lib/iomgr/pollset_uv.cc                             |  20
-rw-r--r--  src/core/lib/support/cmdline.cc                              |   2
-rw-r--r--  src/core/lib/support/mpscq.cc                                |  37
-rw-r--r--  src/core/lib/support/mpscq.h                                 |  30
-rw-r--r--  src/core/lib/surface/completion_queue.cc                     |   2
-rw-r--r--  src/core/lib/surface/server.cc                               | 181
19 files changed, 338 insertions(+), 291 deletions(-)
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index 466bf86bc0..c3795c35c1 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -143,9 +143,16 @@ void grpc_client_channel_start_backup_polling(
grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
&g_poller->run_poller_closure);
}
+
gpr_ref(&g_poller->refs);
+ /* Get a reference to g_poller->pollset before releasing g_poller_mu to make
+ * TSAN happy. Otherwise, reading g_poller->pollset after releasing the lock
+ * races with g_poller_unref() setting g_poller to NULL, and TSAN flags it as
+ * a data race */
+ grpc_pollset* pollset = g_poller->pollset;
gpr_mu_unlock(&g_poller_mu);
- grpc_pollset_set_add_pollset(exec_ctx, interested_parties, g_poller->pollset);
+
+ grpc_pollset_set_add_pollset(exec_ctx, interested_parties, pollset);
}
void grpc_client_channel_stop_backup_polling(
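The pattern behind this fix is worth calling out: copy whatever you still need out of the shared object while its lock is held, then work from the local copy after unlocking. A minimal standalone sketch of the idiom, with std::mutex standing in for gpr_mu and an int standing in for grpc_pollset* (illustrative names, not gRPC APIs):

#include <mutex>

struct Poller { int pollset; };     // pollset stands in for grpc_pollset*

static std::mutex g_mu;
static Poller* g_poller = nullptr;  // another thread may NULL this after unref

int use_poller_pollset() {
  g_mu.lock();
  int pollset = g_poller->pollset;  // read the field while the lock protects it
  g_mu.unlock();
  return pollset;                   // safe: g_poller is never touched past unlock
}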
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index 917e65342b..1cc52dec12 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -59,7 +59,9 @@ static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
static char* decode_and_copy_component(grpc_exec_ctx* exec_ctx, const char* src,
size_t begin, size_t end) {
grpc_slice component =
- grpc_slice_from_copied_buffer(src + begin, end - begin);
+ (begin == NOT_SET || end == NOT_SET)
+ ? grpc_empty_slice()
+ : grpc_slice_from_copied_buffer(src + begin, end - begin);
grpc_slice decoded_component =
grpc_permissive_percent_decode_slice(component);
char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
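The old code computed src + begin and end - begin even when a URI component was absent, i.e. when begin/end still held the parser's NOT_SET sentinel, which underflows the size_t subtraction. A standalone sketch of the guard (NOT_SET here is a hypothetical stand-in for the parser's sentinel value):

#include <string>

static const size_t NOT_SET = (size_t)-1;  // sentinel: component was never parsed

std::string copy_component(const char* src, size_t begin, size_t end) {
  if (begin == NOT_SET || end == NOT_SET) {
    return std::string();                  // absent component -> empty, not UB
  }
  return std::string(src + begin, end - begin);
}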
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 034e6ed8ca..a955ec2589 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -205,6 +205,8 @@ static void destruct_transport(grpc_exec_ctx* exec_ctx,
GPR_ASSERT(t->lists[i].tail == NULL);
}
+ GRPC_ERROR_UNREF(t->goaway_error);
+
GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0);
grpc_chttp2_stream_map_destroy(&t->stream_map);
@@ -320,6 +322,7 @@ static void init_transport(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
+ t->goaway_error = GRPC_ERROR_NONE;
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
@@ -1123,7 +1126,16 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx* exec_ctx,
grpc_slice goaway_text) {
// GRPC_CHTTP2_IF_TRACING(
// gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
- t->seen_goaway = 1;
+
+ // Discard the error from a previous goaway frame (if any)
+ if (t->goaway_error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(t->goaway_error);
+ }
+ t->goaway_error = grpc_error_set_str(
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
+ GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)goaway_error),
+ GRPC_ERROR_STR_RAW_BYTES, goaway_text);
/* When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
* data equal to "too_many_pings", it should log the occurrence at a log level
@@ -1144,14 +1156,8 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx* exec_ctx,
/* lie: use transient failure from the transport to indicate goaway has been
* received */
- connectivity_state_set(
- exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
- grpc_error_set_str(
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
- GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)goaway_error),
- GRPC_ERROR_STR_RAW_BYTES, goaway_text),
- "got_goaway");
+ connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(t->goaway_error), "got_goaway");
}
static void maybe_start_some_streams(grpc_exec_ctx* exec_ctx,
@@ -2078,7 +2084,6 @@ void grpc_chttp2_fake_status(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
grpc_status_code status;
grpc_slice slice;
grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, NULL);
-
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
}
@@ -2546,6 +2551,12 @@ static void read_action_locked(grpc_exec_ctx* exec_ctx, void* tp,
"Transport closed", &t->closed_with_error, 1);
}
if (error != GRPC_ERROR_NONE) {
+ /* If a goaway frame was received, this might be the reason why the read
+ * failed. Add this info to the error */
+ if (t->goaway_error != GRPC_ERROR_NONE) {
+ error = grpc_error_add_child(error, GRPC_ERROR_REF(t->goaway_error));
+ }
+
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
t->endpoint_reading = 0;
} else if (t->closed_with_error == GRPC_ERROR_NONE) {
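Taken together, the hunks above give the stored GOAWAY error a clear ownership story: the transport holds exactly one reference in t->goaway_error, and every consumer takes its own with GRPC_ERROR_REF. Condensed from the diff (gRPC-internal calls, not standalone code):

t->goaway_error = GRPC_ERROR_NONE;             /* init_transport: empty slot     */
GRPC_ERROR_UNREF(t->goaway_error);             /* new GOAWAY: drop the old error */
t->goaway_error = grpc_error_set_str(/*...*/); /* transport owns one reference   */
connectivity_state_set(/*...*/, GRPC_ERROR_REF(t->goaway_error), "got_goaway");
error = grpc_error_add_child(error, GRPC_ERROR_REF(t->goaway_error));
GRPC_ERROR_UNREF(t->goaway_error);             /* destruct_transport: drop ref   */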
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index a5a0a804a2..60cc280c43 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -306,9 +306,10 @@ struct grpc_chttp2_transport {
*/
uint32_t write_buffer_size;
- /** have we seen a goaway */
- bool seen_goaway;
- /** have we sent a goaway */
+ /** Set to a grpc_error object if a goaway frame is received. By default, set
+ * to GRPC_ERROR_NONE */
+ grpc_error* goaway_error;
+
grpc_chttp2_sent_goaway_state sent_goaway_state;
/** are the local settings dirty and need to be sent? */
@@ -376,11 +377,6 @@ struct grpc_chttp2_transport {
grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
- /* goaway data */
- grpc_status_code goaway_error;
- uint32_t goaway_last_stream_index;
- grpc_slice goaway_text;
-
grpc_chttp2_write_cb* write_cb_pool;
/* bdp estimator */
diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc
index 8a3774d688..6737c26e72 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.cc
+++ b/src/core/ext/transport/chttp2/transport/parsing.cc
@@ -590,7 +590,11 @@ static grpc_error* init_header_frame_parser(grpc_exec_ctx* exec_ctx,
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
}
- return init_skip_frame_parser(exec_ctx, t, 1);
+ grpc_error* err = init_skip_frame_parser(exec_ctx, t, 1);
+ if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY) {
+ grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
+ }
+ return err;
} else if (t->last_new_stream_id >= t->incoming_stream_id) {
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_ERROR,
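The extra call matters because a skipped HEADERS frame still flows through the HPACK parser: when the PRIORITY flag is set, five bytes of priority data precede the header block and must be consumed before the fragment, or the parser desynchronizes. For reference, the HEADERS payload layout from RFC 7540 §6.2:

+--------------------------------------------------------+
| Pad Length (1 byte)             - present if PADDED    |
| E + Stream Dependency (4 bytes) - present if PRIORITY  |
| Weight (1 byte)                 - present if PRIORITY  |
| Header Block Fragment (*)                              |
| Padding (*)                                            |
+--------------------------------------------------------+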
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 504c659874..61da996781 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/support/log.h>
+
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
@@ -34,7 +36,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
@@ -46,6 +47,7 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
@@ -111,8 +113,8 @@ static void epoll_set_shutdown() {
struct grpc_fd {
int fd;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
@@ -264,8 +266,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
}
new_fd->fd = fd;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -297,12 +299,11 @@ static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
* shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_error* why, bool releasing_fd) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
if (!releasing_fd) {
shutdown(fd->fd, SHUT_RDWR);
}
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
@@ -318,7 +319,7 @@ static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_error* error = GRPC_ERROR_NONE;
bool is_release_fd = (release_fd != NULL);
- if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
+ if (!fd->read_closure->IsShutdown()) {
fd_shutdown_internal(exec_ctx, fd,
GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
is_release_fd);
@@ -335,8 +336,8 @@ static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
@@ -351,28 +352,28 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
/*******************************************************************************
@@ -1230,6 +1231,7 @@ static const grpc_event_engine_vtable vtable = {
* support is available */
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
return NULL;
}
@@ -1254,6 +1256,8 @@ const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
+ gpr_log(GPR_ERROR,
+ "Skipping epoll1 becuase GRPC_LINUX_EPOLL is not defined.");
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
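ManualConstructor is what lets a non-POD C++ object live inside the C-style grpc_fd struct, whose instances are recycled through a freelist: storage is embedded in the struct, and construction/destruction happen explicitly via Init()/Destroy() rather than at allocation time. A minimal standalone sketch of the idiom (an illustration of the concept, not the in-tree implementation):

#include <new>
#include <utility>

template <typename T>
class ManualConstructor {
 public:
  template <typename... Args>
  void Init(Args&&... args) {      // placement-new into embedded storage
    new (space_) T(std::forward<Args>(args)...);
  }
  void Destroy() { get()->~T(); }  // explicit destructor call
  T* get() { return reinterpret_cast<T*>(space_); }
  T* operator->() { return get(); }

 private:
  alignas(T) unsigned char space_[sizeof(T)];
};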
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index aafdd690c7..caaee76b8c 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/support/log.h>
+
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
@@ -34,7 +36,6 @@
#include <unistd.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
@@ -48,6 +49,7 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/spinlock.h"
// debug aid: create workers on the heap (allows asan to spot
@@ -153,8 +155,8 @@ struct grpc_fd {
gpr_mu pollable_mu;
pollable* pollable_obj;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
@@ -286,8 +288,8 @@ static void fd_destroy(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
fd->freelist_next = fd_freelist;
fd_freelist = fd;
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_unlock(&fd_freelist_mu);
}
@@ -347,8 +349,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
new_fd->pollable_obj = NULL;
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -411,27 +413,26 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -702,7 +703,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -714,7 +715,7 @@ static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
@@ -1451,10 +1452,12 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
}
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd.");
return NULL;
}
if (!grpc_is_epollexclusive_available()) {
+ gpr_log(GPR_INFO, "Skipping epollex because it is not supported.");
return NULL;
}
@@ -1480,6 +1483,8 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
* NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) {
+ gpr_log(GPR_ERROR,
+ "Skipping epollex becuase GRPC_LINUX_EPOLL is not defined.");
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index d5f3122abc..42806e9d14 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -19,6 +19,7 @@
#include "src/core/lib/iomgr/port.h"
#include <grpc/grpc_posix.h>
+#include <grpc/support/log.h>
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
@@ -37,7 +38,6 @@
#include <unistd.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
@@ -50,6 +50,7 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/manual_constructor.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
@@ -127,8 +128,8 @@ struct grpc_fd {
valid */
bool orphaned;
- gpr_atm read_closure;
- gpr_atm write_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
+ grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
struct grpc_fd* freelist_next;
grpc_closure* on_done_closure;
@@ -766,8 +767,8 @@ static void unref_by(grpc_fd* fd, int n) {
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
+ fd->read_closure.Destroy();
+ fd->write_closure.Destroy();
gpr_mu_unlock(&fd_freelist_mu);
} else {
@@ -832,8 +833,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
+ new_fd->read_closure.Init();
+ new_fd->write_closure.Init();
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -924,27 +925,26 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
}
static bool fd_is_shutdown(grpc_fd* fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
+ return fd->read_closure->IsShutdown();
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_closure* closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -1108,7 +1108,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
grpc_pollset* notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -1120,7 +1120,7 @@ static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
}
static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+ fd->write_closure->SetReady(exec_ctx);
}
static void pollset_release_polling_island(grpc_exec_ctx* exec_ctx,
@@ -1711,14 +1711,17 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
/* If use of signals is disabled, we cannot use epoll engine*/
if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
+ gpr_log(GPR_ERROR, "Skipping epollsig because use of signals is disabled.");
return NULL;
}
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping epollsig because of no wakeup fd.");
return NULL;
}
if (!is_epoll_available()) {
+ gpr_log(GPR_ERROR, "Skipping epollsig because epoll is unavailable.");
return NULL;
}
@@ -1726,6 +1729,8 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
if (explicit_request) {
grpc_use_signal(SIGRTMIN + 6);
} else {
+ gpr_log(GPR_ERROR,
+ "Skipping epollsig because uninitialized wakeup signal.");
return NULL;
}
}
@@ -1751,6 +1756,8 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
* NULL */
const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
+ gpr_log(GPR_ERROR,
+ "Skipping epollsig becuase GRPC_LINUX_EPOLL is not defined.");
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 554a438e6a..5745a2ae5b 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -1712,6 +1712,7 @@ static const grpc_event_engine_vtable vtable = {
const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
+ gpr_log(GPR_ERROR, "Skipping poll because of no wakeup fd.");
return NULL;
}
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index f72f5088f0..a05279a4aa 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -172,12 +172,12 @@ void grpc_event_engine_init(void) {
gpr_free(strings[i]);
}
gpr_free(strings);
- gpr_free(s);
if (g_event_engine == NULL) {
- gpr_log(GPR_ERROR, "No event engine could be initialized");
+ gpr_log(GPR_ERROR, "No event engine could be initialized from %s", s);
abort();
}
+ gpr_free(s);
}
void grpc_event_engine_shutdown(void) {
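This hunk reorders a use-after-free: s (the string listing the tried engines) was freed before the failure branch that now wants to print it. The fix simply moves the free past the last use; annotated:

if (g_event_engine == NULL) {
  gpr_log(GPR_ERROR, "No event engine could be initialized from %s", s);
  abort();     /* process exits; s is intentionally not freed on this path */
}
gpr_free(s);   /* moved here, after the last use of s */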
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index 443a8375b2..40e2ed6219 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -26,92 +26,96 @@ extern grpc_tracer_flag grpc_polling_trace;
/* 'state' holds the closure to call when the fd is readable or writable respectively.
It can contain one of the following values:
- CLOSURE_READY : The fd has an I/O event of interest but there is no
+ kClosureReady : The fd has an I/O event of interest but there is no
closure yet to execute
- CLOSURE_NOT_READY : The fd has no I/O event of interest
+ kClosureNotReady : The fd has no I/O event of interest
closure ptr : The closure to be executed when the fd has an I/O
event of interest
- shutdown_error | FD_SHUTDOWN_BIT :
- 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+ shutdown_error | kShutdownBit :
+ 'shutdown_error' field ORed with kShutdownBit.
This indicates that the fd is shutdown. Since all
memory allocations are word-aligned, the lower two
bits of the shutdown_error pointer are always 0. So
- it is safe to OR these with FD_SHUTDOWN_BIT
+ it is safe to OR these with kShutdownBit
Valid state transitions:
- <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+ <closure ptr> <-----3------ kClosureNotReady -----1-------> kClosureReady
| | ^ | ^ | |
| | | | | | |
| +--------------4----------+ 6 +---------2---------------+ |
| | |
| v |
- +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+ +-----5-------> [shutdown_error | kShutdownBit] <-------7---------+
- For 1, 4 : See grpc_lfev_set_ready() function
- For 2, 3 : See grpc_lfev_notify_on() function
- For 5,6,7: See grpc_lfev_set_shutdown() function */
+ For 1, 4 : See SetReady() function
+ For 2, 3 : See NotifyOn() function
+ For 5,6,7: See SetShutdown() function */
-#define CLOSURE_NOT_READY ((gpr_atm)0)
-#define CLOSURE_READY ((gpr_atm)2)
+namespace grpc_core {
-#define FD_SHUTDOWN_BIT ((gpr_atm)1)
+LockfreeEvent::LockfreeEvent() {
+ /* Perform an atomic store to start the state machine.
-void grpc_lfev_init(gpr_atm* state) {
- gpr_atm_no_barrier_store(state, CLOSURE_NOT_READY);
+ Note carefully that LockfreeEvent *MAY* be used whilst in a destroyed
+ state, while a file descriptor is on a freelist. In such a state it may
+ be SetReady'd, and so we need to perform an atomic operation here to
+ ensure no races */
+ gpr_atm_no_barrier_store(&state_, kClosureNotReady);
}
-void grpc_lfev_destroy(gpr_atm* state) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- if (curr & FD_SHUTDOWN_BIT) {
- GRPC_ERROR_UNREF((grpc_error*)(curr & ~FD_SHUTDOWN_BIT));
- } else {
- GPR_ASSERT(curr == CLOSURE_NOT_READY || curr == CLOSURE_READY);
- }
-}
-
-bool grpc_lfev_is_shutdown(gpr_atm* state) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- return (curr & FD_SHUTDOWN_BIT) != 0;
+LockfreeEvent::~LockfreeEvent() {
+ gpr_atm curr;
+ do {
+ curr = gpr_atm_no_barrier_load(&state_);
+ if (curr & kShutdownBit) {
+ GRPC_ERROR_UNREF((grpc_error*)(curr & ~kShutdownBit));
+ } else {
+ GPR_ASSERT(curr == kClosureNotReady || curr == kClosureReady);
+ }
+ /* we CAS in a shutdown, no error value here. If this event is interacted
+ with post-deletion (see the note in the constructor) we want the bit
+ pattern to prevent error retention in a deleted object */
+ } while (!gpr_atm_no_barrier_cas(&state_, curr,
+ kShutdownBit /* shutdown, no error */));
}
-void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_closure* closure, const char* variable) {
+void LockfreeEvent::NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
- state, (void*)curr, closure);
+ gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
+ (void*)curr, closure);
}
switch (curr) {
- case CLOSURE_NOT_READY: {
- /* CLOSURE_NOT_READY -> <closure>.
+ case kClosureNotReady: {
+ /* kClosureNotReady -> <closure>.
We're guaranteed by API that there's an acquire barrier before here,
so there's no need to double-dip and this can be a release-only.
The release itself pairs with the acquire half of a set_ready full
barrier. */
- if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+ if (gpr_atm_rel_cas(&state_, kClosureNotReady, (gpr_atm)closure)) {
return; /* Successful. Return */
}
break; /* retry */
}
- case CLOSURE_READY: {
- /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+ case kClosureReady: {
+ /* Change the state to kClosureNotReady. Schedule the closure if
successful. If not, the state most likely transitioned to shutdown.
We should retry.
This can be a no-barrier cas since the state is being transitioned to
- CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+ kClosureNotReady; set_ready and set_shutdown do not schedule any
closure when transitioning out of CLOSURE_NOT_READY state (i.e. there
is no other code that needs to 'happen-after' this) */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+ if (gpr_atm_no_barrier_cas(&state_, kClosureReady, kClosureNotReady)) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
return; /* Successful. Return */
}
@@ -123,8 +127,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* 'curr' is either a closure or the fd is shutdown(in which case 'curr'
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
- grpc_error* shutdown_err = (grpc_error*)(curr & ~FD_SHUTDOWN_BIT);
+ if ((curr & kShutdownBit) > 0) {
+ grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -133,7 +137,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* There is already a closure! This indicates a bug in the code */
gpr_log(GPR_ERROR,
- "notify_on called with a previous callback still pending");
+ "LockfreeEvent::NotifyOn: notify_on called with a previous "
+ "callback still pending");
abort();
}
}
@@ -142,22 +147,22 @@ void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
GPR_UNREACHABLE_CODE(return );
}
-bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_error* shutdown_err) {
- gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+bool LockfreeEvent::SetShutdown(grpc_exec_ctx* exec_ctx,
+ grpc_error* shutdown_err) {
+ gpr_atm new_state = (gpr_atm)shutdown_err | kShutdownBit;
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
- (void*)curr, grpc_error_string(shutdown_err));
+ gpr_log(GPR_ERROR, "LockfreeEvent::SetShutdown: %p curr=%p err=%s",
+ &state_, (void*)curr, grpc_error_string(shutdown_err));
}
switch (curr) {
- case CLOSURE_READY:
- case CLOSURE_NOT_READY:
+ case kClosureReady:
+ case kClosureNotReady:
/* Need a full barrier here so that the initial load in notify_on
doesn't need a barrier */
- if (gpr_atm_full_cas(state, curr, new_state)) {
+ if (gpr_atm_full_cas(&state_, curr, new_state)) {
return true; /* early out */
}
break; /* retry */
@@ -166,7 +171,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
/* 'curr' is either a closure or the fd is already shutdown */
/* If fd is already shutdown, we are done */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ if ((curr & kShutdownBit) > 0) {
GRPC_ERROR_UNREF(shutdown_err);
return false;
}
@@ -176,7 +181,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
Needs an acquire to pair with setting the closure (and get a
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
- if (gpr_atm_full_cas(state, curr, new_state)) {
+ if (gpr_atm_full_cas(&state_, curr, new_state)) {
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -193,26 +198,25 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
GPR_UNREACHABLE_CODE(return false);
}
-void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- const char* variable) {
+void LockfreeEvent::SetReady(grpc_exec_ctx* exec_ctx) {
while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
+ gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
+ gpr_log(GPR_ERROR, "LockfreeEvent::SetReady: %p curr=%p", &state_,
(void*)curr);
}
switch (curr) {
- case CLOSURE_READY: {
+ case kClosureReady: {
/* Already ready. We are done here */
return;
}
- case CLOSURE_NOT_READY: {
+ case kClosureNotReady: {
/* No barrier required as we're transitioning to a state that does not
involve a closure */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+ if (gpr_atm_no_barrier_cas(&state_, kClosureNotReady, kClosureReady)) {
return; /* early out */
}
break; /* retry */
@@ -220,14 +224,14 @@ void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
default: {
/* 'curr' is either a closure or the fd is shutdown */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ if ((curr & kShutdownBit) > 0) {
/* The fd is shutdown. Do nothing */
return;
}
/* Full cas: acquire pairs with this cas' release in the event of a
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
- else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
+ else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr, GRPC_ERROR_NONE);
return;
}
@@ -239,3 +243,5 @@ void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
}
}
}
+
+} // namespace grpc_core
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index 75526d6b9f..c667dcd3bc 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -25,24 +25,30 @@
#include "src/core/lib/iomgr/exec_ctx.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void grpc_lfev_init(gpr_atm* state);
-void grpc_lfev_destroy(gpr_atm* state);
-bool grpc_lfev_is_shutdown(gpr_atm* state);
-
-void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_closure* closure, const char* variable);
-/* Returns true on first successful shutdown */
-bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- grpc_error* shutdown_err);
-void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
- const char* variable);
-
-#ifdef __cplusplus
-}
-#endif
+namespace grpc_core {
+
+class LockfreeEvent {
+ public:
+ LockfreeEvent();
+ ~LockfreeEvent();
+
+ LockfreeEvent(const LockfreeEvent&) = delete;
+ LockfreeEvent& operator=(const LockfreeEvent&) = delete;
+
+ bool IsShutdown() const {
+ return (gpr_atm_no_barrier_load(&state_) & kShutdownBit) != 0;
+ }
+
+ void NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure);
+ bool SetShutdown(grpc_exec_ctx* exec_ctx, grpc_error* error);
+ void SetReady(grpc_exec_ctx* exec_ctx);
+
+ private:
+ enum State { kClosureNotReady = 0, kClosureReady = 2, kShutdownBit = 1 };
+
+ gpr_atm state_;
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */
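The call pattern for the new class, as exercised by the pollers above: an I/O thread calls SetReady() when epoll reports an event, a consumer calls NotifyOn() to arm a closure, and the lock-free state machine resolves whichever order they land in. Condensed usage sketch (gRPC-internal types; illustrative, not standalone):

grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;

read_closure.Init();                            // fd created or reused from freelist
read_closure->NotifyOn(exec_ctx, on_readable);  // arm: fires at once if already ready
read_closure->SetReady(exec_ctx);               // event arrives: schedules on_readable
read_closure->SetShutdown(exec_ctx, why);       // fails pending and future closures
read_closure.Destroy();                         // fd returns to the freelist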
diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h
index 3033e0a833..32244d9b77 100644
--- a/src/core/lib/iomgr/network_status_tracker.h
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -20,10 +20,6 @@
#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
#include "src/core/lib/iomgr/endpoint.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
void grpc_network_status_init(void);
void grpc_network_status_shutdown(void);
@@ -31,8 +27,4 @@ void grpc_network_status_register_endpoint(grpc_endpoint* ep);
void grpc_network_status_unregister_endpoint(grpc_endpoint* ep);
void grpc_network_status_shutdown_all_endpoints();
-#ifdef __cplusplus
-}
-#endif
-
#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index 6b9c53c01c..1d54942c1d 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -40,7 +40,7 @@ grpc_tracer_flag grpc_trace_fd_refcount =
#endif
struct grpc_pollset {
- uv_timer_t timer;
+ uv_timer_t* timer;
int shutting_down;
};
@@ -78,12 +78,16 @@ void grpc_pollset_global_shutdown(void) {
static void timer_run_cb(uv_timer_t* timer) {}
-static void timer_close_cb(uv_handle_t* handle) { handle->data = (void*)1; }
+static void timer_close_cb(uv_handle_t* handle) {
+ handle->data = (void*)1;
+ gpr_free(handle);
+}
void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
GRPC_UV_ASSERT_SAME_THREAD();
*mu = &grpc_polling_mu;
- uv_timer_init(uv_default_loop(), &pollset->timer);
+ pollset->timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
+ uv_timer_init(uv_default_loop(), pollset->timer);
pollset->shutting_down = 0;
}
@@ -104,11 +108,11 @@ void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GRPC_UV_ASSERT_SAME_THREAD();
- uv_close((uv_handle_t*)&pollset->timer, timer_close_cb);
+ uv_close((uv_handle_t*)pollset->timer, timer_close_cb);
// timer->data is a boolean indicating that the timer has finished closing
- pollset->timer.data = (void*)0;
+ pollset->timer->data = (void*)0;
if (grpc_pollset_work_run_loop) {
- while (!pollset->timer.data) {
+ while (!pollset->timer->data) {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
}
@@ -130,11 +134,11 @@ grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
/* We special-case timeout=0 so that we don't bother with the timer when
the loop won't block anyway */
if (timeout > 0) {
- uv_timer_start(&pollset->timer, timer_run_cb, timeout, 0);
+ uv_timer_start(pollset->timer, timer_run_cb, timeout, 0);
/* Run until there is some I/O activity or the timer triggers. It doesn't
matter which happens */
uv_run(uv_default_loop(), UV_RUN_ONCE);
- uv_timer_stop(&pollset->timer);
+ uv_timer_stop(pollset->timer);
} else {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
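Heap-allocating the timer follows the standard libuv lifetime rule: uv_close() is asynchronous, so the handle's memory must remain valid until the close callback runs, which can be after grpc_pollset_destroy() returns when the caller isn't driving the loop. Freeing inside the close callback is the canonical pattern; a standalone sketch:

#include <stdlib.h>
#include <uv.h>

static void close_cb(uv_handle_t* handle) {
  free(handle);  /* safe: libuv has finished with the handle */
}

int main(void) {
  uv_timer_t* timer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
  uv_timer_init(uv_default_loop(), timer);
  uv_close((uv_handle_t*)timer, close_cb);    /* asynchronous: do not free yet */
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);  /* close_cb runs from the loop   */
  return 0;
}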
diff --git a/src/core/lib/support/cmdline.cc b/src/core/lib/support/cmdline.cc
index 49b34194c3..d2785d2f30 100644
--- a/src/core/lib/support/cmdline.cc
+++ b/src/core/lib/support/cmdline.cc
@@ -105,7 +105,7 @@ void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
}
void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
- char** value) {
+ const char** value) {
add_arg(cl, name, help, ARGTYPE_STRING, value);
}
diff --git a/src/core/lib/support/mpscq.cc b/src/core/lib/support/mpscq.cc
index db25f24264..b270777d5c 100644
--- a/src/core/lib/support/mpscq.cc
+++ b/src/core/lib/support/mpscq.cc
@@ -31,11 +31,12 @@ void gpr_mpscq_destroy(gpr_mpscq* q) {
GPR_ASSERT(q->tail == &q->stub);
}
-void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
+bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL);
gpr_mpscq_node* prev =
(gpr_mpscq_node*)gpr_atm_full_xchg(&q->head, (gpr_atm)n);
gpr_atm_rel_store(&prev->next, (gpr_atm)n);
+ return prev == &q->stub;
}
gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q) {
@@ -77,3 +78,37 @@ gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty) {
*empty = false;
return NULL;
}
+
+void gpr_locked_mpscq_init(gpr_locked_mpscq* q) {
+ gpr_mpscq_init(&q->queue);
+ gpr_mu_init(&q->mu);
+}
+
+void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q) {
+ gpr_mpscq_destroy(&q->queue);
+ gpr_mu_destroy(&q->mu);
+}
+
+bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n) {
+ return gpr_mpscq_push(&q->queue, n);
+}
+
+gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q) {
+ if (gpr_mu_trylock(&q->mu)) {
+ gpr_mpscq_node* n = gpr_mpscq_pop(&q->queue);
+ gpr_mu_unlock(&q->mu);
+ return n;
+ }
+ return NULL;
+}
+
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q) {
+ gpr_mu_lock(&q->mu);
+ bool empty = false;
+ gpr_mpscq_node* n;
+ do {
+ n = gpr_mpscq_pop_and_check_end(&q->queue, &empty);
+ } while (n == NULL && !empty);
+ gpr_mu_unlock(&q->mu);
+ return n;
+}
diff --git a/src/core/lib/support/mpscq.h b/src/core/lib/support/mpscq.h
index 1cc9d89feb..fb22742050 100644
--- a/src/core/lib/support/mpscq.h
+++ b/src/core/lib/support/mpscq.h
@@ -20,6 +20,7 @@
#define GRPC_CORE_LIB_SUPPORT_MPSCQ_H
#include <grpc/support/atm.h>
+#include <grpc/support/sync.h>
#include <stdbool.h>
#include <stddef.h>
@@ -49,13 +50,40 @@ typedef struct gpr_mpscq {
void gpr_mpscq_init(gpr_mpscq* q);
void gpr_mpscq_destroy(gpr_mpscq* q);
// Push a node
-void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
+// Thread safe - can be called from multiple threads concurrently
+// Returns true if this was possibly the first node (may return true
+// sporadically, will not return false sporadically)
+bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
// Pop a node (returns NULL if no node is ready - which doesn't indicate that
// the queue is empty!!)
+// Thread compatible - can only be called from one thread at a time
gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q);
// Pop a node; sets *empty to true if the queue is empty, or false if it is not
gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty);
+// An mpscq with a lock: it's safe to pop from multiple threads, but only one
+// popping thread will succeed concurrently
+typedef struct gpr_locked_mpscq {
+ gpr_mpscq queue;
+ gpr_mu mu;
+} gpr_locked_mpscq;
+
+void gpr_locked_mpscq_init(gpr_locked_mpscq* q);
+void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q);
+// Push a node
+// Thread safe - can be called from multiple threads concurrently
+// Returns true if this was possibly the first node (may return true
+// sporadically, will not return false sporadically)
+bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n);
+
+// Pop a node (returns NULL if no node is ready - which doesn't indicate that
+// the queue is empty!!)
+// Thread safe - can be called from multiple threads concurrently
+gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q);
+
+// Pop a node. Returns NULL only if the queue was empty at some point after
+// calling this function
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q);
#ifdef __cplusplus
}
#endif
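The two pop variants split the consumers: hot paths use try_pop, which backs off instead of blocking when another thread holds the lock (acceptable because a concurrent pop means someone else is already draining the queue), while paths that need a definitive answer use pop. A sketch of a consumer against this API (gRPC-internal; process() is a hypothetical placeholder):

gpr_mpscq_node* n = gpr_locked_mpscq_try_pop(&q);
if (n == NULL) {
  /* Either truly empty or another consumer is mid-pop. Only fall back to
     the locking variant when emptiness must be authoritative. */
  n = gpr_locked_mpscq_pop(&q);  /* NULL here really means empty */
}
if (n != NULL) process(n);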
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 14054e82e7..922df923ae 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -376,8 +376,8 @@ int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
(grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
*tag = storage->tag;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- storage->done(&exec_ctx, storage->done_arg, storage);
*ok = (storage->next & (uintptr_t)(1)) == 1;
+ storage->done(&exec_ctx, storage->done_arg, storage);
ret = 1;
cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
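The swap fixes a use-after-free: storage->done() may release storage (done_request_event in server.cc below now just gpr_free()s it), so the tag's success bit packed into storage->next must be read before the callback runs. Annotated, from the hunk:

*ok = (storage->next & (uintptr_t)(1)) == 1;           /* read storage first...    */
storage->done(&exec_ctx, storage->done_arg, storage);  /* ...then let done free it */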
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index eb7a4e2d30..da7ae17bef 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -33,7 +33,8 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/support/stack_lockfree.h"
+#include "src/core/lib/support/mpscq.h"
+#include "src/core/lib/support/spinlock.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -63,6 +64,7 @@ grpc_tracer_flag grpc_server_channel_trace =
GRPC_TRACER_INITIALIZER(false, "server_channel");
typedef struct requested_call {
+ gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
void* tag;
@@ -128,10 +130,7 @@ typedef struct request_matcher request_matcher;
struct call_data {
grpc_call* call;
- /** protects state */
- gpr_mu mu_state;
- /** the current state of a call - see call_state */
- call_state state;
+ gpr_atm state;
bool path_set;
bool host_set;
@@ -162,7 +161,7 @@ struct request_matcher {
grpc_server* server;
call_data* pending_head;
call_data* pending_tail;
- gpr_stack_lockfree** requests_per_cq;
+ gpr_locked_mpscq* requests_per_cq;
};
struct registered_method {
@@ -207,11 +206,6 @@ struct grpc_server {
registered_method* registered_methods;
/** one request matcher for unregistered methods */
request_matcher unregistered_request_matcher;
- /** free list of available requested_calls_per_cq indices */
- gpr_stack_lockfree** request_freelist_per_cq;
- /** requested call backing data */
- requested_call** requested_calls_per_cq;
- int max_requested_calls_per_cq;
gpr_atm shutdown_flag;
uint8_t shutdown_published;
@@ -313,21 +307,20 @@ static void channel_broadcaster_shutdown(grpc_exec_ctx* exec_ctx,
* request_matcher
*/
-static void request_matcher_init(request_matcher* rm, size_t entries,
- grpc_server* server) {
+static void request_matcher_init(request_matcher* rm, grpc_server* server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq = (gpr_stack_lockfree**)gpr_malloc(
+ rm->requests_per_cq = (gpr_locked_mpscq*)gpr_malloc(
sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
- rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
+ gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
}
}
static void request_matcher_destroy(request_matcher* rm) {
for (size_t i = 0; i < rm->server->cq_count; i++) {
- GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests_per_cq[i]) == -1);
- gpr_stack_lockfree_destroy(rm->requests_per_cq[i]);
+ GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == NULL);
+ gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
}
gpr_free(rm->requests_per_cq);
}
@@ -342,9 +335,7 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx* exec_ctx,
while (rm->pending_head) {
call_data* calld = rm->pending_head;
rm->pending_head = calld->pending_next;
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -357,13 +348,17 @@ static void request_matcher_kill_requests(grpc_exec_ctx* exec_ctx,
grpc_server* server,
request_matcher* rm,
grpc_error* error) {
- int request_id;
+ requested_call* rc;
for (size_t i = 0; i < server->cq_count; i++) {
- while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) !=
- -1) {
- fail_call(exec_ctx, server, i,
- &server->requested_calls_per_cq[i][request_id],
- GRPC_ERROR_REF(error));
+ /* Here we know:
+ 1. no requests are being added (since the server is shut down)
+ 2. no other threads are pulling (since the shutdown process is single
+ threaded)
+ So, we can ignore the queue lock and just pop, with the guarantee that a
+ NULL returned here truly means that the queue is empty */
+ while ((rc = (requested_call*)gpr_mpscq_pop(
+ &rm->requests_per_cq[i].queue)) != NULL) {
+ fail_call(exec_ctx, server, i, rc, GRPC_ERROR_REF(error));
}
}
GRPC_ERROR_UNREF(error);
@@ -398,13 +393,7 @@ static void server_delete(grpc_exec_ctx* exec_ctx, grpc_server* server) {
}
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(exec_ctx, server->cqs[i], "server");
- if (server->started) {
- gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]);
- gpr_free(server->requested_calls_per_cq[i]);
- }
}
- gpr_free(server->request_freelist_per_cq);
- gpr_free(server->requested_calls_per_cq);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
@@ -462,21 +451,7 @@ static void destroy_channel(grpc_exec_ctx* exec_ctx, channel_data* chand,
static void done_request_event(grpc_exec_ctx* exec_ctx, void* req,
grpc_cq_completion* c) {
- requested_call* rc = (requested_call*)req;
- grpc_server* server = rc->server;
-
- if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
- rc < server->requested_calls_per_cq[rc->cq_idx] +
- server->max_requested_calls_per_cq) {
- GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX);
- gpr_stack_lockfree_push(
- server->request_freelist_per_cq[rc->cq_idx],
- (int)(rc - server->requested_calls_per_cq[rc->cq_idx]));
- } else {
- gpr_free(req);
- }
-
- server_unref(exec_ctx, server);
+ gpr_free(req);
}
static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
@@ -508,10 +483,6 @@ static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
GPR_UNREACHABLE_CODE(return );
}
- grpc_call_element* elem =
- grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data* chand = (channel_data*)elem->channel_data;
- server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
}
@@ -525,9 +496,7 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
grpc_server* server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -539,16 +508,14 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
for (size_t i = 0; i < server->cq_count; i++) {
size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
- int request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
- if (request_id == -1) {
+ requested_call* rc =
+ (requested_call*)gpr_locked_mpscq_try_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) {
continue;
} else {
GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
- gpr_mu_lock(&calld->mu_state);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&calld->mu_state);
- publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls_per_cq[cq_idx][request_id]);
+ gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
return; /* early out */
}
}
@@ -556,9 +523,27 @@ static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
/* no cq to take the request found: queue it on the slow list */
GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
- gpr_mu_lock(&calld->mu_state);
- calld->state = PENDING;
- gpr_mu_unlock(&calld->mu_state);
+
+ // We need to ensure that all the queues are empty. We do this under
+ // the server mu_call lock to ensure that if something is added to
+ // an empty request queue, it will block until the call is actually
+ // added to the pending list.
+ for (size_t i = 0; i < server->cq_count; i++) {
+ size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
+ requested_call* rc =
+ (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) {
+ continue;
+ } else {
+ gpr_mu_unlock(&server->mu_call);
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i + server->cq_count);
+ gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
+ return; /* early out */
+ }
+ }
+
+ gpr_atm_no_barrier_store(&calld->state, PENDING);
if (rm->pending_head == NULL) {
rm->pending_tail = rm->pending_head = calld;
} else {
@@ -576,9 +561,7 @@ static void finish_start_new_rpc(
call_data* calld = (call_data*)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
- gpr_mu_lock(&calld->mu_state);
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
@@ -807,21 +790,14 @@ static void got_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == NOT_STARTED) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) {
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
- } else if (calld->state == PENDING) {
- calld->state = ZOMBIED;
- gpr_mu_unlock(&calld->mu_state);
+ } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) {
/* zombied call will be destroyed when it's removed from the pending
queue... later */
- } else {
- gpr_mu_unlock(&calld->mu_state);
}
}
}
@@ -885,7 +861,6 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
memset(calld, 0, sizeof(call_data));
calld->deadline = GRPC_MILLIS_INF_FUTURE;
calld->call = grpc_call_from_top_element(elem);
- gpr_mu_init(&calld->mu_state);
GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem,
@@ -912,8 +887,6 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_metadata_array_destroy(&calld->initial_metadata);
grpc_byte_buffer_destroy(calld->payload);
- gpr_mu_destroy(&calld->mu_state);
-
server_unref(exec_ctx, chand->server);
}
@@ -1020,8 +993,6 @@ grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
server->root_channel_data.next = server->root_channel_data.prev =
&server->root_channel_data;
- /* TODO(ctiller): expose a channel_arg for this */
- server->max_requested_calls_per_cq = 32768;
server->channel_args = grpc_channel_args_copy(args);
return server;
@@ -1095,29 +1066,15 @@ void grpc_server_start(grpc_server* server) {
server->pollset_count = 0;
server->pollsets =
(grpc_pollset**)gpr_malloc(sizeof(grpc_pollset*) * server->cq_count);
- server->request_freelist_per_cq = (gpr_stack_lockfree**)gpr_malloc(
- sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq = (requested_call**)gpr_malloc(
- sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
server->pollsets[server->pollset_count++] =
grpc_cq_pollset(server->cqs[i]);
}
- server->request_freelist_per_cq[i] =
- gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq);
- for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
- gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
- }
- server->requested_calls_per_cq[i] =
- (requested_call*)gpr_malloc((size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
}
- request_matcher_init(&server->unregistered_request_matcher,
- (size_t)server->max_requested_calls_per_cq, server);
+ request_matcher_init(&server->unregistered_request_matcher, server);
for (registered_method* rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->matcher,
- (size_t)server->max_requested_calls_per_cq, server);
+ request_matcher_init(&rm->matcher, server);
}
server_ref(server);
@@ -1373,21 +1330,11 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
requested_call* rc) {
call_data* calld = NULL;
request_matcher* rm = NULL;
- int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(exec_ctx, server, cq_idx, rc,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
return GRPC_CALL_OK;
}
- request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
- if (request_id == -1) {
- /* out of request ids: just fail this one */
- fail_call(exec_ctx, server, cq_idx, rc,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Out of request ids"),
- GRPC_ERROR_INT_LIMIT, server->max_requested_calls_per_cq));
- return GRPC_CALL_OK;
- }
switch (rc->type) {
case BATCH_CALL:
rm = &server->unregistered_request_matcher;
@@ -1396,20 +1343,17 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
rm = &rc->data.registered.method->matcher;
break;
}
- server->requested_calls_per_cq[cq_idx][request_id] = *rc;
- gpr_free(rc);
- if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) {
+ if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
/* this was the first queued request: we need to lock and start
matching calls */
gpr_mu_lock(&server->mu_call);
while ((calld = rm->pending_head) != NULL) {
- request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
- if (request_id == -1) break;
+ rc = (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);
+ if (rc == NULL) break;
rm->pending_head = calld->pending_next;
gpr_mu_unlock(&server->mu_call);
- gpr_mu_lock(&calld->mu_state);
- if (calld->state == ZOMBIED) {
- gpr_mu_unlock(&calld->mu_state);
+ if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
+ // Zombied Call
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
@@ -1417,11 +1361,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else {
- GPR_ASSERT(calld->state == PENDING);
- calld->state = ACTIVATED;
- gpr_mu_unlock(&calld->mu_state);
- publish_call(exec_ctx, server, calld, cq_idx,
- &server->requested_calls_per_cq[cq_idx][request_id]);
+ publish_call(exec_ctx, server, calld, cq_idx, rc);
}
gpr_mu_lock(&server->mu_call);
}
@@ -1540,7 +1480,6 @@ static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
rc->initial_metadata->count = 0;
GPR_ASSERT(error != GRPC_ERROR_NONE);
- server_ref(server);
grpc_cq_end_op(exec_ctx, server->cqs[cq_idx], rc->tag, error,
done_request_event, rc, &rc->completion);
}
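The server.cc rewrite hinges on one layout trick: request_link is the first field of requested_call, so a pointer to the node and a pointer to the enclosing struct are interchangeable, letting requested_call objects thread directly through the per-cq gpr_locked_mpscq with no separate backing array and no 32768-request ceiling. Condensed from the hunks above:

typedef struct requested_call {
  gpr_mpscq_node request_link; /* must be first: &rc->request_link == (void*)rc */
  /* ... remaining fields ... */
} requested_call;

/* enqueue: intrusive, no allocation beyond rc itself */
gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link);

/* dequeue: the node pointer is the requested_call pointer */
requested_call* rc =
    (requested_call*)gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]);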