author     Craig Tiller <ctiller@google.com>   2016-06-30 14:29:18 -0700
committer  Craig Tiller <ctiller@google.com>   2016-06-30 14:29:18 -0700
commit     3a2ef9ded10f4071e15cd2927bf417779e4e50fc
tree       0f85b312003f5a3de4306350f68fa6f567bee0e7 /src/core/lib/iomgr
parent     09c464a1424067fc3b5e5f0b1280a8f8c3444342
parent     a5596db1a53723789d7c90c23d9cbfbb8207f949
Merge github.com:grpc/grpc into delayed-write
Diffstat (limited to 'src/core/lib/iomgr')
34 files changed, 4634 insertions, 101 deletions
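The error.h changes in this diff mark grpc_error_set_int(), grpc_error_set_str(), grpc_error_set_time() and grpc_error_add_child() as GRPC_MUST_USE_RESULT: each call consumes its input and returns a new error, so the result must be captured. A minimal sketch of that ownership pattern, using only declarations visible in this diff; the description, status and message values are made up for illustration:

#include <grpc/status.h>
#include "src/core/lib/iomgr/error.h"

static grpc_error *example_annotate(int saved_errno) {
  /* Each setter consumes its argument and returns a new grpc_error,
     hence the reassignment on every call. */
  grpc_error *err = GRPC_ERROR_CREATE("example failure"); /* hypothetical description */
  err = grpc_error_set_int(err, GRPC_ERROR_INT_GRPC_STATUS,
                           GRPC_STATUS_CANCELLED); /* status chosen for illustration */
  err = grpc_error_set_str(err, GRPC_ERROR_STR_GRPC_MESSAGE, "example message");
  /* Attach a child error believed to have contributed to this one. */
  err = grpc_error_add_child(err, GRPC_OS_ERROR(saved_errno, "read"));
  return err; /* caller owns a ref and eventually calls GRPC_ERROR_UNREF(err) */
}

The new grpc_error_get_str() accessor and the GRPC_ERROR_INT_GRPC_STATUS handling added to grpc_error_get_int() read these values back; note that the GRPC_ERROR_CANCELLED singleton now also reports GRPC_STATUS_CANCELLED.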
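ev_epoll_linux.c, added by this change, kicks workers out of epoll_pwait() with a real-time signal configured through grpc_use_signal() (declared in grpc/grpc_posix.h, per the comment in the file). A hypothetical caller-side sketch of that hook; the signal number here is an arbitrary example, and passing -1 disables signal use and therefore this engine:

#include <signal.h>
#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>

int main(void) {
  /* Choose the signal the epoll engine will use to kick pollers; the comment
     in ev_epoll_linux.c notes this may be called even before grpc_init(). */
  grpc_use_signal(SIGRTMIN + 2); /* arbitrary example signal */
  grpc_init();
  /* ... normal gRPC usage ... */
  grpc_shutdown();
  return 0;
}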
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h index 3877ceb1e2..f9808bbda1 100644 --- a/src/core/lib/iomgr/endpoint.h +++ b/src/core/lib/iomgr/endpoint.h @@ -82,7 +82,7 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep); void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, gpr_slice_buffer *slices, grpc_closure *cb); -/* Causes any pending read/write callbacks to run immediately with +/* Causes any pending and future read/write callbacks to run immediately with success==0 */ void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep); void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep); diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c index 35154268f3..e20a0169c5 100644 --- a/src/core/lib/iomgr/error.c +++ b/src/core/lib/iomgr/error.c @@ -1,6 +1,6 @@ /* * - * Copyright 2015, Google Inc. + * Copyright 2016, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,19 +33,23 @@ #include "src/core/lib/iomgr/error.h" +#include <inttypes.h> #include <stdbool.h> #include <string.h> +#include <grpc/status.h> #include <grpc/support/alloc.h> #include <grpc/support/avl.h> #include <grpc/support/log.h> #include <grpc/support/string_util.h> #include <grpc/support/useful.h> -#ifdef GPR_WIN32 -#include <grpc/support/log_win32.h> +#ifdef GPR_WINDOWS +#include <grpc/support/log_windows.h> #endif +#include "src/core/lib/profiling/timers.h" + static void destroy_integer(void *key) {} static void *copy_integer(void *key) { return key; } @@ -88,22 +92,16 @@ static const gpr_avl_vtable avl_vtable_errs = { static const char *error_int_name(grpc_error_ints key) { switch (key) { - case GRPC_ERROR_INT_STATUS_CODE: - return "status_code"; case GRPC_ERROR_INT_ERRNO: return "errno"; case GRPC_ERROR_INT_FILE_LINE: return "file_line"; - case GRPC_ERROR_INT_WARNING: - return "warning"; case GRPC_ERROR_INT_STREAM_ID: return "stream_id"; case GRPC_ERROR_INT_GRPC_STATUS: return "grpc_status"; case GRPC_ERROR_INT_OFFSET: return "offset"; - case GRPC_ERROR_INT_LIMIT: - return "limit"; case GRPC_ERROR_INT_INDEX: return "index"; case GRPC_ERROR_INT_SIZE: @@ -118,6 +116,10 @@ static const char *error_int_name(grpc_error_ints key) { return "fd"; case GRPC_ERROR_INT_WSA_ERROR: return "wsa_error"; + case GRPC_ERROR_INT_HTTP_STATUS: + return "http_status"; + case GRPC_ERROR_INT_LIMIT: + return "limit"; } GPR_UNREACHABLE_CODE(return "unknown"); } @@ -216,6 +218,7 @@ void grpc_error_unref(grpc_error *err) { grpc_error *grpc_error_create(const char *file, int line, const char *desc, grpc_error **referencing, size_t num_referencing) { + GPR_TIMER_BEGIN("grpc_error_create", 0); grpc_error *err = gpr_malloc(sizeof(*err)); if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL return GRPC_ERROR_OOM; @@ -241,57 +244,91 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc, (void *)(uintptr_t)GRPC_ERROR_TIME_CREATED, box_time(gpr_now(GPR_CLOCK_REALTIME))); gpr_ref_init(&err->refs, 1); + GPR_TIMER_END("grpc_error_create", 0); return err; } static grpc_error *copy_error_and_unref(grpc_error *in) { + GPR_TIMER_BEGIN("copy_error_and_unref", 0); + grpc_error *out; if (is_special(in)) { - if (in == GRPC_ERROR_NONE) return GRPC_ERROR_CREATE("no error"); - if (in == GRPC_ERROR_OOM) return GRPC_ERROR_CREATE("oom"); - if (in == GRPC_ERROR_CANCELLED) return GRPC_ERROR_CREATE("cancelled"); - return GRPC_ERROR_CREATE("unknown"); - } - grpc_error *out = 
gpr_malloc(sizeof(*out)); + if (in == GRPC_ERROR_NONE) + out = GRPC_ERROR_CREATE("no error"); + else if (in == GRPC_ERROR_OOM) + out = GRPC_ERROR_CREATE("oom"); + else if (in == GRPC_ERROR_CANCELLED) + out = + grpc_error_set_int(GRPC_ERROR_CREATE("cancelled"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED); + else + out = GRPC_ERROR_CREATE("unknown"); + } else { + out = gpr_malloc(sizeof(*out)); #ifdef GRPC_ERROR_REFCOUNT_DEBUG - gpr_log(GPR_DEBUG, "%p create copying", out); + gpr_log(GPR_DEBUG, "%p create copying", out); #endif - out->ints = gpr_avl_ref(in->ints); - out->strs = gpr_avl_ref(in->strs); - out->errs = gpr_avl_ref(in->errs); - out->times = gpr_avl_ref(in->times); - out->next_err = in->next_err; - gpr_ref_init(&out->refs, 1); - GRPC_ERROR_UNREF(in); + out->ints = gpr_avl_ref(in->ints); + out->strs = gpr_avl_ref(in->strs); + out->errs = gpr_avl_ref(in->errs); + out->times = gpr_avl_ref(in->times); + out->next_err = in->next_err; + gpr_ref_init(&out->refs, 1); + GRPC_ERROR_UNREF(in); + } + GPR_TIMER_END("copy_error_and_unref", 0); return out; } grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which, intptr_t value) { + GPR_TIMER_BEGIN("grpc_error_set_int", 0); grpc_error *new = copy_error_and_unref(src); new->ints = gpr_avl_add(new->ints, (void *)(uintptr_t)which, (void *)value); + GPR_TIMER_END("grpc_error_set_int", 0); return new; } bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) { + GPR_TIMER_BEGIN("grpc_error_get_int", 0); void *pp; + if (is_special(err)) { + if (err == GRPC_ERROR_CANCELLED && which == GRPC_ERROR_INT_GRPC_STATUS) { + *p = GRPC_STATUS_CANCELLED; + GPR_TIMER_END("grpc_error_get_int", 0); + return true; + } + GPR_TIMER_END("grpc_error_get_int", 0); + return false; + } if (gpr_avl_maybe_get(err->ints, (void *)(uintptr_t)which, &pp)) { if (p != NULL) *p = (intptr_t)pp; + GPR_TIMER_END("grpc_error_get_int", 0); return true; } + GPR_TIMER_END("grpc_error_get_int", 0); return false; } grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, const char *value) { + GPR_TIMER_BEGIN("grpc_error_set_str", 0); grpc_error *new = copy_error_and_unref(src); new->strs = gpr_avl_add(new->strs, (void *)(uintptr_t)which, gpr_strdup(value)); + GPR_TIMER_END("grpc_error_set_str", 0); return new; } +const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) { + if (is_special(err)) return NULL; + return gpr_avl_get(err->strs, (void *)(uintptr_t)which); +} + grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) { + GPR_TIMER_BEGIN("grpc_error_add_child", 0); grpc_error *new = copy_error_and_unref(src); new->errs = gpr_avl_add(new->errs, (void *)(new->next_err++), child); + GPR_TIMER_END("grpc_error_add_child", 0); return new; } @@ -342,7 +379,7 @@ static char *key_time(void *p) { static char *fmt_int(void *p) { char *s; - gpr_asprintf(&s, "%lld", (intptr_t)p); + gpr_asprintf(&s, "%" PRIdPTR, (intptr_t)p); return s; } @@ -424,7 +461,7 @@ static char *fmt_time(void *p) { pfx = ""; break; } - gpr_asprintf(&out, "\"%s%d.%09d\"", pfx, tm.tv_sec, tm.tv_nsec); + gpr_asprintf(&out, "\"%s%" PRId64 ".%09d\"", pfx, tm.tv_sec, tm.tv_nsec); return out; } @@ -483,6 +520,7 @@ void grpc_error_free_string(const char *str) { } const char *grpc_error_string(grpc_error *err) { + GPR_TIMER_BEGIN("grpc_error_string", 0); if (err == GRPC_ERROR_NONE) return no_error_string; if (err == GRPC_ERROR_OOM) return oom_error_string; if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string; @@ -499,7 +537,9 @@ const char 
*grpc_error_string(grpc_error *err) { qsort(kvs.kvs, kvs.num_kvs, sizeof(kv_pair), cmp_kvs); - return finish_kvs(&kvs); + const char *out = finish_kvs(&kvs); + GPR_TIMER_END("grpc_error_string", 0); + return out; } grpc_error *grpc_os_error(const char *file, int line, int err, @@ -512,7 +552,7 @@ grpc_error *grpc_os_error(const char *file, int line, int err, GRPC_ERROR_STR_SYSCALL, call_name); } -#ifdef GPR_WIN32 +#ifdef GPR_WINDOWS grpc_error *grpc_wsa_error(const char *file, int line, int err, const char *call_name) { char *utf8_message = gpr_format_message(err); diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h index ede159526e..13f898e31a 100644 --- a/src/core/lib/iomgr/error.h +++ b/src/core/lib/iomgr/error.h @@ -1,6 +1,6 @@ /* * - * Copyright 2015, Google Inc. + * Copyright 2016, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -39,50 +39,86 @@ #include <grpc/support/time.h> -// Opaque representation of an error. -// Errors are refcounted objects that represent the result of an operation. -// Ownership laws: -// if a grpc_error is returned by a function, the caller owns a ref to that -// instance -// if a grpc_error is passed to a grpc_closure callback function (functions -// with the signature: -// void (*f)(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error)) -// then those functions do not automatically own a ref to error -// if a grpc_error is passed to *ANY OTHER FUNCTION* then that function takes -// ownership of the error +/// Opaque representation of an error. +/// Errors are refcounted objects that represent the result of an operation. +/// Ownership laws: +/// if a grpc_error is returned by a function, the caller owns a ref to that +/// instance +/// if a grpc_error is passed to a grpc_closure callback function (functions +/// with the signature: +/// void (*f)(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error)) +/// then those functions do not automatically own a ref to error +/// if a grpc_error is passed to *ANY OTHER FUNCTION* then that function takes +/// ownership of the error +/// Errors have: +/// a set of ints, strings, and timestamps that describe the error +/// always present are: +/// GRPC_ERROR_STR_FILE, GRPC_ERROR_INT_FILE_LINE - source location the error +/// was generated +/// GRPC_ERROR_STR_DESCRIPTION - a human readable description of the error +/// GRPC_ERROR_TIME_CREATED - a timestamp indicating when the error happened +/// an error can also have children; these are other errors that are believed +/// to have contributed to this one. 
By accumulating children, we can begin +/// to root cause high level failures from low level failures, without having +/// to derive execution paths from log lines typedef struct grpc_error grpc_error; typedef enum { + /// 'errno' from the operating system GRPC_ERROR_INT_ERRNO, + /// __LINE__ from the call site creating the error GRPC_ERROR_INT_FILE_LINE, - GRPC_ERROR_INT_STATUS_CODE, - GRPC_ERROR_INT_WARNING, + /// stream identifier: for errors that are associated with an individual + /// wire stream GRPC_ERROR_INT_STREAM_ID, + /// grpc status code representing this error GRPC_ERROR_INT_GRPC_STATUS, + /// offset into some binary blob (usually represented by + /// GRPC_ERROR_STR_RAW_BYTES) where the error occurred GRPC_ERROR_INT_OFFSET, + /// context sensitive index associated with the error GRPC_ERROR_INT_INDEX, + /// context sensitive size associated with the error GRPC_ERROR_INT_SIZE, - GRPC_ERROR_INT_LIMIT, + /// http2 error code associated with the error (see the HTTP2 RFC) GRPC_ERROR_INT_HTTP2_ERROR, + /// TSI status code associated with the error GRPC_ERROR_INT_TSI_CODE, + /// grpc_security_status associated with the error GRPC_ERROR_INT_SECURITY_STATUS, + /// WSAGetLastError() reported when this error occurred GRPC_ERROR_INT_WSA_ERROR, + /// File descriptor associated with this error GRPC_ERROR_INT_FD, + /// HTTP status (i.e. 404) + GRPC_ERROR_INT_HTTP_STATUS, + /// context sensitive limit associated with the error + GRPC_ERROR_INT_LIMIT, } grpc_error_ints; typedef enum { + /// top-level textual description of this error GRPC_ERROR_STR_DESCRIPTION, + /// source file in which this error occurred GRPC_ERROR_STR_FILE, + /// operating system description of this error GRPC_ERROR_STR_OS_ERROR, + /// syscall that generated this error GRPC_ERROR_STR_SYSCALL, + /// peer that we were trying to communicate when this error occurred GRPC_ERROR_STR_TARGET_ADDRESS, + /// grpc status message associated with this error GRPC_ERROR_STR_GRPC_MESSAGE, + /// hex dump (or similar) with the data that generated this error GRPC_ERROR_STR_RAW_BYTES, + /// tsi error string associated with this error GRPC_ERROR_STR_TSI_ERROR, + /// filename that we were trying to read/write when this error occurred GRPC_ERROR_STR_FILENAME, } grpc_error_strs; typedef enum { + /// timestamp of error creation GRPC_ERROR_TIME_CREATED, } grpc_error_times; @@ -93,8 +129,17 @@ typedef enum { const char *grpc_error_string(grpc_error *error); void grpc_error_free_string(const char *str); +/// Create an error - but use GRPC_ERROR_CREATE instead grpc_error *grpc_error_create(const char *file, int line, const char *desc, grpc_error **referencing, size_t num_referencing); +/// Create an error (this is the preferred way of generating an error that is +/// not due to a system call - for system calls, use GRPC_OS_ERROR or +/// GRPC_WSA_ERROR as appropriate) +/// \a referencing is an array of num_referencing elements indicating one or +/// more errors that are believed to have contributed to this one +/// err = grpc_error_create(x, y, z, r, nr) is equivalent to: +/// err = grpc_error_create(x, y, z, NULL, 0); +/// for (i=0; i<nr; i++) err = grpc_error_add_child(err, r[i]); #define GRPC_ERROR_CREATE(desc) \ grpc_error_create(__FILE__, __LINE__, desc, NULL, 0) @@ -120,19 +165,26 @@ void grpc_error_unref(grpc_error *err); #endif grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which, - intptr_t value); + intptr_t value) GRPC_MUST_USE_RESULT; bool grpc_error_get_int(grpc_error *error, grpc_error_ints which, intptr_t *p); grpc_error 
*grpc_error_set_time(grpc_error *src, grpc_error_times which, - gpr_timespec value); + gpr_timespec value) GRPC_MUST_USE_RESULT; grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, - const char *value); -grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child); + const char *value) GRPC_MUST_USE_RESULT; +const char *grpc_error_get_str(grpc_error *error, grpc_error_strs which); +/// Add a child error: an error that is believed to have contributed to this +/// error occurring. Allows root causing high level errors from lower level +/// errors that contributed to them. +grpc_error *grpc_error_add_child(grpc_error *src, + grpc_error *child) GRPC_MUST_USE_RESULT; grpc_error *grpc_os_error(const char *file, int line, int err, - const char *call_name); + const char *call_name) GRPC_MUST_USE_RESULT; +/// create an error associated with errno!=0 (an 'operating system' error) #define GRPC_OS_ERROR(err, call_name) \ grpc_os_error(__FILE__, __LINE__, err, call_name) grpc_error *grpc_wsa_error(const char *file, int line, int err, - const char *call_name); + const char *call_name) GRPC_MUST_USE_RESULT; +/// windows only: create an error associated with WSAGetLastError()!=0 #define GRPC_WSA_ERROR(err, call_name) \ grpc_wsa_error(__FILE__, __LINE__, err, call_name) diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c new file mode 100644 index 0000000000..cf0fe736a0 --- /dev/null +++ b/src/core/lib/iomgr/ev_epoll_linux.c @@ -0,0 +1,1872 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include <grpc/grpc_posix.h> +#include <grpc/support/port_platform.h> + +/* This polling engine is only relevant on linux kernels supporting epoll() */ +#ifdef GPR_LINUX_EPOLL + +#include "src/core/lib/iomgr/ev_epoll_linux.h" + +#include <assert.h> +#include <errno.h> +#include <poll.h> +#include <signal.h> +#include <string.h> +#include <sys/epoll.h> +#include <sys/socket.h> +#include <unistd.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/string_util.h> +#include <grpc/support/tls.h> +#include <grpc/support/useful.h> + +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/wakeup_fd_posix.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/support/block_annotate.h" + +/* TODO: sreek - Move this to init.c and initialize this like other tracers. */ +static int grpc_polling_trace = 0; /* Disabled by default */ +#define GRPC_POLLING_TRACE(fmt, ...) \ + if (grpc_polling_trace) { \ + gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \ + } + +static int grpc_wakeup_signal = -1; +static bool is_grpc_wakeup_signal_initialized = false; + +/* Implements the function defined in grpc_posix.h. This function might be + * called before even calling grpc_init() to set either a different signal to + * use. If signum == -1, then the use of signals is disabled */ +void grpc_use_signal(int signum) { + grpc_wakeup_signal = signum; + is_grpc_wakeup_signal_initialized = true; + + if (grpc_wakeup_signal < 0) { + gpr_log(GPR_INFO, + "Use of signals is disabled. Epoll engine will not be used"); + } else { + gpr_log(GPR_INFO, "epoll engine will be using signal: %d", + grpc_wakeup_signal); + } +} + +struct polling_island; + +/******************************************************************************* + * Fd Declarations + */ +struct grpc_fd { + int fd; + /* refst format: + bit 0 : 1=Active / 0=Orphaned + bits 1-n : refcount + Ref/Unref by two to avoid altering the orphaned bit */ + gpr_atm refst; + + gpr_mu mu; + + /* Indicates that the fd is shutdown and that any pending read/write closures + should fail */ + bool shutdown; + + /* The fd is either closed or we relinquished control of it. 
In either cases, + this indicates that the 'fd' on this structure is no longer valid */ + bool orphaned; + + /* TODO: sreek - Move this to a lockfree implementation */ + grpc_closure *read_closure; + grpc_closure *write_closure; + + /* The polling island to which this fd belongs to and the mutex protecting the + the field */ + gpr_mu pi_mu; + struct polling_island *polling_island; + + struct grpc_fd *freelist_next; + grpc_closure *on_done_closure; + + /* The pollset that last noticed that the fd is readable */ + grpc_pollset *read_notifier_pollset; + + grpc_iomgr_object iomgr_object; +}; + +/* Reference counting for fds */ +// #define GRPC_FD_REF_COUNT_DEBUG +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); +static void fd_unref(grpc_fd *fd, const char *reason, const char *file, + int line); +#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__) +#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__) +#else +static void fd_ref(grpc_fd *fd); +static void fd_unref(grpc_fd *fd); +#define GRPC_FD_REF(fd, reason) fd_ref(fd) +#define GRPC_FD_UNREF(fd, reason) fd_unref(fd) +#endif + +static void fd_global_init(void); +static void fd_global_shutdown(void); + +#define CLOSURE_NOT_READY ((grpc_closure *)0) +#define CLOSURE_READY ((grpc_closure *)1) + +/******************************************************************************* + * Polling island Declarations + */ + +// #define GRPC_PI_REF_COUNT_DEBUG +#ifdef GRPC_PI_REF_COUNT_DEBUG + +#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__) +#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__) + +#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */ + +#define PI_ADD_REF(p, r) pi_add_ref((p)) +#define PI_UNREF(p, r) pi_unref((p)) + +#endif /* !defined(GPRC_PI_REF_COUNT_DEBUG) */ + +typedef struct polling_island { + gpr_mu mu; + /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement + the refcount. + Once the ref count becomes zero, this structure is destroyed which means + we should ensure that there is never a scenario where a PI_ADD_REF() is + racing with a PI_UNREF() that just made the ref_count zero. */ + gpr_refcount ref_count; + + /* Pointer to the polling_island this merged into. + * merged_to value is only set once in polling_island's lifetime (and that too + * only if the island is merged with another island). Because of this, we can + * use gpr_atm type here so that we can do atomic access on this and reduce + * lock contention on 'mu' mutex. + * + * Note that if this field is not NULL (i.e not 0), all the remaining fields + * (except mu and ref_count) are invalid and must be ignored. */ + gpr_atm merged_to; + + /* The fd of the underlying epoll set */ + int epoll_fd; + + /* The file descriptors in the epoll set */ + size_t fd_cnt; + size_t fd_capacity; + grpc_fd **fds; + + /* Polling islands that are no longer needed are kept in a freelist so that + they can be reused. 
This field points to the next polling island in the + free list */ + struct polling_island *next_free; +} polling_island; + +/******************************************************************************* + * Pollset Declarations + */ +struct grpc_pollset_worker { + /* Thread id of this worker */ + pthread_t pt_id; + + /* Used to prevent a worker from getting kicked multiple times */ + gpr_atm is_kicked; + struct grpc_pollset_worker *next; + struct grpc_pollset_worker *prev; +}; + +struct grpc_pollset { + gpr_mu mu; + grpc_pollset_worker root_worker; + bool kicked_without_pollers; + + bool shutting_down; /* Is the pollset shutting down ? */ + bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */ + grpc_closure *shutdown_done; /* Called after after shutdown is complete */ + + /* The polling island to which this pollset belongs to */ + struct polling_island *polling_island; +}; + +/******************************************************************************* + * Pollset-set Declarations + */ +/* TODO: sreek - Change the pollset_set implementation such that a pollset_set + * directly points to a polling_island (and adding an fd/pollset/pollset_set to + * the current pollset_set would result in polling island merges. This would + * remove the need to maintain fd_count here. This will also significantly + * simplify the grpc_fd structure since we would no longer need to explicitly + * maintain the orphaned state */ +struct grpc_pollset_set { + gpr_mu mu; + + size_t pollset_count; + size_t pollset_capacity; + grpc_pollset **pollsets; + + size_t pollset_set_count; + size_t pollset_set_capacity; + struct grpc_pollset_set **pollset_sets; + + size_t fd_count; + size_t fd_capacity; + grpc_fd **fds; +}; + +/******************************************************************************* + * Common helpers + */ + +static void append_error(grpc_error **composite, grpc_error *error, + const char *desc) { + if (error == GRPC_ERROR_NONE) return; + if (*composite == GRPC_ERROR_NONE) { + *composite = GRPC_ERROR_CREATE(desc); + } + *composite = grpc_error_add_child(*composite, error); +} + +/******************************************************************************* + * Polling island Definitions + */ + +/* The wakeup fd that is used to wake up all threads in a Polling island. This + is useful in the polling island merge operation where we need to wakeup all + the threads currently polling the smaller polling island (so that they can + start polling the new/merged polling island) + + NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the + threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */ +static grpc_wakeup_fd polling_island_wakeup_fd; + +/* Polling island freelist */ +static gpr_mu g_pi_freelist_mu; +static polling_island *g_pi_freelist = NULL; + +static void polling_island_delete(); /* Forward declaration */ + +#ifdef GRPC_TSAN +/* Currently TSAN may incorrectly flag data races between epoll_ctl and + epoll_wait for any grpc_fd structs that are added to the epoll set via + epoll_ctl and are returned (within a very short window) via epoll_wait(). 
+ + To work-around this race, we establish a happens-before relation between + the code just-before epoll_ctl() and the code after epoll_wait() by using + this atomic */ +gpr_atm g_epoll_sync; +#endif /* defined(GRPC_TSAN) */ + +#ifdef GRPC_PI_REF_COUNT_DEBUG +void pi_add_ref(polling_island *pi); +void pi_unref(polling_island *pi); + +void pi_add_ref_dbg(polling_island *pi, char *reason, char *file, int line) { + long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count)); + pi_add_ref(pi); + gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)", + (void *)pi, old_cnt, old_cnt + 1, reason, file, line); +} + +void pi_unref_dbg(polling_island *pi, char *reason, char *file, int line) { + long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count)); + pi_unref(pi); + gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)", + (void *)pi, old_cnt, (old_cnt - 1), reason, file, line); +} +#endif + +void pi_add_ref(polling_island *pi) { gpr_ref(&pi->ref_count); } + +void pi_unref(polling_island *pi) { + /* If ref count went to zero, delete the polling island. + Note that this deletion not be done under a lock. Once the ref count goes + to zero, we are guaranteed that no one else holds a reference to the + polling island (and that there is no racing pi_add_ref() call either). + + Also, if we are deleting the polling island and the merged_to field is + non-empty, we should remove a ref to the merged_to polling island + */ + if (gpr_unref(&pi->ref_count)) { + polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + polling_island_delete(pi); + if (next != NULL) { + PI_UNREF(next, "pi_delete"); /* Recursive call */ + } + } +} + +/* The caller is expected to hold pi->mu lock before calling this function */ +static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds, + size_t fd_count, bool add_fd_refs, + grpc_error **error) { + int err; + size_t i; + struct epoll_event ev; + char *err_msg; + const char *err_desc = "polling_island_add_fds"; + +#ifdef GRPC_TSAN + /* See the definition of g_epoll_sync for more context */ + gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0); +#endif /* defined(GRPC_TSAN) */ + + for (i = 0; i < fd_count; i++) { + ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); + ev.data.ptr = fds[i]; + err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev); + + if (err < 0) { + if (errno != EEXIST) { + gpr_asprintf( + &err_msg, + "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)", + pi->epoll_fd, fds[i]->fd, errno, strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + gpr_free(err_msg); + } + + continue; + } + + if (pi->fd_cnt == pi->fd_capacity) { + pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2); + pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity); + } + + pi->fds[pi->fd_cnt++] = fds[i]; + if (add_fd_refs) { + GRPC_FD_REF(fds[i], "polling_island"); + } + } +} + +/* The caller is expected to hold pi->mu before calling this */ +static void polling_island_add_wakeup_fd_locked(polling_island *pi, + grpc_wakeup_fd *wakeup_fd, + grpc_error **error) { + struct epoll_event ev; + int err; + char *err_msg; + const char *err_desc = "polling_island_add_wakeup_fd"; + + ev.events = (uint32_t)(EPOLLIN | EPOLLET); + ev.data.ptr = wakeup_fd; + err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, + GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev); + if (err < 0 && errno != EEXIST) { + gpr_asprintf(&err_msg, + "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with " + 
"error: %d (%s)", + pi->epoll_fd, + GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), errno, + strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + gpr_free(err_msg); + } +} + +/* The caller is expected to hold pi->mu lock before calling this function */ +static void polling_island_remove_all_fds_locked(polling_island *pi, + bool remove_fd_refs, + grpc_error **error) { + int err; + size_t i; + char *err_msg; + const char *err_desc = "polling_island_remove_fds"; + + for (i = 0; i < pi->fd_cnt; i++) { + err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL); + if (err < 0 && errno != ENOENT) { + gpr_asprintf(&err_msg, + "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with " + "error: %d (%s)", + pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + gpr_free(err_msg); + } + + if (remove_fd_refs) { + GRPC_FD_UNREF(pi->fds[i], "polling_island"); + } + } + + pi->fd_cnt = 0; +} + +/* The caller is expected to hold pi->mu lock before calling this function */ +static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd, + bool is_fd_closed, + grpc_error **error) { + int err; + size_t i; + char *err_msg; + const char *err_desc = "polling_island_remove_fd"; + + /* If fd is already closed, then it would have been automatically been removed + from the epoll set */ + if (!is_fd_closed) { + err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL); + if (err < 0 && errno != ENOENT) { + gpr_asprintf( + &err_msg, + "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)", + pi->epoll_fd, fd->fd, errno, strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + gpr_free(err_msg); + } + } + + for (i = 0; i < pi->fd_cnt; i++) { + if (pi->fds[i] == fd) { + pi->fds[i] = pi->fds[--pi->fd_cnt]; + GRPC_FD_UNREF(fd, "polling_island"); + break; + } + } +} + +/* Might return NULL in case of an error */ +static polling_island *polling_island_create(grpc_fd *initial_fd, + grpc_error **error) { + polling_island *pi = NULL; + char *err_msg; + const char *err_desc = "polling_island_create"; + + /* Try to get one from the polling island freelist */ + gpr_mu_lock(&g_pi_freelist_mu); + if (g_pi_freelist != NULL) { + pi = g_pi_freelist; + g_pi_freelist = g_pi_freelist->next_free; + pi->next_free = NULL; + } + gpr_mu_unlock(&g_pi_freelist_mu); + + /* Create new polling island if we could not get one from the free list */ + if (pi == NULL) { + pi = gpr_malloc(sizeof(*pi)); + gpr_mu_init(&pi->mu); + pi->fd_cnt = 0; + pi->fd_capacity = 0; + pi->fds = NULL; + } + + gpr_ref_init(&pi->ref_count, 0); + gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); + + pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC); + + if (pi->epoll_fd < 0) { + gpr_asprintf(&err_msg, "epoll_create1 failed with error %d (%s)", errno, + strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + gpr_free(err_msg); + } else { + polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error); + pi->next_free = NULL; + + if (initial_fd != NULL) { + /* Lock the polling island here just in case we got this structure from + the freelist and the polling island lock was not released yet (by the + code that adds the polling island to the freelist) */ + gpr_mu_lock(&pi->mu); + polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); + gpr_mu_unlock(&pi->mu); + } + } + + return pi; +} + +static void polling_island_delete(polling_island *pi) { + GPR_ASSERT(pi->fd_cnt == 0); + 
+ gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); + + close(pi->epoll_fd); + pi->epoll_fd = -1; + + gpr_mu_lock(&g_pi_freelist_mu); + pi->next_free = g_pi_freelist; + g_pi_freelist = pi; + gpr_mu_unlock(&g_pi_freelist_mu); +} + +/* Attempts to gets the last polling island in the linked list (liked by the + * 'merged_to' field). Since this does not lock the polling island, there are no + * guarantees that the island returned is the last island */ +static polling_island *polling_island_maybe_get_latest(polling_island *pi) { + polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + while (next != NULL) { + pi = next; + next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + } + + return pi; +} + +/* Gets the lock on the *latest* polling island i.e the last polling island in + the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the + returned polling island's mu. + Usage: To lock/unlock polling island "pi", do the following: + polling_island *pi_latest = polling_island_lock(pi); + ... + ... critical section .. + ... + gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */ +static polling_island *polling_island_lock(polling_island *pi) { + polling_island *next = NULL; + + while (true) { + next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + if (next == NULL) { + /* Looks like 'pi' is the last node in the linked list but unless we check + this by holding the pi->mu lock, we cannot be sure (i.e without the + pi->mu lock, we don't prevent island merges). + To be absolutely sure, check once more by holding the pi->mu lock */ + gpr_mu_lock(&pi->mu); + next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + if (next == NULL) { + /* pi is infact the last node and we have the pi->mu lock. we're done */ + break; + } + + /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu + * isn't the lock we are interested in. Continue traversing the list */ + gpr_mu_unlock(&pi->mu); + } + + pi = next; + } + + return pi; +} + +/* Gets the lock on the *latest* polling islands in the linked lists pointed by + *p and *q (and also updates *p and *q to point to the latest polling islands) + + This function is needed because calling the following block of code to obtain + locks on polling islands (*p and *q) is prone to deadlocks. + { + polling_island_lock(*p, true); + polling_island_lock(*q, true); + } + + Usage/example: + polling_island *p1; + polling_island *p2; + .. + polling_island_lock_pair(&p1, &p2); + .. + .. Critical section with both p1 and p2 locked + .. + // Release locks: Always call polling_island_unlock_pair() to release locks + polling_island_unlock_pair(p1, p2); +*/ +static void polling_island_lock_pair(polling_island **p, polling_island **q) { + polling_island *pi_1 = *p; + polling_island *pi_2 = *q; + polling_island *next_1 = NULL; + polling_island *next_2 = NULL; + + /* The algorithm is simple: + - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and + keep updating pi_1 and pi_2) + - Then obtain locks on the islands by following a lock order rule of + locking polling_island with lower address first + Special case: Before obtaining the locks, check if pi_1 and pi_2 are + pointing to the same island. 
If that is the case, we can just call + polling_island_lock() + - After obtaining both the locks, double check that the polling islands + are still the last polling islands in their respective linked lists + (this is because there might have been polling island merges before + we got the lock) + - If the polling islands are the last islands, we are done. If not, + release the locks and continue the process from the first step */ + while (true) { + next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); + while (next_1 != NULL) { + pi_1 = next_1; + next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); + } + + next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); + while (next_2 != NULL) { + pi_2 = next_2; + next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); + } + + if (pi_1 == pi_2) { + pi_1 = pi_2 = polling_island_lock(pi_1); + break; + } + + if (pi_1 < pi_2) { + gpr_mu_lock(&pi_1->mu); + gpr_mu_lock(&pi_2->mu); + } else { + gpr_mu_lock(&pi_2->mu); + gpr_mu_lock(&pi_1->mu); + } + + next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); + next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); + if (next_1 == NULL && next_2 == NULL) { + break; + } + + gpr_mu_unlock(&pi_1->mu); + gpr_mu_unlock(&pi_2->mu); + } + + *p = pi_1; + *q = pi_2; +} + +static void polling_island_unlock_pair(polling_island *p, polling_island *q) { + if (p == q) { + gpr_mu_unlock(&p->mu); + } else { + gpr_mu_unlock(&p->mu); + gpr_mu_unlock(&q->mu); + } +} + +static polling_island *polling_island_merge(polling_island *p, + polling_island *q, + grpc_error **error) { + /* Get locks on both the polling islands */ + polling_island_lock_pair(&p, &q); + + if (p != q) { + /* Make sure that p points to the polling island with fewer fds than q */ + if (p->fd_cnt > q->fd_cnt) { + GPR_SWAP(polling_island *, p, q); + } + + /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q + Note that the refcounts on the fds being moved will not change here. 
+ This is why the last param in the following two functions is 'false') */ + polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error); + polling_island_remove_all_fds_locked(p, false, error); + + /* Wakeup all the pollers (if any) on p so that they pickup this change */ + polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error); + + /* Add the 'merged_to' link from p --> q */ + gpr_atm_rel_store(&p->merged_to, (gpr_atm)q); + PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */ + } + /* else if p == q, nothing needs to be done */ + + polling_island_unlock_pair(p, q); + + /* Return the merged polling island (Note that no merge would have happened + if p == q which is ok) */ + return q; +} + +static grpc_error *polling_island_global_init() { + grpc_error *error = GRPC_ERROR_NONE; + + gpr_mu_init(&g_pi_freelist_mu); + g_pi_freelist = NULL; + + error = grpc_wakeup_fd_init(&polling_island_wakeup_fd); + if (error == GRPC_ERROR_NONE) { + error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd); + } + + return error; +} + +static void polling_island_global_shutdown() { + polling_island *next; + gpr_mu_lock(&g_pi_freelist_mu); + gpr_mu_unlock(&g_pi_freelist_mu); + while (g_pi_freelist != NULL) { + next = g_pi_freelist->next_free; + gpr_mu_destroy(&g_pi_freelist->mu); + gpr_free(g_pi_freelist->fds); + gpr_free(g_pi_freelist); + g_pi_freelist = next; + } + gpr_mu_destroy(&g_pi_freelist_mu); + + grpc_wakeup_fd_destroy(&polling_island_wakeup_fd); +} + +/******************************************************************************* + * Fd Definitions + */ + +/* We need to keep a freelist not because of any concerns of malloc performance + * but instead so that implementations with multiple threads in (for example) + * epoll_wait deal with the race between pollset removal and incoming poll + * notifications. + * + * The problem is that the poller ultimately holds a reference to this + * object, so it is very difficult to know when is safe to free it, at least + * without some expensive synchronization. + * + * If we keep the object freelisted, in the worst case losing this race just + * becomes a spurious read notification on a reused fd. + */ + +/* The alarm system needs to be able to wakeup 'some poller' sometimes + * (specifically when a new alarm needs to be triggered earlier than the next + * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a + * case occurs. */ + +/* TODO: sreek: Right now, this wakes up all pollers. 
In future we should make + * sure to wake up one polling thread (which can wake up other threads if + * needed) */ +grpc_wakeup_fd grpc_global_wakeup_fd; + +static grpc_fd *fd_freelist = NULL; +static gpr_mu fd_freelist_mu; + +#ifdef GRPC_FD_REF_COUNT_DEBUG +#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) +#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) +static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { + gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd, + (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); +#else +#define REF_BY(fd, n, reason) ref_by(fd, n) +#define UNREF_BY(fd, n, reason) unref_by(fd, n) +static void ref_by(grpc_fd *fd, int n) { +#endif + GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); +} + +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { + gpr_atm old; + gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd, + (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); +#else +static void unref_by(grpc_fd *fd, int n) { + gpr_atm old; +#endif + old = gpr_atm_full_fetch_add(&fd->refst, -n); + if (old == n) { + /* Add the fd to the freelist */ + gpr_mu_lock(&fd_freelist_mu); + fd->freelist_next = fd_freelist; + fd_freelist = fd; + grpc_iomgr_unregister_object(&fd->iomgr_object); + + gpr_mu_unlock(&fd_freelist_mu); + } else { + GPR_ASSERT(old > n); + } +} + +/* Increment refcount by two to avoid changing the orphan bit */ +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void fd_ref(grpc_fd *fd, const char *reason, const char *file, + int line) { + ref_by(fd, 2, reason, file, line); +} + +static void fd_unref(grpc_fd *fd, const char *reason, const char *file, + int line) { + unref_by(fd, 2, reason, file, line); +} +#else +static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); } +static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); } +#endif + +static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } + +static void fd_global_shutdown(void) { + gpr_mu_lock(&fd_freelist_mu); + gpr_mu_unlock(&fd_freelist_mu); + while (fd_freelist != NULL) { + grpc_fd *fd = fd_freelist; + fd_freelist = fd_freelist->freelist_next; + gpr_mu_destroy(&fd->mu); + gpr_free(fd); + } + gpr_mu_destroy(&fd_freelist_mu); +} + +static grpc_fd *fd_create(int fd, const char *name) { + grpc_fd *new_fd = NULL; + + gpr_mu_lock(&fd_freelist_mu); + if (fd_freelist != NULL) { + new_fd = fd_freelist; + fd_freelist = fd_freelist->freelist_next; + } + gpr_mu_unlock(&fd_freelist_mu); + + if (new_fd == NULL) { + new_fd = gpr_malloc(sizeof(grpc_fd)); + gpr_mu_init(&new_fd->mu); + gpr_mu_init(&new_fd->pi_mu); + } + + /* Note: It is not really needed to get the new_fd->mu lock here. If this is a + newly created fd (or an fd we got from the freelist), no one else would be + holding a lock to it anyway. 
*/ + gpr_mu_lock(&new_fd->mu); + + gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); + new_fd->fd = fd; + new_fd->shutdown = false; + new_fd->orphaned = false; + new_fd->read_closure = CLOSURE_NOT_READY; + new_fd->write_closure = CLOSURE_NOT_READY; + new_fd->polling_island = NULL; + new_fd->freelist_next = NULL; + new_fd->on_done_closure = NULL; + new_fd->read_notifier_pollset = NULL; + + gpr_mu_unlock(&new_fd->mu); + + char *fd_name; + gpr_asprintf(&fd_name, "%s fd=%d", name, fd); + grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); +#ifdef GRPC_FD_REF_COUNT_DEBUG + gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name); +#endif + gpr_free(fd_name); + return new_fd; +} + +static bool fd_is_orphaned(grpc_fd *fd) { + return (gpr_atm_acq_load(&fd->refst) & 1) == 0; +} + +static int fd_wrapped_fd(grpc_fd *fd) { + int ret_fd = -1; + gpr_mu_lock(&fd->mu); + if (!fd->orphaned) { + ret_fd = fd->fd; + } + gpr_mu_unlock(&fd->mu); + + return ret_fd; +} + +static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure *on_done, int *release_fd, + const char *reason) { + bool is_fd_closed = false; + grpc_error *error = GRPC_ERROR_NONE; + + gpr_mu_lock(&fd->mu); + fd->on_done_closure = on_done; + + /* If release_fd is not NULL, we should be relinquishing control of the file + descriptor fd->fd (but we still own the grpc_fd structure). */ + if (release_fd != NULL) { + *release_fd = fd->fd; + } else { + close(fd->fd); + is_fd_closed = true; + } + + fd->orphaned = true; + + /* Remove the active status but keep referenced. We want this grpc_fd struct + to be alive (and not added to freelist) until the end of this function */ + REF_BY(fd, 1, reason); + + /* Remove the fd from the polling island: + - Get a lock on the latest polling island (i.e the last island in the + linked list pointed by fd->polling_island). This is the island that + would actually contain the fd + - Remove the fd from the latest polling island + - Unlock the latest polling island + - Set fd->polling_island to NULL (but remove the ref on the polling island + before doing this.) */ + gpr_mu_lock(&fd->pi_mu); + if (fd->polling_island != NULL) { + polling_island *pi_latest = polling_island_lock(fd->polling_island); + polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error); + gpr_mu_unlock(&pi_latest->mu); + + PI_UNREF(fd->polling_island, "fd_orphan"); + fd->polling_island = NULL; + } + gpr_mu_unlock(&fd->pi_mu); + + grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, error, NULL); + + gpr_mu_unlock(&fd->mu); + UNREF_BY(fd, 2, reason); /* Drop the reference */ + GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); +} + +static grpc_error *fd_shutdown_error(bool shutdown) { + if (!shutdown) { + return GRPC_ERROR_NONE; + } else { + return GRPC_ERROR_CREATE("FD shutdown"); + } +} + +static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure **st, grpc_closure *closure) { + if (fd->shutdown) { + grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"), + NULL); + } else if (*st == CLOSURE_NOT_READY) { + /* not ready ==> switch to a waiting state by setting the closure */ + *st = closure; + } else if (*st == CLOSURE_READY) { + /* already ready ==> queue the closure to run immediately */ + *st = CLOSURE_NOT_READY; + grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown), + NULL); + } else { + /* upcallptr was set to a different closure. This is an error! 
*/ + gpr_log(GPR_ERROR, + "User called a notify_on function with a previous callback still " + "pending"); + abort(); + } +} + +/* returns 1 if state becomes not ready */ +static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure **st) { + if (*st == CLOSURE_READY) { + /* duplicate ready ==> ignore */ + return 0; + } else if (*st == CLOSURE_NOT_READY) { + /* not ready, and not waiting ==> flag ready */ + *st = CLOSURE_READY; + return 0; + } else { + /* waiting ==> queue closure */ + grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL); + *st = CLOSURE_NOT_READY; + return 1; + } +} + +static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, + grpc_fd *fd) { + grpc_pollset *notifier = NULL; + + gpr_mu_lock(&fd->mu); + notifier = fd->read_notifier_pollset; + gpr_mu_unlock(&fd->mu); + + return notifier; +} + +static bool fd_is_shutdown(grpc_fd *fd) { + gpr_mu_lock(&fd->mu); + const bool r = fd->shutdown; + gpr_mu_unlock(&fd->mu); + return r; +} + +/* Might be called multiple times */ +static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { + gpr_mu_lock(&fd->mu); + /* Do the actual shutdown only once */ + if (!fd->shutdown) { + fd->shutdown = true; + + shutdown(fd->fd, SHUT_RDWR); + /* Flush any pending read and write closures. Since fd->shutdown is 'true' + at this point, the closures would be called with 'success = false' */ + set_ready_locked(exec_ctx, fd, &fd->read_closure); + set_ready_locked(exec_ctx, fd, &fd->write_closure); + } + gpr_mu_unlock(&fd->mu); +} + +static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure *closure) { + gpr_mu_lock(&fd->mu); + notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); + gpr_mu_unlock(&fd->mu); +} + +static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure *closure) { + gpr_mu_lock(&fd->mu); + notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); + gpr_mu_unlock(&fd->mu); +} + +/******************************************************************************* + * Pollset Definitions + */ +GPR_TLS_DECL(g_current_thread_pollset); +GPR_TLS_DECL(g_current_thread_worker); +static __thread bool g_initialized_sigmask; +static __thread sigset_t g_orig_sigmask; + +static void sig_handler(int sig_num) { +#ifdef GRPC_EPOLL_DEBUG + gpr_log(GPR_INFO, "Received signal %d", sig_num); +#endif +} + +static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); } + +/* Global state management */ +static grpc_error *pollset_global_init(void) { + gpr_tls_init(&g_current_thread_pollset); + gpr_tls_init(&g_current_thread_worker); + poller_kick_init(); + return grpc_wakeup_fd_init(&grpc_global_wakeup_fd); +} + +static void pollset_global_shutdown(void) { + grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd); + gpr_tls_destroy(&g_current_thread_pollset); + gpr_tls_destroy(&g_current_thread_worker); +} + +static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) { + grpc_error *err = GRPC_ERROR_NONE; + + /* Kick the worker only if it was not already kicked */ + if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) { + GRPC_POLLING_TRACE( + "pollset_worker_kick: Kicking worker: %p (thread id: %ld)", + (void *)worker, worker->pt_id); + int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal); + if (err_num != 0) { + err = GRPC_OS_ERROR(err_num, "pthread_kill"); + } + } + return err; +} + +/* Return 1 if the pollset has active threads in pollset_work (pollset must + * be locked) */ +static int 
pollset_has_workers(grpc_pollset *p) { + return p->root_worker.next != &p->root_worker; +} + +static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { + worker->prev->next = worker->next; + worker->next->prev = worker->prev; +} + +static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { + if (pollset_has_workers(p)) { + grpc_pollset_worker *w = p->root_worker.next; + remove_worker(p, w); + return w; + } else { + return NULL; + } +} + +static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) { + worker->next = &p->root_worker; + worker->prev = worker->next->prev; + worker->prev->next = worker->next->prev = worker; +} + +static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { + worker->prev = &p->root_worker; + worker->next = worker->prev->next; + worker->prev->next = worker->next->prev = worker; +} + +/* p->mu must be held before calling this function */ +static grpc_error *pollset_kick(grpc_pollset *p, + grpc_pollset_worker *specific_worker) { + GPR_TIMER_BEGIN("pollset_kick", 0); + grpc_error *error = GRPC_ERROR_NONE; + const char *err_desc = "Kick Failure"; + grpc_pollset_worker *worker = specific_worker; + if (worker != NULL) { + if (worker == GRPC_POLLSET_KICK_BROADCAST) { + if (pollset_has_workers(p)) { + GPR_TIMER_BEGIN("pollset_kick.broadcast", 0); + for (worker = p->root_worker.next; worker != &p->root_worker; + worker = worker->next) { + if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { + append_error(&error, pollset_worker_kick(worker), err_desc); + } + } + GPR_TIMER_END("pollset_kick.broadcast", 0); + } else { + p->kicked_without_pollers = true; + } + } else { + GPR_TIMER_MARK("kicked_specifically", 0); + if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { + append_error(&error, pollset_worker_kick(worker), err_desc); + } + } + } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) { + /* Since worker == NULL, it means that we can kick "any" worker on this + pollset 'p'. If 'p' happens to be the same pollset this thread is + currently polling (i.e in pollset_work() function), then there is no need + to kick any other worker since the current thread can just absorb the + kick. 
This is the reason why we enter this case only when + g_current_thread_pollset is != p */ + + GPR_TIMER_MARK("kick_anonymous", 0); + worker = pop_front_worker(p); + if (worker != NULL) { + GPR_TIMER_MARK("finally_kick", 0); + push_back_worker(p, worker); + append_error(&error, pollset_worker_kick(worker), err_desc); + } else { + GPR_TIMER_MARK("kicked_no_pollers", 0); + p->kicked_without_pollers = true; + } + } + + GPR_TIMER_END("pollset_kick", 0); + GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error)); + return error; +} + +static grpc_error *kick_poller(void) { + return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); +} + +static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { + gpr_mu_init(&pollset->mu); + *mu = &pollset->mu; + + pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; + pollset->kicked_without_pollers = false; + + pollset->shutting_down = false; + pollset->finish_shutdown_called = false; + pollset->shutdown_done = NULL; + + pollset->polling_island = NULL; +} + +/* Convert a timespec to milliseconds: + - Very small or negative poll times are clamped to zero to do a non-blocking + poll (which becomes spin polling) + - Other small values are rounded up to one millisecond + - Longer than a millisecond polls are rounded up to the next nearest + millisecond to avoid spinning + - Infinite timeouts are converted to -1 */ +static int poll_deadline_to_millis_timeout(gpr_timespec deadline, + gpr_timespec now) { + gpr_timespec timeout; + static const int64_t max_spin_polling_us = 10; + if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { + return -1; + } + + if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( + max_spin_polling_us, + GPR_TIMESPAN))) <= 0) { + return 0; + } + timeout = gpr_time_sub(deadline, now); + return gpr_time_to_millis(gpr_time_add( + timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); +} + +static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_pollset *notifier) { + /* Need the fd->mu since we might be racing with fd_notify_on_read */ + gpr_mu_lock(&fd->mu); + set_ready_locked(exec_ctx, fd, &fd->read_closure); + fd->read_notifier_pollset = notifier; + gpr_mu_unlock(&fd->mu); +} + +static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { + /* Need the fd->mu since we might be racing with fd_notify_on_write */ + gpr_mu_lock(&fd->mu); + set_ready_locked(exec_ctx, fd, &fd->write_closure); + gpr_mu_unlock(&fd->mu); +} + +static void pollset_release_polling_island(grpc_pollset *ps, char *reason) { + if (ps->polling_island != NULL) { + PI_UNREF(ps->polling_island, reason); + } + ps->polling_island = NULL; +} + +static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset) { + /* The pollset cannot have any workers if we are at this stage */ + GPR_ASSERT(!pollset_has_workers(pollset)); + + pollset->finish_shutdown_called = true; + + /* Release the ref and set pollset->polling_island to NULL */ + pollset_release_polling_island(pollset, "ps_shutdown"); + grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL); +} + +/* pollset->mu lock must be held by the caller before calling this */ +static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_closure *closure) { + GPR_TIMER_BEGIN("pollset_shutdown", 0); + GPR_ASSERT(!pollset->shutting_down); + pollset->shutting_down = true; + pollset->shutdown_done = closure; + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); + + /* If the pollset 
has any workers, we cannot call finish_shutdown_locked() + because it would release the underlying polling island. In such a case, we + let the last worker call finish_shutdown_locked() from pollset_work() */ + if (!pollset_has_workers(pollset)) { + GPR_ASSERT(!pollset->finish_shutdown_called); + GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0); + finish_shutdown_locked(exec_ctx, pollset); + } + GPR_TIMER_END("pollset_shutdown", 0); +} + +/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other + * than destroying the mutexes, there is nothing special that needs to be done + * here */ +static void pollset_destroy(grpc_pollset *pollset) { + GPR_ASSERT(!pollset_has_workers(pollset)); + gpr_mu_destroy(&pollset->mu); +} + +static void pollset_reset(grpc_pollset *pollset) { + GPR_ASSERT(pollset->shutting_down); + GPR_ASSERT(!pollset_has_workers(pollset)); + pollset->shutting_down = false; + pollset->finish_shutdown_called = false; + pollset->kicked_without_pollers = false; + pollset->shutdown_done = NULL; + pollset_release_polling_island(pollset, "ps_reset"); +} + +#define GRPC_EPOLL_MAX_EVENTS 1000 +/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */ +static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, + grpc_pollset_worker *worker, int timeout_ms, + sigset_t *sig_mask, grpc_error **error) { + struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS]; + int epoll_fd = -1; + int ep_rv; + polling_island *pi = NULL; + char *err_msg; + const char *err_desc = "pollset_work_and_unlock"; + GPR_TIMER_BEGIN("pollset_work_and_unlock", 0); + + /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the + latest polling island pointed by pollset->polling_island. + + Since epoll_fd is immutable, we can read it without obtaining the polling + island lock. There is however a possibility that the polling island (from + which we got the epoll_fd) got merged with another island while we are + in this function. This is still okay because in such a case, we will wakeup + right-away from epoll_wait() and pick up the latest polling_island the next + this function (i.e pollset_work_and_unlock()) is called */ + + if (pollset->polling_island == NULL) { + pollset->polling_island = polling_island_create(NULL, error); + if (pollset->polling_island == NULL) { + GPR_TIMER_END("pollset_work_and_unlock", 0); + return; /* Fatal error. 
We cannot continue */ + } + + PI_ADD_REF(pollset->polling_island, "ps"); + GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p", + (void *)pollset, (void *)pollset->polling_island); + } + + pi = polling_island_maybe_get_latest(pollset->polling_island); + epoll_fd = pi->epoll_fd; + + /* Update the pollset->polling_island since the island being pointed to by + pollset->polling_island may be older than the one pointed to by pi */ + if (pollset->polling_island != pi) { + /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the + polling island to be deleted */ + PI_ADD_REF(pi, "ps"); + PI_UNREF(pollset->polling_island, "ps"); + pollset->polling_island = pi; + } + + /* Add an extra ref so that the island does not get destroyed (which means + the epoll_fd won't be closed) while we are doing an epoll_wait() on the + epoll_fd */ + PI_ADD_REF(pi, "ps_work"); + gpr_mu_unlock(&pollset->mu); + + do { + ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, + sig_mask); + if (ep_rv < 0) { + if (errno != EINTR) { + gpr_asprintf(&err_msg, + "epoll_wait() epoll fd: %d failed with error: %d (%s)", + epoll_fd, errno, strerror(errno)); + append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); + } else { + /* We were interrupted. Save an iteration by doing a zero timeout + epoll_wait to see if there are any other events of interest */ + GRPC_POLLING_TRACE( + "pollset_work: pollset: %p, worker: %p received kick", + (void *)pollset, (void *)worker); + ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0); + } + } + +#ifdef GRPC_TSAN + /* See the definition of g_poll_sync for more details */ + gpr_atm_acq_load(&g_epoll_sync); +#endif /* defined(GRPC_TSAN) */ + + for (int i = 0; i < ep_rv; ++i) { + void *data_ptr = ep_ev[i].data.ptr; + if (data_ptr == &grpc_global_wakeup_fd) { + append_error(error, + grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd), + err_desc); + } else if (data_ptr == &polling_island_wakeup_fd) { + GRPC_POLLING_TRACE( + "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: " + "%d) got merged", + (void *)pollset, (void *)worker, epoll_fd); + /* This means that our polling island is merged with a different + island. We do not have to do anything here since the subsequent call + to the function pollset_work_and_unlock() will pick up the correct + epoll_fd */ + } else { + grpc_fd *fd = data_ptr; + int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); + int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); + int write_ev = ep_ev[i].events & EPOLLOUT; + if (read_ev || cancel) { + fd_become_readable(exec_ctx, fd, pollset); + } + if (write_ev || cancel) { + fd_become_writable(exec_ctx, fd); + } + } + } + } while (ep_rv == GRPC_EPOLL_MAX_EVENTS); + + GPR_ASSERT(pi != NULL); + + /* Before leaving, release the extra ref we added to the polling island. It + is important to use "pi" here (i.e our old copy of pollset->polling_island + that we got before releasing the polling island lock). This is because + pollset->polling_island pointer might get updated in other parts of the + code when there is an island merge while we are doing epoll_wait() above */ + PI_UNREF(pi, "ps_work"); + + GPR_TIMER_END("pollset_work_and_unlock", 0); +} + +/* pollset->mu lock must be held by the caller before calling this.
+ The function pollset_work() may temporarily release the lock (pollset->mu) + during the course of its execution but it will always re-acquire the lock and + ensure that it is held by the time the function returns */ +static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_pollset_worker **worker_hdl, + gpr_timespec now, gpr_timespec deadline) { + GPR_TIMER_BEGIN("pollset_work", 0); + grpc_error *error = GRPC_ERROR_NONE; + int timeout_ms = poll_deadline_to_millis_timeout(deadline, now); + + sigset_t new_mask; + + grpc_pollset_worker worker; + worker.next = worker.prev = NULL; + worker.pt_id = pthread_self(); + gpr_atm_no_barrier_store(&worker.is_kicked, (gpr_atm)0); + + *worker_hdl = &worker; + + gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); + gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); + + if (pollset->kicked_without_pollers) { + /* If the pollset was kicked without pollers, pretend that the current + worker got the kick and skip polling. A kick indicates that there is some + work that needs attention like an event on the completion queue or an + alarm */ + GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0); + pollset->kicked_without_pollers = 0; + } else if (!pollset->shutting_down) { + /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up + (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the + worker that there is some pending work that needs immediate attention + (like an event on the completion queue, or a polling island merge that + results in a new epoll-fd to wait on) and that the worker should not + spend time waiting in epoll_pwait(). + + A worker can be kicked anytime from the point it is added to the pollset + via push_front_worker() (or push_back_worker()) to the point it is + removed via remove_worker(). + If the worker is kicked before/during it calls epoll_pwait(), it should + immediately exit from epoll_wait(). If the worker is kicked after it + returns from epoll_wait(), then nothing really needs to be done. + + To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all + times *except* when it is in epoll_pwait(). This way, the worker never + misses acting on a kick */ + + if (!g_initialized_sigmask) { + sigemptyset(&new_mask); + sigaddset(&new_mask, grpc_wakeup_signal); + pthread_sigmask(SIG_BLOCK, &new_mask, &g_orig_sigmask); + sigdelset(&g_orig_sigmask, grpc_wakeup_signal); + g_initialized_sigmask = true; + /* new_mask: The new thread mask which blocks 'grpc_wakeup_signal'. + This is the mask used at all times *except during + epoll_wait()*" + g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and + this is the mask to use *during epoll_wait()* + + The new_mask is set on the worker before it is added to the pollset + (i.e before it can be kicked) */ + } + + push_front_worker(pollset, &worker); /* Add worker to pollset */ + + pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms, + &g_orig_sigmask, &error); + grpc_exec_ctx_flush(exec_ctx); + + gpr_mu_lock(&pollset->mu); + + /* Note: There is no need to reset worker.is_kicked to 0 since we are no + longer going to use this worker */ + remove_worker(pollset, &worker); + } + + /* If we are the last worker on the pollset (i.e pollset_has_workers() is + false at this point) and the pollset is shutting down, we may have to + finish the shutdown process by calling finish_shutdown_locked(). + See pollset_shutdown() for more details. 
+ + Note: Continuing to access pollset here is safe; it is the caller's + responsibility to not destroy a pollset when it has outstanding calls to + pollset_work() */ + if (pollset->shutting_down && !pollset_has_workers(pollset) && + !pollset->finish_shutdown_called) { + GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); + finish_shutdown_locked(exec_ctx, pollset); + + gpr_mu_unlock(&pollset->mu); + grpc_exec_ctx_flush(exec_ctx); + gpr_mu_lock(&pollset->mu); + } + + *worker_hdl = NULL; + + gpr_tls_set(&g_current_thread_pollset, (intptr_t)0); + gpr_tls_set(&g_current_thread_worker, (intptr_t)0); + + GPR_TIMER_END("pollset_work", 0); + + GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); + return error; +} + +static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_fd *fd) { + grpc_error *error = GRPC_ERROR_NONE; + + gpr_mu_lock(&pollset->mu); + gpr_mu_lock(&fd->pi_mu); + + polling_island *pi_new = NULL; + + /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and + * equal, do nothing. + * 2) If fd->polling_island and pollset->polling_island are both NULL, create + * a new polling island (with a refcount of 2) and make the polling_island + * fields in both fd and pollset to point to the new island + * 3) If one of fd->polling_island or pollset->polling_island is NULL, update + * the NULL polling_island field to point to the non-NULL polling_island + * field (ensure that the refcount on the polling island is incremented by + * 1 to account for the newly added reference) + * 4) Finally, if fd->polling_island and pollset->polling_island are non-NULL + * and different, merge both the polling islands and update the + * polling_island fields in both fd and pollset to point to the merged + * polling island. + */ + if (fd->polling_island == pollset->polling_island) { + pi_new = fd->polling_island; + if (pi_new == NULL) { + pi_new = polling_island_create(fd, &error); + + GRPC_POLLING_TRACE( + "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, " + "pollset: %p)", + (void *)pi_new, fd->fd, (void *)pollset); + } + } else if (fd->polling_island == NULL) { + pi_new = polling_island_lock(pollset->polling_island); + polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); + gpr_mu_unlock(&pi_new->mu); + + GRPC_POLLING_TRACE( + "pollset_add_fd: fd->pi was NULL. pi_new: %p (fd: %d, pollset: %p, " + "pollset->pi: %p)", + (void *)pi_new, fd->fd, (void *)pollset, + (void *)pollset->polling_island); + } else if (pollset->polling_island == NULL) { + pi_new = polling_island_lock(fd->polling_island); + gpr_mu_unlock(&pi_new->mu); + + GRPC_POLLING_TRACE( + "pollset_add_fd: pollset->pi was NULL. pi_new: %p (fd: %d, pollset: " + "%p, fd->pi: %p", + (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island); + } else { + pi_new = polling_island_merge(fd->polling_island, pollset->polling_island, + &error); + GRPC_POLLING_TRACE( + "pollset_add_fd: polling islands merged. 
pi_new: %p (fd: %d, pollset: " + "%p, fd->pi: %p, pollset->pi: %p)", + (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island, + (void *)pollset->polling_island); + } + + /* At this point, pi_new is the polling island that both fd->polling_island + and pollset->polling_island must be pointing to */ + + if (fd->polling_island != pi_new) { + PI_ADD_REF(pi_new, "fd"); + if (fd->polling_island != NULL) { + PI_UNREF(fd->polling_island, "fd"); + } + fd->polling_island = pi_new; + } + + if (pollset->polling_island != pi_new) { + PI_ADD_REF(pi_new, "ps"); + if (pollset->polling_island != NULL) { + PI_UNREF(pollset->polling_island, "ps"); + } + pollset->polling_island = pi_new; + } + + gpr_mu_unlock(&fd->pi_mu); + gpr_mu_unlock(&pollset->mu); +} + +/******************************************************************************* + * Pollset-set Definitions + */ + +static grpc_pollset_set *pollset_set_create(void) { + grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set)); + memset(pollset_set, 0, sizeof(*pollset_set)); + gpr_mu_init(&pollset_set->mu); + return pollset_set; +} + +static void pollset_set_destroy(grpc_pollset_set *pollset_set) { + size_t i; + gpr_mu_destroy(&pollset_set->mu); + for (i = 0; i < pollset_set->fd_count; i++) { + GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); + } + gpr_free(pollset_set->pollsets); + gpr_free(pollset_set->pollset_sets); + gpr_free(pollset_set->fds); + gpr_free(pollset_set); +} + +static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, grpc_fd *fd) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + if (pollset_set->fd_count == pollset_set->fd_capacity) { + pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity); + pollset_set->fds = gpr_realloc( + pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds)); + } + GRPC_FD_REF(fd, "pollset_set"); + pollset_set->fds[pollset_set->fd_count++] = fd; + for (i = 0; i < pollset_set->pollset_count; i++) { + pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd); + } + for (i = 0; i < pollset_set->pollset_set_count; i++) { + pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + } + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, grpc_fd *fd) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + for (i = 0; i < pollset_set->fd_count; i++) { + if (pollset_set->fds[i] == fd) { + pollset_set->fd_count--; + GPR_SWAP(grpc_fd *, pollset_set->fds[i], + pollset_set->fds[pollset_set->fd_count]); + GRPC_FD_UNREF(fd, "pollset_set"); + break; + } + } + for (i = 0; i < pollset_set->pollset_set_count; i++) { + pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + } + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, + grpc_pollset *pollset) { + size_t i, j; + gpr_mu_lock(&pollset_set->mu); + if (pollset_set->pollset_count == pollset_set->pollset_capacity) { + pollset_set->pollset_capacity = + GPR_MAX(8, 2 * pollset_set->pollset_capacity); + pollset_set->pollsets = + gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * + sizeof(*pollset_set->pollsets)); + } + pollset_set->pollsets[pollset_set->pollset_count++] = pollset; + for (i = 0, j = 0; i < pollset_set->fd_count; i++) { + if (fd_is_orphaned(pollset_set->fds[i])) { + GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); + } else { + pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]); + pollset_set->fds[j++] 
= pollset_set->fds[i]; + } + } + pollset_set->fd_count = j; + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, + grpc_pollset *pollset) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + for (i = 0; i < pollset_set->pollset_count; i++) { + if (pollset_set->pollsets[i] == pollset) { + pollset_set->pollset_count--; + GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i], + pollset_set->pollsets[pollset_set->pollset_count]); + break; + } + } + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *bag, + grpc_pollset_set *item) { + size_t i, j; + gpr_mu_lock(&bag->mu); + if (bag->pollset_set_count == bag->pollset_set_capacity) { + bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity); + bag->pollset_sets = + gpr_realloc(bag->pollset_sets, + bag->pollset_set_capacity * sizeof(*bag->pollset_sets)); + } + bag->pollset_sets[bag->pollset_set_count++] = item; + for (i = 0, j = 0; i < bag->fd_count; i++) { + if (fd_is_orphaned(bag->fds[i])) { + GRPC_FD_UNREF(bag->fds[i], "pollset_set"); + } else { + pollset_set_add_fd(exec_ctx, item, bag->fds[i]); + bag->fds[j++] = bag->fds[i]; + } + } + bag->fd_count = j; + gpr_mu_unlock(&bag->mu); +} + +static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *bag, + grpc_pollset_set *item) { + size_t i; + gpr_mu_lock(&bag->mu); + for (i = 0; i < bag->pollset_set_count; i++) { + if (bag->pollset_sets[i] == item) { + bag->pollset_set_count--; + GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i], + bag->pollset_sets[bag->pollset_set_count]); + break; + } + } + gpr_mu_unlock(&bag->mu); +} + +/* Test helper functions + * */ +void *grpc_fd_get_polling_island(grpc_fd *fd) { + polling_island *pi; + + gpr_mu_lock(&fd->pi_mu); + pi = fd->polling_island; + gpr_mu_unlock(&fd->pi_mu); + + return pi; +} + +void *grpc_pollset_get_polling_island(grpc_pollset *ps) { + polling_island *pi; + + gpr_mu_lock(&ps->mu); + pi = ps->polling_island; + gpr_mu_unlock(&ps->mu); + + return pi; +} + +bool grpc_are_polling_islands_equal(void *p, void *q) { + polling_island *p1 = p; + polling_island *p2 = q; + + /* Note: polling_island_lock_pair() may change p1 and p2 to point to the + latest polling islands in their respective linked lists */ + polling_island_lock_pair(&p1, &p2); + polling_island_unlock_pair(p1, p2); + + return p1 == p2; +} + +/******************************************************************************* + * Event engine binding + */ + +static void shutdown_engine(void) { + fd_global_shutdown(); + pollset_global_shutdown(); + polling_island_global_shutdown(); +} + +static const grpc_event_engine_vtable vtable = { + .pollset_size = sizeof(grpc_pollset), + + .fd_create = fd_create, + .fd_wrapped_fd = fd_wrapped_fd, + .fd_orphan = fd_orphan, + .fd_shutdown = fd_shutdown, + .fd_is_shutdown = fd_is_shutdown, + .fd_notify_on_read = fd_notify_on_read, + .fd_notify_on_write = fd_notify_on_write, + .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, + + .pollset_init = pollset_init, + .pollset_shutdown = pollset_shutdown, + .pollset_reset = pollset_reset, + .pollset_destroy = pollset_destroy, + .pollset_work = pollset_work, + .pollset_kick = pollset_kick, + .pollset_add_fd = pollset_add_fd, + + .pollset_set_create = pollset_set_create, + .pollset_set_destroy = pollset_set_destroy, + .pollset_set_add_pollset = pollset_set_add_pollset, + .pollset_set_del_pollset = pollset_set_del_pollset, + 
.pollset_set_add_pollset_set = pollset_set_add_pollset_set, + .pollset_set_del_pollset_set = pollset_set_del_pollset_set, + .pollset_set_add_fd = pollset_set_add_fd, + .pollset_set_del_fd = pollset_set_del_fd, + + .kick_poller = kick_poller, + + .shutdown_engine = shutdown_engine, +}; + +/* It is possible that GLIBC has epoll but the underlying kernel doesn't. + * Create a dummy epoll_fd to make sure epoll support is available */ +static bool is_epoll_available() { + int fd = epoll_create1(EPOLL_CLOEXEC); + if (fd < 0) { + gpr_log( + GPR_ERROR, + "epoll_create1 failed with error: %d. Not using epoll polling engine", + fd); + return false; + } + close(fd); + return true; +} + +const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { + /* If use of signals is disabled, we cannot use epoll engine*/ + if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) { + return NULL; + } + + if (!is_epoll_available()) { + return NULL; + } + + if (!is_grpc_wakeup_signal_initialized) { + grpc_use_signal(SIGRTMIN + 2); + } + + fd_global_init(); + + if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { + return NULL; + } + + if (!GRPC_LOG_IF_ERROR("polling_island_global_init", + polling_island_global_init())) { + return NULL; + } + + return &vtable; +} + +#else /* defined(GPR_LINUX_EPOLL) */ +#if defined(GPR_POSIX_SOCKET) +#include "src/core/lib/iomgr/ev_posix.h" +/* If GPR_LINUX_EPOLL is not defined, it means epoll is not available. Return + * NULL */ +const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; } +#endif /* defined(GPR_POSIX_SOCKET) */ + +void grpc_use_signal(int signum) {} +#endif /* !defined(GPR_LINUX_EPOLL) */ diff --git a/src/core/lib/iomgr/ev_epoll_linux.h b/src/core/lib/iomgr/ev_epoll_linux.h new file mode 100644 index 0000000000..7a494aba19 --- /dev/null +++ b/src/core/lib/iomgr/ev_epoll_linux.h @@ -0,0 +1,47 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H +#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H + +#include "src/core/lib/iomgr/ev_posix.h" + +const grpc_event_engine_vtable *grpc_init_epoll_linux(void); + +#ifdef GPR_LINUX_EPOLL +void *grpc_fd_get_polling_island(grpc_fd *fd); +void *grpc_pollset_get_polling_island(grpc_pollset *ps); +bool grpc_are_polling_islands_equal(void *p, void *q); +#endif /* defined(GPR_LINUX_EPOLL) */ + +#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H */ diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c new file mode 100644 index 0000000000..9e306af5fa --- /dev/null +++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c @@ -0,0 +1,2043 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* This file will be removed shortly: it's here to keep refactoring + * steps simple and auditable. 
+ * It's the combination of the old files: + * - fd_posix.{h,c} + * - pollset_posix.{h,c} + * - pullset_multipoller_with_{poll,epoll}.{h,c} + * The new version will be split into: + * - ev_poll_posix.{h,c} + * - ev_epoll_posix.{h,c} + */ + +#include <grpc/support/port_platform.h> + +#ifdef GPR_POSIX_SOCKET + +#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h" + +#include <assert.h> +#include <errno.h> +#include <poll.h> +#include <string.h> +#include <sys/socket.h> +#include <unistd.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/string_util.h> +#include <grpc/support/tls.h> +#include <grpc/support/useful.h> + +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/wakeup_fd_posix.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/support/block_annotate.h" + +/******************************************************************************* + * FD declarations + */ + +typedef struct grpc_fd_watcher { + struct grpc_fd_watcher *next; + struct grpc_fd_watcher *prev; + grpc_pollset *pollset; + grpc_pollset_worker *worker; + grpc_fd *fd; +} grpc_fd_watcher; + +struct grpc_fd { + int fd; + /* refst format: + bit0: 1=active/0=orphaned + bit1-n: refcount + meaning that mostly we ref by two to avoid altering the orphaned bit, + and just unref by 1 when we're ready to flag the object as orphaned */ + gpr_atm refst; + + gpr_mu mu; + int shutdown; + int closed; + int released; + + /* The watcher list. + + The following watcher related fields are protected by watcher_mu. + + An fd_watcher is an ephemeral object created when an fd wants to + begin polling, and destroyed after the poll. + + It denotes the fd's interest in whether to read poll or write poll + or both or neither on this fd. + + If a watcher is asked to poll for reads or writes, the read_watcher + or write_watcher fields are set respectively. A watcher may be asked + to poll for both, in which case both fields will be set. + + read_watcher and write_watcher may be NULL if no watcher has been + asked to poll for reads or writes. + + If an fd_watcher is not asked to poll for reads or writes, it's added + to a linked list of inactive watchers, rooted at inactive_watcher_root. + If at a later time there becomes need of a poller to poll, one of + the inactive pollers may be kicked out of their poll loops to take + that responsibility. */ + grpc_fd_watcher inactive_watcher_root; + grpc_fd_watcher *read_watcher; + grpc_fd_watcher *write_watcher; + + grpc_closure *read_closure; + grpc_closure *write_closure; + + struct grpc_fd *freelist_next; + + grpc_closure *on_done_closure; + + grpc_iomgr_object iomgr_object; + + /* The pollset that last noticed and notified that the fd is readable */ + grpc_pollset *read_notifier_pollset; +}; + +/* Begin polling on an fd. + Registers that the given pollset is interested in this fd - so that if read + or writability interest changes, the pollset can be kicked to pick up that + new interest. + Return value is: + (fd_needs_read? read_mask : 0) | (fd_needs_write? write_mask : 0) + i.e. a combination of read_mask and write_mask determined by the fd's current + interest in said events. + Polling strategies that do not need to alter their behavior depending on the + fd's current interest (such as epoll) do not need to call this function. 
+ MUST NOT be called with a pollset lock taken */ +static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, + grpc_pollset_worker *worker, uint32_t read_mask, + uint32_t write_mask, grpc_fd_watcher *rec); +/* Complete polling previously started with fd_begin_poll + MUST NOT be called with a pollset lock taken + if got_read or got_write are 1, also does the become_{readable,writable} as + appropriate. */ +static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec, + int got_read, int got_write, + grpc_pollset *read_notifier_pollset); + +/* Return 1 if this fd is orphaned, 0 otherwise */ +static bool fd_is_orphaned(grpc_fd *fd); + +/* Reference counting for fds */ +/*#define GRPC_FD_REF_COUNT_DEBUG*/ +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); +static void fd_unref(grpc_fd *fd, const char *reason, const char *file, + int line); +#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__) +#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__) +#else +static void fd_ref(grpc_fd *fd); +static void fd_unref(grpc_fd *fd); +#define GRPC_FD_REF(fd, reason) fd_ref(fd) +#define GRPC_FD_UNREF(fd, reason) fd_unref(fd) +#endif + +static void fd_global_init(void); +static void fd_global_shutdown(void); + +#define CLOSURE_NOT_READY ((grpc_closure *)0) +#define CLOSURE_READY ((grpc_closure *)1) + +/******************************************************************************* + * pollset declarations + */ + +typedef struct grpc_pollset_vtable grpc_pollset_vtable; + +typedef struct grpc_cached_wakeup_fd { + grpc_wakeup_fd fd; + struct grpc_cached_wakeup_fd *next; +} grpc_cached_wakeup_fd; + +struct grpc_pollset_worker { + grpc_cached_wakeup_fd *wakeup_fd; + int reevaluate_polling_on_wakeup; + int kicked_specifically; + struct grpc_pollset_worker *next; + struct grpc_pollset_worker *prev; +}; + +struct grpc_pollset { + /* pollsets under posix can mutate representation as fds are added and + removed. 
+ For example, we may choose a poll() based implementation on linux for + few fds, and an epoll() based implementation for many fds */ + const grpc_pollset_vtable *vtable; + gpr_mu mu; + grpc_pollset_worker root_worker; + int in_flight_cbs; + int shutting_down; + int called_shutdown; + int kicked_without_pollers; + grpc_closure *shutdown_done; + grpc_closure_list idle_jobs; + union { + int fd; + void *ptr; + } data; + /* Local cache of eventfds for workers */ + grpc_cached_wakeup_fd *local_wakeup_cache; +}; + +struct grpc_pollset_vtable { + void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + struct grpc_fd *fd, int and_unlock_pollset); + grpc_error *(*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, + grpc_pollset_worker *worker, + gpr_timespec deadline, gpr_timespec now); + void (*finish_shutdown)(grpc_pollset *pollset); + void (*destroy)(grpc_pollset *pollset); +}; + +/* Add an fd to a pollset */ +static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + struct grpc_fd *fd); + +static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, grpc_fd *fd); + +/* Convert a timespec to milliseconds: + - very small or negative poll times are clamped to zero to do a + non-blocking poll (which becomes spin polling) + - other small values are rounded up to one millisecond + - longer than a millisecond polls are rounded up to the next nearest + millisecond to avoid spinning + - infinite timeouts are converted to -1 */ +static int poll_deadline_to_millis_timeout(gpr_timespec deadline, + gpr_timespec now); + +/* Allow kick to wakeup the currently polling worker */ +#define GRPC_POLLSET_CAN_KICK_SELF 1 +/* Force the wakee to repoll when awoken */ +#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2 +/* As per pollset_kick, with an extended set of flags (defined above) + -- mostly for fd_posix's use. */ +static grpc_error *pollset_kick_ext(grpc_pollset *p, + grpc_pollset_worker *specific_worker, + uint32_t flags) GRPC_MUST_USE_RESULT; + +/* turn a pollset into a multipoller: platform specific */ +typedef void (*platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, + struct grpc_fd **fds, + size_t fd_count); +static platform_become_multipoller_type platform_become_multipoller; + +/* Return 1 if the pollset has active threads in pollset_work (pollset must + * be locked) */ +static int pollset_has_workers(grpc_pollset *pollset); + +static void remove_fd_from_all_epoll_sets(int fd); + +/******************************************************************************* + * pollset_set definitions + */ + +struct grpc_pollset_set { + gpr_mu mu; + + size_t pollset_count; + size_t pollset_capacity; + grpc_pollset **pollsets; + + size_t pollset_set_count; + size_t pollset_set_capacity; + struct grpc_pollset_set **pollset_sets; + + size_t fd_count; + size_t fd_capacity; + grpc_fd **fds; +}; + +/******************************************************************************* + * fd_posix.c + */ + +/* We need to keep a freelist not because of any concerns of malloc performance + * but instead so that implementations with multiple threads in (for example) + * epoll_wait deal with the race between pollset removal and incoming poll + * notifications. + * + * The problem is that the poller ultimately holds a reference to this + * object, so it is very difficult to know when is safe to free it, at least + * without some expensive synchronization. 
+ * + * If we keep the object freelisted, in the worst case losing this race just + * becomes a spurious read notification on a reused fd. + */ +/* TODO(klempner): We could use some form of polling generation count to know + * when these are safe to free. */ +/* TODO(klempner): Consider disabling freelisting if we don't have multiple + * threads in poll on the same fd */ +/* TODO(klempner): Batch these allocations to reduce fragmentation */ +static grpc_fd *fd_freelist = NULL; +static gpr_mu fd_freelist_mu; + +static void freelist_fd(grpc_fd *fd) { + gpr_mu_lock(&fd_freelist_mu); + fd->freelist_next = fd_freelist; + fd_freelist = fd; + grpc_iomgr_unregister_object(&fd->iomgr_object); + gpr_mu_unlock(&fd_freelist_mu); +} + +static grpc_fd *alloc_fd(int fd) { + grpc_fd *r = NULL; + gpr_mu_lock(&fd_freelist_mu); + if (fd_freelist != NULL) { + r = fd_freelist; + fd_freelist = fd_freelist->freelist_next; + } + gpr_mu_unlock(&fd_freelist_mu); + if (r == NULL) { + r = gpr_malloc(sizeof(grpc_fd)); + gpr_mu_init(&r->mu); + } + + gpr_mu_lock(&r->mu); + gpr_atm_rel_store(&r->refst, 1); + r->shutdown = 0; + r->read_closure = CLOSURE_NOT_READY; + r->write_closure = CLOSURE_NOT_READY; + r->fd = fd; + r->inactive_watcher_root.next = r->inactive_watcher_root.prev = + &r->inactive_watcher_root; + r->freelist_next = NULL; + r->read_watcher = r->write_watcher = NULL; + r->on_done_closure = NULL; + r->closed = 0; + r->released = 0; + r->read_notifier_pollset = NULL; + gpr_mu_unlock(&r->mu); + return r; +} + +static void destroy(grpc_fd *fd) { + gpr_mu_destroy(&fd->mu); + gpr_free(fd); +} + +#ifdef GRPC_FD_REF_COUNT_DEBUG +#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) +#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) +static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { + gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n, + gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); +#else +#define REF_BY(fd, n, reason) ref_by(fd, n) +#define UNREF_BY(fd, n, reason) unref_by(fd, n) +static void ref_by(grpc_fd *fd, int n) { +#endif + GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); +} + +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { + gpr_atm old; + gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n, + gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); +#else +static void unref_by(grpc_fd *fd, int n) { + gpr_atm old; +#endif + old = gpr_atm_full_fetch_add(&fd->refst, -n); + if (old == n) { + freelist_fd(fd); + } else { + GPR_ASSERT(old > n); + } +} + +static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } + +static void fd_global_shutdown(void) { + gpr_mu_lock(&fd_freelist_mu); + gpr_mu_unlock(&fd_freelist_mu); + while (fd_freelist != NULL) { + grpc_fd *fd = fd_freelist; + fd_freelist = fd_freelist->freelist_next; + destroy(fd); + } + gpr_mu_destroy(&fd_freelist_mu); +} + +static grpc_fd *fd_create(int fd, const char *name) { + grpc_fd *r = alloc_fd(fd); + char *name2; + gpr_asprintf(&name2, "%s fd=%d", name, fd); + grpc_iomgr_register_object(&r->iomgr_object, name2); + gpr_free(name2); +#ifdef GRPC_FD_REF_COUNT_DEBUG + gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name); +#endif + return r; +} + +static bool fd_is_orphaned(grpc_fd *fd) { + return (gpr_atm_acq_load(&fd->refst) & 1) 
== 0; +} + +static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) { + gpr_mu_lock(&watcher->pollset->mu); + GPR_ASSERT(watcher->worker); + grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker, + GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP); + gpr_mu_unlock(&watcher->pollset->mu); + return err; +} + +static void maybe_wake_one_watcher_locked(grpc_fd *fd) { + if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) { + pollset_kick_locked(fd->inactive_watcher_root.next); + } else if (fd->read_watcher) { + pollset_kick_locked(fd->read_watcher); + } else if (fd->write_watcher) { + pollset_kick_locked(fd->write_watcher); + } +} + +static void wake_all_watchers_locked(grpc_fd *fd) { + grpc_fd_watcher *watcher; + for (watcher = fd->inactive_watcher_root.next; + watcher != &fd->inactive_watcher_root; watcher = watcher->next) { + pollset_kick_locked(watcher); + } + if (fd->read_watcher) { + pollset_kick_locked(fd->read_watcher); + } + if (fd->write_watcher && fd->write_watcher != fd->read_watcher) { + pollset_kick_locked(fd->write_watcher); + } +} + +static int has_watchers(grpc_fd *fd) { + return fd->read_watcher != NULL || fd->write_watcher != NULL || + fd->inactive_watcher_root.next != &fd->inactive_watcher_root; +} + +static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { + fd->closed = 1; + if (!fd->released) { + close(fd->fd); + } else { + remove_fd_from_all_epoll_sets(fd->fd); + } + grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL); +} + +static int fd_wrapped_fd(grpc_fd *fd) { + if (fd->released || fd->closed) { + return -1; + } else { + return fd->fd; + } +} + +static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure *on_done, int *release_fd, + const char *reason) { + fd->on_done_closure = on_done; + fd->released = release_fd != NULL; + if (!fd->released) { + shutdown(fd->fd, SHUT_RDWR); + } else { + *release_fd = fd->fd; + } + gpr_mu_lock(&fd->mu); + REF_BY(fd, 1, reason); /* remove active status, but keep referenced */ + if (!has_watchers(fd)) { + close_fd_locked(exec_ctx, fd); + } else { + wake_all_watchers_locked(fd); + } + gpr_mu_unlock(&fd->mu); + UNREF_BY(fd, 2, reason); /* drop the reference */ +} + +/* increment refcount by two to avoid changing the orphan bit */ +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void fd_ref(grpc_fd *fd, const char *reason, const char *file, + int line) { + ref_by(fd, 2, reason, file, line); +} + +static void fd_unref(grpc_fd *fd, const char *reason, const char *file, + int line) { + unref_by(fd, 2, reason, file, line); +} +#else +static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); } + +static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); } +#endif + +static grpc_error *fd_shutdown_error(bool shutdown) { + if (!shutdown) { + return GRPC_ERROR_NONE; + } else { + return GRPC_ERROR_CREATE("FD shutdown"); + } +} + +static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure **st, grpc_closure *closure) { + if (fd->shutdown) { + grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"), + NULL); + } else if (*st == CLOSURE_NOT_READY) { + /* not ready ==> switch to a waiting state by setting the closure */ + *st = closure; + } else if (*st == CLOSURE_READY) { + /* already ready ==> queue the closure to run immediately */ + *st = CLOSURE_NOT_READY; + grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown), + NULL); + maybe_wake_one_watcher_locked(fd); + } else { + /* upcallptr was set to a different closure. This is an error! 
*/ + gpr_log(GPR_ERROR, + "User called a notify_on function with a previous callback still " + "pending"); + abort(); + } +} + +/* returns 1 if state becomes not ready */ +static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure **st) { + if (*st == CLOSURE_READY) { + /* duplicate ready ==> ignore */ + return 0; + } else if (*st == CLOSURE_NOT_READY) { + /* not ready, and not waiting ==> flag ready */ + *st = CLOSURE_READY; + return 0; + } else { + /* waiting ==> queue closure */ + grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL); + *st = CLOSURE_NOT_READY; + return 1; + } +} + +static void set_read_notifier_pollset_locked( + grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) { + fd->read_notifier_pollset = read_notifier_pollset; +} + +static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { + gpr_mu_lock(&fd->mu); + /* only shutdown once */ + if (!fd->shutdown) { + fd->shutdown = 1; + /* signal read/write closed to OS so that future operations fail */ + shutdown(fd->fd, SHUT_RDWR); + set_ready_locked(exec_ctx, fd, &fd->read_closure); + set_ready_locked(exec_ctx, fd, &fd->write_closure); + } + gpr_mu_unlock(&fd->mu); +} + +static bool fd_is_shutdown(grpc_fd *fd) { + gpr_mu_lock(&fd->mu); + bool r = fd->shutdown; + gpr_mu_unlock(&fd->mu); + return r; +} + +static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure *closure) { + gpr_mu_lock(&fd->mu); + notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); + gpr_mu_unlock(&fd->mu); +} + +static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_closure *closure) { + gpr_mu_lock(&fd->mu); + notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); + gpr_mu_unlock(&fd->mu); +} + +/* Return the read-notifier pollset */ +static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, + grpc_fd *fd) { + grpc_pollset *notifier = NULL; + + gpr_mu_lock(&fd->mu); + notifier = fd->read_notifier_pollset; + gpr_mu_unlock(&fd->mu); + + return notifier; +} + +static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, + grpc_pollset_worker *worker, uint32_t read_mask, + uint32_t write_mask, grpc_fd_watcher *watcher) { + uint32_t mask = 0; + grpc_closure *cur; + int requested; + /* keep track of pollers that have requested our events, in case they change + */ + GRPC_FD_REF(fd, "poll"); + + gpr_mu_lock(&fd->mu); + + /* if we are shutdown, then don't add to the watcher set */ + if (fd->shutdown) { + watcher->fd = NULL; + watcher->pollset = NULL; + watcher->worker = NULL; + gpr_mu_unlock(&fd->mu); + GRPC_FD_UNREF(fd, "poll"); + return 0; + } + + /* if there is nobody polling for read, but we need to, then start doing so */ + cur = fd->read_closure; + requested = cur != CLOSURE_READY; + if (read_mask && fd->read_watcher == NULL && requested) { + fd->read_watcher = watcher; + mask |= read_mask; + } + /* if there is nobody polling for write, but we need to, then start doing so + */ + cur = fd->write_closure; + requested = cur != CLOSURE_READY; + if (write_mask && fd->write_watcher == NULL && requested) { + fd->write_watcher = watcher; + mask |= write_mask; + } + /* if not polling, remember this watcher in case we need someone to later */ + if (mask == 0 && worker != NULL) { + watcher->next = &fd->inactive_watcher_root; + watcher->prev = watcher->next->prev; + watcher->next->prev = watcher->prev->next = watcher; + } + watcher->pollset = pollset; + watcher->worker = worker; + watcher->fd = fd; + gpr_mu_unlock(&fd->mu); 
+ + return mask; +} + +static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, + int got_read, int got_write, + grpc_pollset *read_notifier_pollset) { + int was_polling = 0; + int kick = 0; + grpc_fd *fd = watcher->fd; + + if (fd == NULL) { + return; + } + + gpr_mu_lock(&fd->mu); + + if (watcher == fd->read_watcher) { + /* remove read watcher, kick if we still need a read */ + was_polling = 1; + if (!got_read) { + kick = 1; + } + fd->read_watcher = NULL; + } + if (watcher == fd->write_watcher) { + /* remove write watcher, kick if we still need a write */ + was_polling = 1; + if (!got_write) { + kick = 1; + } + fd->write_watcher = NULL; + } + if (!was_polling && watcher->worker != NULL) { + /* remove from inactive list */ + watcher->next->prev = watcher->prev; + watcher->prev->next = watcher->next; + } + if (got_read) { + if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) { + kick = 1; + } + + if (read_notifier_pollset != NULL) { + set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset); + } + } + if (got_write) { + if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) { + kick = 1; + } + } + if (kick) { + maybe_wake_one_watcher_locked(fd); + } + if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) { + close_fd_locked(exec_ctx, fd); + } + gpr_mu_unlock(&fd->mu); + + GRPC_FD_UNREF(fd, "poll"); +} + +/******************************************************************************* + * pollset_posix.c + */ + +GPR_TLS_DECL(g_current_thread_poller); +GPR_TLS_DECL(g_current_thread_worker); + +/** The alarm system needs to be able to wakeup 'some poller' sometimes + * (specifically when a new alarm needs to be triggered earlier than the next + * alarm 'epoch'). + * This wakeup_fd gives us something to alert on when such a case occurs. 
*/ +grpc_wakeup_fd grpc_global_wakeup_fd; + +static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { + worker->prev->next = worker->next; + worker->next->prev = worker->prev; +} + +static int pollset_has_workers(grpc_pollset *p) { + return p->root_worker.next != &p->root_worker; +} + +static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { + if (pollset_has_workers(p)) { + grpc_pollset_worker *w = p->root_worker.next; + remove_worker(p, w); + return w; + } else { + return NULL; + } +} + +static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) { + worker->next = &p->root_worker; + worker->prev = worker->next->prev; + worker->prev->next = worker->next->prev = worker; +} + +static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { + worker->prev = &p->root_worker; + worker->next = worker->prev->next; + worker->prev->next = worker->next->prev = worker; +} + +static void kick_append_error(grpc_error **composite, grpc_error *error) { + if (error == GRPC_ERROR_NONE) return; + if (*composite == GRPC_ERROR_NONE) { + *composite = GRPC_ERROR_CREATE("Kick Failure"); + } + *composite = grpc_error_add_child(*composite, error); +} + +static grpc_error *pollset_kick_ext(grpc_pollset *p, + grpc_pollset_worker *specific_worker, + uint32_t flags) { + GPR_TIMER_BEGIN("pollset_kick_ext", 0); + grpc_error *error = GRPC_ERROR_NONE; + + /* pollset->mu already held */ + if (specific_worker != NULL) { + if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) { + GPR_TIMER_BEGIN("pollset_kick_ext.broadcast", 0); + GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0); + for (specific_worker = p->root_worker.next; + specific_worker != &p->root_worker; + specific_worker = specific_worker->next) { + kick_append_error( + &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd)); + } + p->kicked_without_pollers = true; + GPR_TIMER_END("pollset_kick_ext.broadcast", 0); + } else if (gpr_tls_get(&g_current_thread_worker) != + (intptr_t)specific_worker) { + GPR_TIMER_MARK("different_thread_worker", 0); + if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) { + specific_worker->reevaluate_polling_on_wakeup = true; + } + specific_worker->kicked_specifically = true; + kick_append_error(&error, + grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd)); + } else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) { + GPR_TIMER_MARK("kick_yoself", 0); + if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) { + specific_worker->reevaluate_polling_on_wakeup = true; + } + specific_worker->kicked_specifically = true; + kick_append_error(&error, + grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd)); + } + } else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) { + GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0); + GPR_TIMER_MARK("kick_anonymous", 0); + specific_worker = pop_front_worker(p); + if (specific_worker != NULL) { + if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { + GPR_TIMER_MARK("kick_anonymous_not_self", 0); + push_back_worker(p, specific_worker); + specific_worker = pop_front_worker(p); + if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 && + gpr_tls_get(&g_current_thread_worker) == + (intptr_t)specific_worker) { + push_back_worker(p, specific_worker); + specific_worker = NULL; + } + } + if (specific_worker != NULL) { + GPR_TIMER_MARK("finally_kick", 0); + push_back_worker(p, specific_worker); + kick_append_error( + &error, 
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd)); + } + } else { + GPR_TIMER_MARK("kicked_no_pollers", 0); + p->kicked_without_pollers = true; + } + } + + GPR_TIMER_END("pollset_kick_ext", 0); + return error; +} + +static grpc_error *pollset_kick(grpc_pollset *p, + grpc_pollset_worker *specific_worker) { + return pollset_kick_ext(p, specific_worker, 0); +} + +/* global state management */ + +static grpc_error *pollset_global_init(void) { + gpr_tls_init(&g_current_thread_poller); + gpr_tls_init(&g_current_thread_worker); + return grpc_wakeup_fd_init(&grpc_global_wakeup_fd); +} + +static void pollset_global_shutdown(void) { + grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd); + gpr_tls_destroy(&g_current_thread_poller); + gpr_tls_destroy(&g_current_thread_worker); +} + +static grpc_error *kick_poller(void) { + return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); +} + +/* main interface */ + +static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null); + +static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { + gpr_mu_init(&pollset->mu); + *mu = &pollset->mu; + pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; + pollset->in_flight_cbs = 0; + pollset->shutting_down = 0; + pollset->called_shutdown = 0; + pollset->kicked_without_pollers = 0; + pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL; + pollset->local_wakeup_cache = NULL; + pollset->kicked_without_pollers = 0; + become_basic_pollset(pollset, NULL); +} + +static void pollset_destroy(grpc_pollset *pollset) { + GPR_ASSERT(pollset->in_flight_cbs == 0); + GPR_ASSERT(!pollset_has_workers(pollset)); + GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail); + pollset->vtable->destroy(pollset); + while (pollset->local_wakeup_cache) { + grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next; + grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd); + gpr_free(pollset->local_wakeup_cache); + pollset->local_wakeup_cache = next; + } + gpr_mu_destroy(&pollset->mu); +} + +static void pollset_reset(grpc_pollset *pollset) { + GPR_ASSERT(pollset->shutting_down); + GPR_ASSERT(pollset->in_flight_cbs == 0); + GPR_ASSERT(!pollset_has_workers(pollset)); + GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail); + pollset->vtable->destroy(pollset); + pollset->shutting_down = 0; + pollset->called_shutdown = 0; + pollset->kicked_without_pollers = 0; + become_basic_pollset(pollset, NULL); +} + +static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_fd *fd) { + gpr_mu_lock(&pollset->mu); + pollset->vtable->add_fd(exec_ctx, pollset, fd, 1); +/* the following (enabled only in debug) will reacquire and then release + our lock - meaning that if the unlocking flag passed to add_fd above is + not respected, the code will deadlock (in a way that we have a chance of + debugging) */ +#ifndef NDEBUG + gpr_mu_lock(&pollset->mu); + gpr_mu_unlock(&pollset->mu); +#endif +} + +static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { + GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs)); + pollset->vtable->finish_shutdown(pollset); + grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL); +} + +static void work_combine_error(grpc_error **composite, grpc_error *error) { + if (error == GRPC_ERROR_NONE) return; + if (*composite == GRPC_ERROR_NONE) { + *composite = GRPC_ERROR_CREATE("pollset_work"); + } + *composite = grpc_error_add_child(*composite, error); +} + +static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, 
grpc_pollset *pollset, + grpc_pollset_worker **worker_hdl, + gpr_timespec now, gpr_timespec deadline) { + grpc_pollset_worker worker; + *worker_hdl = &worker; + grpc_error *error = GRPC_ERROR_NONE; + + /* pollset->mu already held */ + int added_worker = 0; + int locked = 1; + int queued_work = 0; + int keep_polling = 0; + GPR_TIMER_BEGIN("pollset_work", 0); + /* this must happen before we (potentially) drop pollset->mu */ + worker.next = worker.prev = NULL; + worker.reevaluate_polling_on_wakeup = 0; + if (pollset->local_wakeup_cache != NULL) { + worker.wakeup_fd = pollset->local_wakeup_cache; + pollset->local_wakeup_cache = worker.wakeup_fd->next; + } else { + worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd)); + error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd); + if (error != GRPC_ERROR_NONE) { + return error; + } + } + worker.kicked_specifically = 0; + /* If there's work waiting for the pollset to be idle, and the + pollset is idle, then do that work */ + if (!pollset_has_workers(pollset) && + !grpc_closure_list_empty(pollset->idle_jobs)) { + GPR_TIMER_MARK("pollset_work.idle_jobs", 0); + grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL); + goto done; + } + /* If we're shutting down then we don't execute any extended work */ + if (pollset->shutting_down) { + GPR_TIMER_MARK("pollset_work.shutting_down", 0); + goto done; + } + /* Give do_promote priority so we don't starve it out */ + if (pollset->in_flight_cbs) { + GPR_TIMER_MARK("pollset_work.in_flight_cbs", 0); + gpr_mu_unlock(&pollset->mu); + locked = 0; + goto done; + } + /* Start polling, and keep doing so while we're being asked to + re-evaluate our pollers (this allows poll() based pollers to + ensure they don't miss wakeups) */ + keep_polling = 1; + while (keep_polling) { + keep_polling = 0; + if (!pollset->kicked_without_pollers) { + if (!added_worker) { + push_front_worker(pollset, &worker); + added_worker = 1; + gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); + } + gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset); + GPR_TIMER_BEGIN("maybe_work_and_unlock", 0); + work_combine_error(&error, + pollset->vtable->maybe_work_and_unlock( + exec_ctx, pollset, &worker, deadline, now)); + GPR_TIMER_END("maybe_work_and_unlock", 0); + locked = 0; + gpr_tls_set(&g_current_thread_poller, 0); + } else { + GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0); + pollset->kicked_without_pollers = 0; + } + /* Finished execution - start cleaning up. + Note that we may arrive here from outside the enclosing while() loop. + In that case we won't loop though as we haven't added worker to the + worker list, which means nobody could ask us to re-evaluate polling). 
*/ + done: + if (!locked) { + queued_work |= grpc_exec_ctx_flush(exec_ctx); + gpr_mu_lock(&pollset->mu); + locked = 1; + } + /* If we're forced to re-evaluate polling (via pollset_kick with + GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force + a loop */ + if (worker.reevaluate_polling_on_wakeup) { + worker.reevaluate_polling_on_wakeup = 0; + pollset->kicked_without_pollers = 0; + if (queued_work || worker.kicked_specifically) { + /* If there's queued work on the list, then set the deadline to be + immediate so we get back out of the polling loop quickly */ + deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC); + } + keep_polling = 1; + } + } + if (added_worker) { + remove_worker(pollset, &worker); + gpr_tls_set(&g_current_thread_worker, 0); + } + /* release wakeup fd to the local pool */ + worker.wakeup_fd->next = pollset->local_wakeup_cache; + pollset->local_wakeup_cache = worker.wakeup_fd; + /* check shutdown conditions */ + if (pollset->shutting_down) { + if (pollset_has_workers(pollset)) { + pollset_kick(pollset, NULL); + } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) { + pollset->called_shutdown = 1; + gpr_mu_unlock(&pollset->mu); + finish_shutdown(exec_ctx, pollset); + grpc_exec_ctx_flush(exec_ctx); + /* Continuing to access pollset here is safe -- it is the caller's + * responsibility to not destroy when it has outstanding calls to + * pollset_work. + * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */ + gpr_mu_lock(&pollset->mu); + } else if (!grpc_closure_list_empty(pollset->idle_jobs)) { + grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL); + gpr_mu_unlock(&pollset->mu); + grpc_exec_ctx_flush(exec_ctx); + gpr_mu_lock(&pollset->mu); + } + } + *worker_hdl = NULL; + GPR_TIMER_END("pollset_work", 0); + return error; +} + +static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_closure *closure) { + GPR_ASSERT(!pollset->shutting_down); + pollset->shutting_down = 1; + pollset->shutdown_done = closure; + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); + if (!pollset_has_workers(pollset)) { + grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL); + } + if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 && + !pollset_has_workers(pollset)) { + pollset->called_shutdown = 1; + finish_shutdown(exec_ctx, pollset); + } +} + +static int poll_deadline_to_millis_timeout(gpr_timespec deadline, + gpr_timespec now) { + gpr_timespec timeout; + static const int64_t max_spin_polling_us = 10; + if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { + return -1; + } + if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( + max_spin_polling_us, + GPR_TIMESPAN))) <= 0) { + return 0; + } + timeout = gpr_time_sub(deadline, now); + return gpr_time_to_millis(gpr_time_add( + timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); +} + +/* + * basic_pollset - a vtable that provides polling for zero or one file + * descriptor via poll() + */ + +typedef struct grpc_unary_promote_args { + const grpc_pollset_vtable *original_vtable; + grpc_pollset *pollset; + grpc_fd *fd; + grpc_closure promotion_closure; +} grpc_unary_promote_args; + +static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args, + grpc_error *error) { + grpc_unary_promote_args *up_args = args; + const grpc_pollset_vtable *original_vtable = up_args->original_vtable; + grpc_pollset *pollset = up_args->pollset; + grpc_fd *fd = up_args->fd; + + /* + * This is quite tricky. 
There are a number of cases to keep in mind here: + * 1. fd may have been orphaned + * 2. The pollset may no longer be a unary poller (and we can't let case #1 + * leak to other pollset types!) + * 3. pollset's fd (which may have changed) may have been orphaned + * 4. The pollset may be shutting down. + */ + + gpr_mu_lock(&pollset->mu); + /* First we need to ensure that nobody is polling concurrently */ + GPR_ASSERT(!pollset_has_workers(pollset)); + + gpr_free(up_args); + /* At this point the pollset may no longer be a unary poller. In that case + * we should just call the right add function and be done. */ + /* TODO(klempner): If we're not careful this could cause infinite recursion. + * That's not a problem for now because empty_pollset has a trivial poller + * and we don't have any mechanism to unbecome multipoller. */ + pollset->in_flight_cbs--; + if (pollset->shutting_down) { + /* We don't care about this pollset anymore. */ + if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) { + pollset->called_shutdown = 1; + finish_shutdown(exec_ctx, pollset); + } + } else if (fd_is_orphaned(fd)) { + /* Don't try to add it to anything, we'll drop our ref on it below */ + } else if (pollset->vtable != original_vtable) { + pollset->vtable->add_fd(exec_ctx, pollset, fd, 0); + } else if (fd != pollset->data.ptr) { + grpc_fd *fds[2]; + fds[0] = pollset->data.ptr; + fds[1] = fd; + + if (fds[0] && !fd_is_orphaned(fds[0])) { + platform_become_multipoller(exec_ctx, pollset, fds, GPR_ARRAY_SIZE(fds)); + GRPC_FD_UNREF(fds[0], "basicpoll"); + } else { + /* old fd is orphaned and we haven't cleaned it up until now, so remain a + * unary poller */ + /* Note that it is possible that fds[1] is also orphaned at this point. + * That's okay, we'll correct it at the next add or poll. */ + if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll"); + pollset->data.ptr = fd; + GRPC_FD_REF(fd, "basicpoll"); + } + } + + gpr_mu_unlock(&pollset->mu); + + /* Matching ref in basic_pollset_add_fd */ + GRPC_FD_UNREF(fd, "basicpoll_add"); +} + +static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_fd *fd, int and_unlock_pollset) { + grpc_unary_promote_args *up_args; + GPR_ASSERT(fd); + if (fd == pollset->data.ptr) goto exit; + + if (!pollset_has_workers(pollset)) { + /* Fast path -- no in flight cbs */ + /* TODO(klempner): Comment this out and fix any test failures or establish + * they are due to timing issues */ + grpc_fd *fds[2]; + fds[0] = pollset->data.ptr; + fds[1] = fd; + + if (fds[0] == NULL) { + pollset->data.ptr = fd; + GRPC_FD_REF(fd, "basicpoll"); + } else if (!fd_is_orphaned(fds[0])) { + platform_become_multipoller(exec_ctx, pollset, fds, GPR_ARRAY_SIZE(fds)); + GRPC_FD_UNREF(fds[0], "basicpoll"); + } else { + /* old fd is orphaned and we haven't cleaned it up until now, so remain a + * unary poller */ + GRPC_FD_UNREF(fds[0], "basicpoll"); + pollset->data.ptr = fd; + GRPC_FD_REF(fd, "basicpoll"); + } + goto exit; + } + + /* Now we need to promote. This needs to happen when we're not polling. Since + * this may be called from poll, the wait needs to happen asynchronously. 
*/ + GRPC_FD_REF(fd, "basicpoll_add"); + pollset->in_flight_cbs++; + up_args = gpr_malloc(sizeof(*up_args)); + up_args->fd = fd; + up_args->original_vtable = pollset->vtable; + up_args->pollset = pollset; + up_args->promotion_closure.cb = basic_do_promote; + up_args->promotion_closure.cb_arg = up_args; + + grpc_closure_list_append(&pollset->idle_jobs, &up_args->promotion_closure, + GRPC_ERROR_NONE); + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); + +exit: + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } +} + +static grpc_error *basic_pollset_maybe_work_and_unlock( + grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker, + gpr_timespec deadline, gpr_timespec now) { +#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR) +#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR) + + struct pollfd pfd[3]; + grpc_fd *fd; + grpc_fd_watcher fd_watcher; + int timeout; + int r; + nfds_t nfds; + grpc_error *error = GRPC_ERROR_NONE; + + fd = pollset->data.ptr; + if (fd && fd_is_orphaned(fd)) { + GRPC_FD_UNREF(fd, "basicpoll"); + fd = pollset->data.ptr = NULL; + } + timeout = poll_deadline_to_millis_timeout(deadline, now); + pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd); + pfd[0].events = POLLIN; + pfd[0].revents = 0; + pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd); + pfd[1].events = POLLIN; + pfd[1].revents = 0; + nfds = 2; + if (fd) { + pfd[2].fd = fd->fd; + pfd[2].revents = 0; + GRPC_FD_REF(fd, "basicpoll_begin"); + gpr_mu_unlock(&pollset->mu); + pfd[2].events = + (short)fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT, &fd_watcher); + if (pfd[2].events != 0) { + nfds++; + } + } else { + gpr_mu_unlock(&pollset->mu); + } + + /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid + even going into the blocking annotation if possible */ + /* poll fd count (argument 2) is shortened by one if we have no events + to poll on - such that it only includes the kicker */ + GPR_TIMER_BEGIN("poll", 0); + GRPC_SCHEDULING_START_BLOCKING_REGION; + r = grpc_poll_function(pfd, nfds, timeout); + GRPC_SCHEDULING_END_BLOCKING_REGION; + GPR_TIMER_END("poll", 0); + + if (r < 0) { + if (errno != EINTR) { + work_combine_error(&error, GRPC_OS_ERROR(errno, "poll")); + } + if (fd) { + fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL); + } + } else if (r == 0) { + if (fd) { + fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL); + } + } else { + if (pfd[0].revents & POLLIN_CHECK) { + work_combine_error(&error, + grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd)); + } + if (pfd[1].revents & POLLIN_CHECK) { + work_combine_error(&error, + grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd)); + } + if (nfds > 2) { + fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK, + pfd[2].revents & POLLOUT_CHECK, pollset); + } else if (fd) { + fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL); + } + } + + if (fd) { + GRPC_FD_UNREF(fd, "basicpoll_begin"); + } + + return error; +} + +static void basic_pollset_destroy(grpc_pollset *pollset) { + if (pollset->data.ptr != NULL) { + GRPC_FD_UNREF(pollset->data.ptr, "basicpoll"); + pollset->data.ptr = NULL; + } +} + +static const grpc_pollset_vtable basic_pollset = { + basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock, + basic_pollset_destroy, basic_pollset_destroy}; + +static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) { + pollset->vtable = &basic_pollset; + pollset->data.ptr = fd_or_null; + if (fd_or_null != NULL) { + GRPC_FD_REF(fd_or_null, "basicpoll"); + } +} + 
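The deadline conversion used by the basic poller above is easy to misread, so here is a minimal stand-alone sketch (plain C with int64 nanoseconds instead of gpr_timespec; an illustration, not the gRPC API) of the same rule poll_deadline_to_millis_timeout applies: an unreachable deadline maps to -1 (block in poll() indefinitely), a deadline within the ~10us spin window maps to 0, and everything else rounds up to whole milliseconds so poll() never returns before the deadline.

/* Illustrative only: deadlines modeled as int64 nanoseconds since an epoch. */
#include <stdint.h>
#include <stdio.h>

#define NS_PER_MS 1000000LL
#define NS_PER_US 1000LL
#define MAX_SPIN_POLLING_US 10LL
#define DEADLINE_INF INT64_MAX

static int deadline_to_millis(int64_t deadline_ns, int64_t now_ns) {
  if (deadline_ns == DEADLINE_INF) return -1;  /* no deadline: block forever */
  if (deadline_ns <= now_ns + MAX_SPIN_POLLING_US * NS_PER_US) {
    return 0;  /* deadline (nearly) reached: do a non-blocking poll */
  }
  int64_t delta_ns = deadline_ns - now_ns;
  /* round up to whole milliseconds so we never wake before the deadline */
  return (int)((delta_ns + NS_PER_MS - 1) / NS_PER_MS);
}

int main(void) {
  int64_t now = 0;
  printf("%d\n", deadline_to_millis(DEADLINE_INF, now));        /* -1 */
  printf("%d\n", deadline_to_millis(now + 5 * NS_PER_US, now)); /* 0  */
  printf("%d\n", deadline_to_millis(now + 1500000, now));       /* 2: 1.5ms rounds up */
  return 0;
}

Rounding up rather than truncating is what keeps a timer from firing a millisecond early and immediately dropping back into poll() in a tight loop.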
+/******************************************************************************* + * pollset_multipoller_with_poll_posix.c + */ + +#ifndef GPR_LINUX_MULTIPOLL_WITH_EPOLL + +typedef struct { + /* all polled fds */ + size_t fd_count; + size_t fd_capacity; + grpc_fd **fds; + /* fds that have been removed from the pollset explicitly */ + size_t del_count; + size_t del_capacity; + grpc_fd **dels; +} poll_hdr; + +static void multipoll_with_poll_pollset_add_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, + grpc_fd *fd, + int and_unlock_pollset) { + size_t i; + poll_hdr *h = pollset->data.ptr; + /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */ + for (i = 0; i < h->fd_count; i++) { + if (h->fds[i] == fd) goto exit; + } + if (h->fd_count == h->fd_capacity) { + h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2); + h->fds = gpr_realloc(h->fds, sizeof(grpc_fd *) * h->fd_capacity); + } + h->fds[h->fd_count++] = fd; + GRPC_FD_REF(fd, "multipoller"); +exit: + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } +} + +static grpc_error *multipoll_with_poll_pollset_maybe_work_and_unlock( + grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker, + gpr_timespec deadline, gpr_timespec now) { +#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR) +#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR) + + int timeout; + int r; + size_t i, j, fd_count; + nfds_t pfd_count; + poll_hdr *h; + /* TODO(ctiller): inline some elements to avoid an allocation */ + grpc_fd_watcher *watchers; + struct pollfd *pfds; + grpc_error *error = GRPC_ERROR_NONE; + + h = pollset->data.ptr; + timeout = poll_deadline_to_millis_timeout(deadline, now); + /* TODO(ctiller): perform just one malloc here if we exceed the inline case */ + pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2)); + watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2)); + fd_count = 0; + pfd_count = 2; + pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd); + pfds[0].events = POLLIN; + pfds[0].revents = 0; + pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd); + pfds[1].events = POLLIN; + pfds[1].revents = 0; + for (i = 0; i < h->fd_count; i++) { + int remove = fd_is_orphaned(h->fds[i]); + for (j = 0; !remove && j < h->del_count; j++) { + if (h->fds[i] == h->dels[j]) remove = 1; + } + if (remove) { + GRPC_FD_UNREF(h->fds[i], "multipoller"); + } else { + h->fds[fd_count++] = h->fds[i]; + watchers[pfd_count].fd = h->fds[i]; + GRPC_FD_REF(watchers[pfd_count].fd, "multipoller_start"); + pfds[pfd_count].fd = h->fds[i]->fd; + pfds[pfd_count].revents = 0; + pfd_count++; + } + } + for (j = 0; j < h->del_count; j++) { + GRPC_FD_UNREF(h->dels[j], "multipoller_del"); + } + h->del_count = 0; + h->fd_count = fd_count; + gpr_mu_unlock(&pollset->mu); + + for (i = 2; i < pfd_count; i++) { + grpc_fd *fd = watchers[i].fd; + pfds[i].events = (short)fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT, + &watchers[i]); + GRPC_FD_UNREF(fd, "multipoller_start"); + } + + /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid + even going into the blocking annotation if possible */ + GRPC_SCHEDULING_START_BLOCKING_REGION; + r = grpc_poll_function(pfds, pfd_count, timeout); + GRPC_SCHEDULING_END_BLOCKING_REGION; + + if (r < 0) { + if (errno != EINTR) { + work_combine_error(&error, GRPC_OS_ERROR(errno, "poll")); + } + for (i = 2; i < pfd_count; i++) { + fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + } + } else if (r == 0) { + for (i = 2; i < pfd_count; i++) { + 
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + } + } else { + if (pfds[0].revents & POLLIN_CHECK) { + work_combine_error(&error, + grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd)); + } + if (pfds[1].revents & POLLIN_CHECK) { + work_combine_error(&error, + grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd)); + } + for (i = 2; i < pfd_count; i++) { + if (watchers[i].fd == NULL) { + fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + continue; + } + fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK, + pfds[i].revents & POLLOUT_CHECK, pollset); + } + } + + gpr_free(pfds); + gpr_free(watchers); + + return error; +} + +static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) { + size_t i; + poll_hdr *h = pollset->data.ptr; + for (i = 0; i < h->fd_count; i++) { + GRPC_FD_UNREF(h->fds[i], "multipoller"); + } + for (i = 0; i < h->del_count; i++) { + GRPC_FD_UNREF(h->dels[i], "multipoller_del"); + } + h->fd_count = 0; + h->del_count = 0; +} + +static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) { + poll_hdr *h = pollset->data.ptr; + multipoll_with_poll_pollset_finish_shutdown(pollset); + gpr_free(h->fds); + gpr_free(h->dels); + gpr_free(h); +} + +static const grpc_pollset_vtable multipoll_with_poll_pollset = { + multipoll_with_poll_pollset_add_fd, + multipoll_with_poll_pollset_maybe_work_and_unlock, + multipoll_with_poll_pollset_finish_shutdown, + multipoll_with_poll_pollset_destroy}; + +static void poll_become_multipoller(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, grpc_fd **fds, + size_t nfds) { + size_t i; + poll_hdr *h = gpr_malloc(sizeof(poll_hdr)); + pollset->vtable = &multipoll_with_poll_pollset; + pollset->data.ptr = h; + h->fd_count = nfds; + h->fd_capacity = nfds; + h->fds = gpr_malloc(nfds * sizeof(grpc_fd *)); + h->del_count = 0; + h->del_capacity = 0; + h->dels = NULL; + for (i = 0; i < nfds; i++) { + h->fds[i] = fds[i]; + GRPC_FD_REF(fds[i], "multipoller"); + } +} + +#endif /* !GPR_LINUX_MULTIPOLL_WITH_EPOLL */ + +/******************************************************************************* + * pollset_multipoller_with_epoll_posix.c + */ + +#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL + +#include <errno.h> +#include <poll.h> +#include <string.h> +#include <sys/epoll.h> +#include <unistd.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/useful.h> + +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/support/block_annotate.h" + +static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st, + grpc_pollset *read_notifier_pollset) { + /* only one set_ready can be active at once (but there may be a racing + notify_on) */ + gpr_mu_lock(&fd->mu); + set_ready_locked(exec_ctx, fd, st); + + /* A non-NULL read_notifier_pollset means that the fd is readable. 
*/ + if (read_notifier_pollset != NULL) { + /* Note: Since the fd might be a part of multiple pollsets, this might be + * called multiple times (for each time the fd becomes readable) and it is + * okay to set the fd's read-notifier pollset to anyone of these pollsets */ + set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset); + } + + gpr_mu_unlock(&fd->mu); +} + +static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, + grpc_pollset *notifier_pollset) { + set_ready(exec_ctx, fd, &fd->read_closure, notifier_pollset); +} + +static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { + set_ready(exec_ctx, fd, &fd->write_closure, NULL); +} + +struct epoll_fd_list { + int *epoll_fds; + size_t count; + size_t capacity; +}; + +static struct epoll_fd_list epoll_fd_global_list; +static gpr_once init_epoll_fd_list_mu = GPR_ONCE_INIT; +static gpr_mu epoll_fd_list_mu; + +static void init_mu(void) { gpr_mu_init(&epoll_fd_list_mu); } + +static void add_epoll_fd_to_global_list(int epoll_fd) { + gpr_once_init(&init_epoll_fd_list_mu, init_mu); + + gpr_mu_lock(&epoll_fd_list_mu); + if (epoll_fd_global_list.count == epoll_fd_global_list.capacity) { + epoll_fd_global_list.capacity = + GPR_MAX((size_t)8, epoll_fd_global_list.capacity * 2); + epoll_fd_global_list.epoll_fds = + gpr_realloc(epoll_fd_global_list.epoll_fds, + epoll_fd_global_list.capacity * sizeof(int)); + } + epoll_fd_global_list.epoll_fds[epoll_fd_global_list.count++] = epoll_fd; + gpr_mu_unlock(&epoll_fd_list_mu); +} + +static void remove_epoll_fd_from_global_list(int epoll_fd) { + gpr_mu_lock(&epoll_fd_list_mu); + GPR_ASSERT(epoll_fd_global_list.count > 0); + for (size_t i = 0; i < epoll_fd_global_list.count; i++) { + if (epoll_fd == epoll_fd_global_list.epoll_fds[i]) { + epoll_fd_global_list.epoll_fds[i] = + epoll_fd_global_list.epoll_fds[--(epoll_fd_global_list.count)]; + break; + } + } + gpr_mu_unlock(&epoll_fd_list_mu); +} + +static void remove_fd_from_all_epoll_sets(int fd) { + int err; + gpr_once_init(&init_epoll_fd_list_mu, init_mu); + gpr_mu_lock(&epoll_fd_list_mu); + if (epoll_fd_global_list.count == 0) { + gpr_mu_unlock(&epoll_fd_list_mu); + return; + } + for (size_t i = 0; i < epoll_fd_global_list.count; i++) { + err = epoll_ctl(epoll_fd_global_list.epoll_fds[i], EPOLL_CTL_DEL, fd, NULL); + if (err < 0 && errno != ENOENT) { + gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd, + strerror(errno)); + } + } + gpr_mu_unlock(&epoll_fd_list_mu); +} + +typedef struct { + grpc_pollset *pollset; + grpc_fd *fd; + grpc_closure closure; +} delayed_add; + +typedef struct { int epoll_fd; } epoll_hdr; + +static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_fd *fd) { + epoll_hdr *h = pollset->data.ptr; + struct epoll_event ev; + int err; + grpc_fd_watcher watcher; + + /* We pretend to be polling whilst adding an fd to keep the fd from being + closed during the add. This may result in a spurious wakeup being assigned + to this pollset whilst adding, but that should be benign. */ + GPR_ASSERT(fd_begin_poll(fd, pollset, NULL, 0, 0, &watcher) == 0); + if (watcher.fd != NULL) { + ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); + ev.data.ptr = fd; + err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev); + if (err < 0) { + /* FDs may be added to a pollset multiple times, so EEXIST is normal. 
*/ + if (errno != EEXIST) { + gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s", fd->fd, + strerror(errno)); + } + } + } + fd_end_poll(exec_ctx, &watcher, 0, 0, NULL); +} + +static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + delayed_add *da = arg; + + if (!fd_is_orphaned(da->fd)) { + finally_add_fd(exec_ctx, da->pollset, da->fd); + } + + gpr_mu_lock(&da->pollset->mu); + da->pollset->in_flight_cbs--; + if (da->pollset->shutting_down) { + /* We don't care about this pollset anymore. */ + if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) { + da->pollset->called_shutdown = 1; + grpc_exec_ctx_sched(exec_ctx, da->pollset->shutdown_done, GRPC_ERROR_NONE, + NULL); + } + } + gpr_mu_unlock(&da->pollset->mu); + + GRPC_FD_UNREF(da->fd, "delayed_add"); + + gpr_free(da); +} + +static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, + grpc_fd *fd, + int and_unlock_pollset) { + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + finally_add_fd(exec_ctx, pollset, fd); + } else { + delayed_add *da = gpr_malloc(sizeof(*da)); + da->pollset = pollset; + da->fd = fd; + GRPC_FD_REF(fd, "delayed_add"); + grpc_closure_init(&da->closure, perform_delayed_add, da); + pollset->in_flight_cbs++; + grpc_exec_ctx_sched(exec_ctx, &da->closure, GRPC_ERROR_NONE, NULL); + } +} + +/* TODO(klempner): We probably want to turn this down a bit */ +#define GRPC_EPOLL_MAX_EVENTS 1000 + +static grpc_error *multipoll_with_epoll_pollset_maybe_work_and_unlock( + grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker, + gpr_timespec deadline, gpr_timespec now) { + struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS]; + int ep_rv; + int poll_rv; + epoll_hdr *h = pollset->data.ptr; + int timeout_ms; + struct pollfd pfds[2]; + grpc_error *error = GRPC_ERROR_NONE; + + /* If you want to ignore epoll's ability to sanely handle parallel pollers, + * for a more apples-to-apples performance comparison with poll, add a + * if (pollset->counter != 0) { return 0; } + * here. 
+ */ + + gpr_mu_unlock(&pollset->mu); + + timeout_ms = poll_deadline_to_millis_timeout(deadline, now); + + pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd); + pfds[0].events = POLLIN; + pfds[0].revents = 0; + pfds[1].fd = h->epoll_fd; + pfds[1].events = POLLIN; + pfds[1].revents = 0; + + /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid + even going into the blocking annotation if possible */ + GPR_TIMER_BEGIN("poll", 0); + GRPC_SCHEDULING_START_BLOCKING_REGION; + poll_rv = grpc_poll_function(pfds, 2, timeout_ms); + GRPC_SCHEDULING_END_BLOCKING_REGION; + GPR_TIMER_END("poll", 0); + + if (poll_rv < 0) { + if (errno != EINTR) { + work_combine_error(&error, GRPC_OS_ERROR(errno, "poll")); + } + } else if (poll_rv == 0) { + /* do nothing */ + } else { + if (pfds[0].revents) { + work_combine_error(&error, + grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd)); + } + if (pfds[1].revents) { + do { + /* The following epoll_wait never blocks; it has a timeout of 0 */ + ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0); + if (ep_rv < 0) { + if (errno != EINTR) { + work_combine_error(&error, GRPC_OS_ERROR(errno, "epoll_wait")); + } + } else { + int i; + for (i = 0; i < ep_rv; ++i) { + grpc_fd *fd = ep_ev[i].data.ptr; + /* TODO(klempner): We might want to consider making err and pri + * separate events */ + int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); + int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); + int write_ev = ep_ev[i].events & EPOLLOUT; + if (fd == NULL) { + work_combine_error(&error, grpc_wakeup_fd_consume_wakeup( + &grpc_global_wakeup_fd)); + } else { + if (read_ev || cancel) { + fd_become_readable(exec_ctx, fd, pollset); + } + if (write_ev || cancel) { + fd_become_writable(exec_ctx, fd); + } + } + } + } + } while (ep_rv == GRPC_EPOLL_MAX_EVENTS); + } + } + return error; +} + +static void multipoll_with_epoll_pollset_finish_shutdown( + grpc_pollset *pollset) {} + +static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) { + epoll_hdr *h = pollset->data.ptr; + close(h->epoll_fd); + remove_epoll_fd_from_global_list(h->epoll_fd); + gpr_free(h); +} + +static const grpc_pollset_vtable multipoll_with_epoll_pollset = { + multipoll_with_epoll_pollset_add_fd, + multipoll_with_epoll_pollset_maybe_work_and_unlock, + multipoll_with_epoll_pollset_finish_shutdown, + multipoll_with_epoll_pollset_destroy}; + +static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx, + grpc_pollset *pollset, grpc_fd **fds, + size_t nfds) { + size_t i; + epoll_hdr *h = gpr_malloc(sizeof(epoll_hdr)); + struct epoll_event ev; + int err; + + pollset->vtable = &multipoll_with_epoll_pollset; + pollset->data.ptr = h; + h->epoll_fd = epoll_create1(EPOLL_CLOEXEC); + if (h->epoll_fd < 0) { + /* TODO(klempner): Fall back to poll here, especially on ENOSYS */ + gpr_log(GPR_ERROR, "epoll_create1 failed: %s", strerror(errno)); + abort(); + } + add_epoll_fd_to_global_list(h->epoll_fd); + + ev.events = (uint32_t)(EPOLLIN | EPOLLET); + ev.data.ptr = NULL; + err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, + GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), &ev); + if (err < 0) { + gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s", + GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), + strerror(errno)); + } + + for (i = 0; i < nfds; i++) { + multipoll_with_epoll_pollset_add_fd(exec_ctx, pollset, fds[i], 0); + } +} + +#else /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */ + +static void remove_fd_from_all_epoll_sets(int fd) {} + +#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */ 
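The epoll multipoller above combines three ideas: fds are registered edge-triggered (EPOLLIN | EPOLLOUT | EPOLLET), each worker blocks in poll() on only two descriptors (its wakeup fd and the shared epoll fd), and readiness is then drained with zero-timeout epoll_wait calls that repeat while the event buffer comes back full. A compressed, Linux-only sketch of that drain pattern (hypothetical callback, error handling trimmed; a sketch of the technique, not the gRPC code itself):

/* Linux-only illustration of the poll-then-drain pattern. */
#include <poll.h>
#include <sys/epoll.h>

#define MAX_EVENTS 64

/* Hypothetical per-fd callback supplied by the caller. */
typedef void (*ready_cb)(void *fd_data, int readable, int writable);

static int poll_then_drain(int epoll_fd, int wakeup_fd, int timeout_ms,
                           ready_cb on_ready) {
  struct pollfd pfds[2] = {
      {.fd = wakeup_fd, .events = POLLIN},
      {.fd = epoll_fd, .events = POLLIN},
  };
  int r = poll(pfds, 2, timeout_ms);
  if (r <= 0) return r; /* timeout or error (EINTR handling omitted) */
  /* a real implementation would also consume wakeup_fd here */

  if (pfds[1].revents & POLLIN) {
    struct epoll_event evs[MAX_EVENTS];
    int n;
    do {
      /* timeout 0: never blocks, we already know events are pending */
      n = epoll_wait(epoll_fd, evs, MAX_EVENTS, 0);
      for (int i = 0; i < n; i++) {
        int cancel = evs[i].events & (EPOLLERR | EPOLLHUP);
        int readable = (evs[i].events & (EPOLLIN | EPOLLPRI)) || cancel;
        int writable = (evs[i].events & EPOLLOUT) || cancel;
        on_ready(evs[i].data.ptr, readable, writable);
      }
      /* a full buffer may mean more fds are ready: drain again */
    } while (n == MAX_EVENTS);
  }
  return 1;
}

Repeating while the buffer comes back full ensures every currently-ready fd is serviced in this pass instead of waiting for the next trip through poll().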
+ +/******************************************************************************* + * pollset_set_posix.c + */ + +static grpc_pollset_set *pollset_set_create(void) { + grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set)); + memset(pollset_set, 0, sizeof(*pollset_set)); + gpr_mu_init(&pollset_set->mu); + return pollset_set; +} + +static void pollset_set_destroy(grpc_pollset_set *pollset_set) { + size_t i; + gpr_mu_destroy(&pollset_set->mu); + for (i = 0; i < pollset_set->fd_count; i++) { + GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); + } + gpr_free(pollset_set->pollsets); + gpr_free(pollset_set->pollset_sets); + gpr_free(pollset_set->fds); + gpr_free(pollset_set); +} + +static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, + grpc_pollset *pollset) { + size_t i, j; + gpr_mu_lock(&pollset_set->mu); + if (pollset_set->pollset_count == pollset_set->pollset_capacity) { + pollset_set->pollset_capacity = + GPR_MAX(8, 2 * pollset_set->pollset_capacity); + pollset_set->pollsets = + gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * + sizeof(*pollset_set->pollsets)); + } + pollset_set->pollsets[pollset_set->pollset_count++] = pollset; + for (i = 0, j = 0; i < pollset_set->fd_count; i++) { + if (fd_is_orphaned(pollset_set->fds[i])) { + GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); + } else { + pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]); + pollset_set->fds[j++] = pollset_set->fds[i]; + } + } + pollset_set->fd_count = j; + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, + grpc_pollset *pollset) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + for (i = 0; i < pollset_set->pollset_count; i++) { + if (pollset_set->pollsets[i] == pollset) { + pollset_set->pollset_count--; + GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i], + pollset_set->pollsets[pollset_set->pollset_count]); + break; + } + } + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *bag, + grpc_pollset_set *item) { + size_t i, j; + gpr_mu_lock(&bag->mu); + if (bag->pollset_set_count == bag->pollset_set_capacity) { + bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity); + bag->pollset_sets = + gpr_realloc(bag->pollset_sets, + bag->pollset_set_capacity * sizeof(*bag->pollset_sets)); + } + bag->pollset_sets[bag->pollset_set_count++] = item; + for (i = 0, j = 0; i < bag->fd_count; i++) { + if (fd_is_orphaned(bag->fds[i])) { + GRPC_FD_UNREF(bag->fds[i], "pollset_set"); + } else { + pollset_set_add_fd(exec_ctx, item, bag->fds[i]); + bag->fds[j++] = bag->fds[i]; + } + } + bag->fd_count = j; + gpr_mu_unlock(&bag->mu); +} + +static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *bag, + grpc_pollset_set *item) { + size_t i; + gpr_mu_lock(&bag->mu); + for (i = 0; i < bag->pollset_set_count; i++) { + if (bag->pollset_sets[i] == item) { + bag->pollset_set_count--; + GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i], + bag->pollset_sets[bag->pollset_set_count]); + break; + } + } + gpr_mu_unlock(&bag->mu); +} + +static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, grpc_fd *fd) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + if (pollset_set->fd_count == pollset_set->fd_capacity) { + pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity); + pollset_set->fds = gpr_realloc( + pollset_set->fds, pollset_set->fd_capacity * 
sizeof(*pollset_set->fds)); + } + GRPC_FD_REF(fd, "pollset_set"); + pollset_set->fds[pollset_set->fd_count++] = fd; + for (i = 0; i < pollset_set->pollset_count; i++) { + pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd); + } + for (i = 0; i < pollset_set->pollset_set_count; i++) { + pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + } + gpr_mu_unlock(&pollset_set->mu); +} + +static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set, grpc_fd *fd) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + for (i = 0; i < pollset_set->fd_count; i++) { + if (pollset_set->fds[i] == fd) { + pollset_set->fd_count--; + GPR_SWAP(grpc_fd *, pollset_set->fds[i], + pollset_set->fds[pollset_set->fd_count]); + GRPC_FD_UNREF(fd, "pollset_set"); + break; + } + } + for (i = 0; i < pollset_set->pollset_set_count; i++) { + pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + } + gpr_mu_unlock(&pollset_set->mu); +} + +/******************************************************************************* + * event engine binding + */ + +static void shutdown_engine(void) { + fd_global_shutdown(); + pollset_global_shutdown(); +} + +static const grpc_event_engine_vtable vtable = { + .pollset_size = sizeof(grpc_pollset), + + .fd_create = fd_create, + .fd_wrapped_fd = fd_wrapped_fd, + .fd_orphan = fd_orphan, + .fd_shutdown = fd_shutdown, + .fd_is_shutdown = fd_is_shutdown, + .fd_notify_on_read = fd_notify_on_read, + .fd_notify_on_write = fd_notify_on_write, + .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, + + .pollset_init = pollset_init, + .pollset_shutdown = pollset_shutdown, + .pollset_reset = pollset_reset, + .pollset_destroy = pollset_destroy, + .pollset_work = pollset_work, + .pollset_kick = pollset_kick, + .pollset_add_fd = pollset_add_fd, + + .pollset_set_create = pollset_set_create, + .pollset_set_destroy = pollset_set_destroy, + .pollset_set_add_pollset = pollset_set_add_pollset, + .pollset_set_del_pollset = pollset_set_del_pollset, + .pollset_set_add_pollset_set = pollset_set_add_pollset_set, + .pollset_set_del_pollset_set = pollset_set_del_pollset_set, + .pollset_set_add_fd = pollset_set_add_fd, + .pollset_set_del_fd = pollset_set_del_fd, + + .kick_poller = kick_poller, + + .shutdown_engine = shutdown_engine, +}; + +const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void) { +#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL + platform_become_multipoller = epoll_become_multipoller; +#else + platform_become_multipoller = poll_become_multipoller; +#endif + fd_global_init(); + pollset_global_init(); + return &vtable; +} + +#endif diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.h b/src/core/lib/iomgr/ev_poll_and_epoll_posix.h new file mode 100644 index 0000000000..06d6dbf29d --- /dev/null +++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.h @@ -0,0 +1,41 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H +#define GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H + +#include "src/core/lib/iomgr/ev_posix.h" + +const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void); + +#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H */ diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c index e5eb16c3be..45c0a5e954 100644 --- a/src/core/lib/iomgr/ev_poll_posix.c +++ b/src/core/lib/iomgr/ev_poll_posix.c @@ -59,8 +59,6 @@ * FD declarations */ -grpc_wakeup_fd grpc_global_wakeup_fd; - typedef struct grpc_fd_watcher { struct grpc_fd_watcher *next; struct grpc_fd_watcher *prev; @@ -432,7 +430,10 @@ static grpc_error *fd_shutdown_error(bool shutdown) { static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st, grpc_closure *closure) { - if (*st == CLOSURE_NOT_READY) { + if (fd->shutdown) { + grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"), + NULL); + } else if (*st == CLOSURE_NOT_READY) { /* not ready ==> switch to a waiting state by setting the closure */ *st = closure; } else if (*st == CLOSURE_READY) { @@ -475,13 +476,24 @@ static void set_read_notifier_pollset_locked( static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { gpr_mu_lock(&fd->mu); - GPR_ASSERT(!fd->shutdown); - fd->shutdown = 1; - set_ready_locked(exec_ctx, fd, &fd->read_closure); - set_ready_locked(exec_ctx, fd, &fd->write_closure); + /* only shutdown once */ + if (!fd->shutdown) { + fd->shutdown = 1; + /* signal read/write closed to OS so that future operations fail */ + shutdown(fd->fd, SHUT_RDWR); + set_ready_locked(exec_ctx, fd, &fd->read_closure); + set_ready_locked(exec_ctx, fd, &fd->write_closure); + } gpr_mu_unlock(&fd->mu); } +static bool fd_is_shutdown(grpc_fd *fd) { + gpr_mu_lock(&fd->mu); + bool r = fd->shutdown; + gpr_mu_unlock(&fd->mu); + return r; +} + static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { gpr_mu_lock(&fd->mu); @@ -717,6 +729,7 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p, } GPR_TIMER_END("pollset_kick_ext", 0); + GRPC_LOG_IF_ERROR("pollset_kick_ext", GRPC_ERROR_REF(error)); return error; } @@ -845,6 +858,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd)); error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd); if (error != GRPC_ERROR_NONE) { + GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); return error; } } @@ -1024,6 +1038,7 @@ static grpc_error 
*pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } *worker_hdl = NULL; GPR_TIMER_END("pollset_work", 0); + GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); return error; } @@ -1215,6 +1230,7 @@ static const grpc_event_engine_vtable vtable = { .fd_wrapped_fd = fd_wrapped_fd, .fd_orphan = fd_orphan, .fd_shutdown = fd_shutdown, + .fd_is_shutdown = fd_is_shutdown, .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, @@ -1242,7 +1258,9 @@ static const grpc_event_engine_vtable vtable = { }; const grpc_event_engine_vtable *grpc_init_poll_posix(void) { - pollset_global_init(); + if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { + return NULL; + } return &vtable; } diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c index ad8eaab454..a3c1e9db9a 100644 --- a/src/core/lib/iomgr/ev_posix.c +++ b/src/core/lib/iomgr/ev_posix.c @@ -44,6 +44,8 @@ #include <grpc/support/string_util.h> #include <grpc/support/useful.h> +#include "src/core/lib/iomgr/ev_epoll_linux.h" +#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h" #include "src/core/lib/iomgr/ev_poll_posix.h" #include "src/core/lib/support/env.h" @@ -52,6 +54,7 @@ grpc_poll_function_type grpc_poll_function = poll; static const grpc_event_engine_vtable *g_event_engine; +static const char *g_poll_strategy_name = NULL; typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)(void); @@ -61,7 +64,9 @@ typedef struct { } event_engine_factory; static const event_engine_factory g_factories[] = { + {"epoll", grpc_init_epoll_linux}, {"poll", grpc_init_poll_posix}, + {"legacy", grpc_init_poll_and_epoll_posix}, }; static void add(const char *beg, const char *end, char ***ss, size_t *ns) { @@ -97,6 +102,7 @@ static void try_engine(const char *engine) { for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) { if (is(engine, g_factories[i].name)) { if ((g_event_engine = g_factories[i].factory())) { + g_poll_strategy_name = g_factories[i].name; gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name); return; } @@ -104,6 +110,9 @@ static void try_engine(const char *engine) { } } +/* Call this only after calling grpc_event_engine_init() */ +const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; } + void grpc_event_engine_init(void) { char *s = gpr_getenv("GRPC_POLL_STRATEGY"); if (s == NULL) { @@ -152,6 +161,10 @@ void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { g_event_engine->fd_shutdown(exec_ctx, fd); } +bool grpc_fd_is_shutdown(grpc_fd *fd) { + return g_event_engine->fd_is_shutdown(fd); +} + void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { g_event_engine->fd_notify_on_read(exec_ctx, fd, closure); @@ -162,11 +175,6 @@ void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, g_event_engine->fd_notify_on_write(exec_ctx, fd, closure); } -grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { - return g_event_engine->fd_get_read_notifier_pollset(exec_ctx, fd); -} - size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; } void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index 3058f22c91..579c84ef70 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -55,6 +55,7 @@ typedef struct grpc_event_engine_vtable { grpc_closure *closure); void 
(*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure); + bool (*fd_is_shutdown)(grpc_fd *fd); grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx, grpc_fd *fd); @@ -98,6 +99,9 @@ typedef struct grpc_event_engine_vtable { void grpc_event_engine_init(void); void grpc_event_engine_shutdown(void); +/* Return the name of the poll strategy */ +const char *grpc_get_poll_strategy_name(); + /* Create a wrapped file descriptor. Requires fd is a non-blocking file descriptor. This takes ownership of closing fd. */ @@ -116,7 +120,10 @@ int grpc_fd_wrapped_fd(grpc_fd *fd); void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, int *release_fd, const char *reason); -/* Cause any current callbacks to error out with GRPC_CALLBACK_CANCELLED. */ +/* Has grpc_fd_shutdown been called on an fd? */ +bool grpc_fd_is_shutdown(grpc_fd *fd); + +/* Cause any current and future callbacks to fail. */ void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd); /* Register read interest, causing read_cb to be called once when fd becomes diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.c index 1faa8eeedf..2532e52e48 100644 --- a/src/core/lib/iomgr/iocp_windows.c +++ b/src/core/lib/iomgr/iocp_windows.c @@ -39,7 +39,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/log.h> -#include <grpc/support/log_win32.h> +#include <grpc/support/log_windows.h> #include <grpc/support/thd.h> #include "src/core/lib/iomgr/iocp_windows.h" diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c index 60cef8ba77..89292a153e 100644 --- a/src/core/lib/iomgr/iomgr.c +++ b/src/core/lib/iomgr/iomgr.c @@ -96,7 +96,8 @@ void grpc_iomgr_shutdown(void) { gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time), gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) { if (g_root_object.next != &g_root_object) { - gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed", + gpr_log(GPR_DEBUG, + "Waiting for %" PRIuPTR " iomgr objects to be destroyed", count_objects()); } last_warning_time = gpr_now(GPR_CLOCK_REALTIME); @@ -114,9 +115,9 @@ void grpc_iomgr_shutdown(void) { if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) { if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) { if (g_root_object.next != &g_root_object) { - gpr_log(GPR_DEBUG, - "Failed to free %d iomgr objects before shutdown deadline: " - "memory leaks are likely", + gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR + " iomgr objects before shutdown deadline: " + "memory leaks are likely", count_objects()); dump_objects("LEAKED"); if (grpc_iomgr_abort_on_leaks()) { diff --git a/src/core/lib/iomgr/iomgr_windows.c b/src/core/lib/iomgr/iomgr_windows.c index 398517fc75..7653f6e635 100644 --- a/src/core/lib/iomgr/iomgr_windows.c +++ b/src/core/lib/iomgr/iomgr_windows.c @@ -35,7 +35,7 @@ #ifdef GPR_WINSOCK_SOCKET -#include "src/core/lib/iomgr/sockaddr_win32.h" +#include "src/core/lib/iomgr/sockaddr_windows.h" #include <grpc/support/log.h> diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c new file mode 100644 index 0000000000..38a1c9b7d4 --- /dev/null +++ b/src/core/lib/iomgr/network_status_tracker.c @@ -0,0 +1,121 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include "src/core/lib/iomgr/endpoint.h" + +typedef struct endpoint_ll_node { + grpc_endpoint *ep; + struct endpoint_ll_node *next; +} endpoint_ll_node; + +static endpoint_ll_node *head = NULL; +static gpr_mu g_endpoint_mutex; +static bool g_init_done = false; + +void grpc_initialize_network_status_monitor() { + g_init_done = true; + gpr_mu_init(&g_endpoint_mutex); + // TODO(makarandd): Install callback with OS to monitor network status. +} + +void grpc_destroy_network_status_monitor() { + for (endpoint_ll_node *curr = head; curr != NULL;) { + endpoint_ll_node *next = curr->next; + gpr_free(curr); + curr = next; + } + gpr_mu_destroy(&g_endpoint_mutex); +} + +void grpc_network_status_register_endpoint(grpc_endpoint *ep) { + if (!g_init_done) { + grpc_initialize_network_status_monitor(); + } + gpr_mu_lock(&g_endpoint_mutex); + if (head == NULL) { + head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); + head->ep = ep; + head->next = NULL; + } else { + endpoint_ll_node *prev_head = head; + head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); + head->ep = ep; + head->next = prev_head; + } + gpr_mu_unlock(&g_endpoint_mutex); +} + +void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { + gpr_mu_lock(&g_endpoint_mutex); + GPR_ASSERT(head); + bool found = false; + endpoint_ll_node *prev = head; + // if we're unregistering the head, just move head to the next + if (ep == head->ep) { + head = head->next; + gpr_free(prev); + found = true; + } else { + for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) { + if (ep == curr->ep) { + prev->next = curr->next; + gpr_free(curr); + found = true; + break; + } + prev = curr; + } + } + gpr_mu_unlock(&g_endpoint_mutex); + GPR_ASSERT(found); +} + +// Walk the linked-list from head and execute shutdown. 
It is possible that +// other threads might be in the process of shutdown as well, but that has +// no side effect since endpoint shutdown is idempotent. +void grpc_network_status_shutdown_all_endpoints() { + gpr_mu_lock(&g_endpoint_mutex); + if (head == NULL) { + gpr_mu_unlock(&g_endpoint_mutex); + return; + } + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + + for (endpoint_ll_node *curr = head; curr != NULL; curr = curr->next) { + curr->ep->vtable->shutdown(&exec_ctx, curr->ep); + } + gpr_mu_unlock(&g_endpoint_mutex); + grpc_exec_ctx_finish(&exec_ctx); +} diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h new file mode 100644 index 0000000000..74a1aa8135 --- /dev/null +++ b/src/core/lib/iomgr/network_status_tracker.h @@ -0,0 +1,41 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H +#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H +#include "src/core/lib/iomgr/endpoint.h" + +void grpc_network_status_register_endpoint(grpc_endpoint *ep); +void grpc_network_status_unregister_endpoint(grpc_endpoint *ep); +void grpc_network_status_shutdown_all_endpoints(); +#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */ diff --git a/src/core/lib/iomgr/polling_entity.c b/src/core/lib/iomgr/polling_entity.c new file mode 100644 index 0000000000..d1686aa12f --- /dev/null +++ b/src/core/lib/iomgr/polling_entity.c @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> + +#include "src/core/lib/iomgr/polling_entity.h" + +grpc_polling_entity grpc_polling_entity_create_from_pollset_set( + grpc_pollset_set *pollset_set) { + grpc_polling_entity pollent; + pollent.pollent.pollset_set = pollset_set; + pollent.tag = POPS_POLLSET_SET; + return pollent; +} + +grpc_polling_entity grpc_polling_entity_create_from_pollset( + grpc_pollset *pollset) { + grpc_polling_entity pollent; + pollent.pollent.pollset = pollset; + pollent.tag = POPS_POLLSET; + return pollent; +} + +grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) { + if (pollent->tag == POPS_POLLSET) { + return pollent->pollent.pollset; + } + return NULL; +} + +grpc_pollset_set *grpc_polling_entity_pollset_set( + grpc_polling_entity *pollent) { + if (pollent->tag == POPS_POLLSET_SET) { + return pollent->pollent.pollset_set; + } + return NULL; +} + +bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) { + return pollent->tag == POPS_NONE; +} + +void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_polling_entity *pollent, + grpc_pollset_set *pss_dst) { + if (pollent->tag == POPS_POLLSET) { + GPR_ASSERT(pollent->pollent.pollset != NULL); + grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset); + } else if (pollent->tag == POPS_POLLSET_SET) { + GPR_ASSERT(pollent->pollent.pollset_set != NULL); + grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst, + pollent->pollent.pollset_set); + } else { + gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag); + abort(); + } +} + +void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_polling_entity *pollent, + grpc_pollset_set *pss_dst) { + if (pollent->tag == POPS_POLLSET) { + GPR_ASSERT(pollent->pollent.pollset != NULL); + grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset); + } else if (pollent->tag == POPS_POLLSET_SET) { + GPR_ASSERT(pollent->pollent.pollset_set != NULL); + grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst, + pollent->pollent.pollset_set); + } else { + gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag); + abort(); + } +} diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h new file mode 100644 
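polling_entity.c above is the entire implementation of the new tagged-union type; a short usage sketch (invented caller and field names, shown only to illustrate the intent) makes the point of the abstraction clear: code that needs to register polling interest can take a grpc_polling_entity and stop caring whether its caller holds a pollset or a pollset_set.

/* Illustrative only: how a channel-layer helper might accept either kind. */
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/polling_entity.h"

/* hypothetical helper, not part of the patch */
static void start_watch(grpc_exec_ctx *exec_ctx, grpc_polling_entity *pollent,
                        grpc_pollset_set *interested_parties) {
  /* Works whether pollent wraps a pollset or a pollset_set;
     empty (POPS_NONE) entities are rejected up front. */
  GPR_ASSERT(!grpc_polling_entity_is_empty(pollent));
  grpc_polling_entity_add_to_pollset_set(exec_ctx, pollent, interested_parties);
}

/* Call sites pick whichever constructor matches what they have:
     grpc_polling_entity pe = grpc_polling_entity_create_from_pollset(ps);
     grpc_polling_entity pe = grpc_polling_entity_create_from_pollset_set(pss);
     start_watch(exec_ctx, &pe, chand->interested_parties);  */

Both constructors copy only a pointer, so the entity can be passed by value; ownership of the wrapped pollset or pollset_set stays with the caller, matching the "No ownership is taken" note in polling_entity.h.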
index 0000000000..e81531053c --- /dev/null +++ b/src/core/lib/iomgr/polling_entity.h @@ -0,0 +1,81 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H +#define GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H + +#include "src/core/lib/iomgr/pollset.h" +#include "src/core/lib/iomgr/pollset_set.h" + +/* A grpc_polling_entity is a pollset-or-pollset_set container. It allows + * functions that + * accept a pollset XOR a pollset_set to do so through an abstract interface. + * No ownership is taken. */ + +typedef struct grpc_polling_entity { + union { + grpc_pollset *pollset; + grpc_pollset_set *pollset_set; + } pollent; + enum pops_tag { POPS_NONE, POPS_POLLSET, POPS_POLLSET_SET } tag; +} grpc_polling_entity; + +grpc_polling_entity grpc_polling_entity_create_from_pollset_set( + grpc_pollset_set *pollset_set); +grpc_polling_entity grpc_polling_entity_create_from_pollset( + grpc_pollset *pollset); + +/** If \a pollent contains a pollset, return it. Otherwise, return NULL */ +grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent); + +/** If \a pollent contains a pollset_set, return it. 
Otherwise, return NULL */ +grpc_pollset_set *grpc_polling_entity_pollset_set(grpc_polling_entity *pollent); + +bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent); + +/** Add the pollset or pollset_set in \a pollent to the destination pollset_set + * \a + * pss_dst */ +void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_polling_entity *pollent, + grpc_pollset_set *pss_dst); + +/** Delete the pollset or pollset_set in \a pollent from the destination + * pollset_set \a + * pss_dst */ +void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx, + grpc_polling_entity *pollent, + grpc_pollset_set *pss_dst); +/* pollset_set specific */ + +#endif /* GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H */ diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c index 89f60b92fb..a35a9766fc 100644 --- a/src/core/lib/iomgr/pollset_set_windows.c +++ b/src/core/lib/iomgr/pollset_set_windows.c @@ -32,12 +32,15 @@ */ #include <grpc/support/port_platform.h> +#include <stdint.h> #ifdef GPR_WINSOCK_SOCKET #include "src/core/lib/iomgr/pollset_set_windows.h" -grpc_pollset_set* grpc_pollset_set_create(void) { return NULL; } +grpc_pollset_set* grpc_pollset_set_create(void) { + return (grpc_pollset_set*)((intptr_t)0xdeafbeef); +} void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c index fac4d13a27..2af8af82dc 100644 --- a/src/core/lib/iomgr/resolve_address_windows.c +++ b/src/core/lib/iomgr/resolve_address_windows.c @@ -43,7 +43,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/host_port.h> #include <grpc/support/log.h> -#include <grpc/support/log_win32.h> +#include <grpc/support/log_windows.h> #include <grpc/support/string_util.h> #include <grpc/support/thd.h> #include <grpc/support/time.h> diff --git a/src/core/lib/iomgr/sockaddr.h b/src/core/lib/iomgr/sockaddr.h index 891a2f094f..5563d0b8a6 100644 --- a/src/core/lib/iomgr/sockaddr.h +++ b/src/core/lib/iomgr/sockaddr.h @@ -36,8 +36,8 @@ #include <grpc/support/port_platform.h> -#ifdef GPR_WIN32 -#include "src/core/lib/iomgr/sockaddr_win32.h" +#ifdef GPR_WINDOWS +#include "src/core/lib/iomgr/sockaddr_windows.h" #endif #ifdef GPR_POSIX_SOCKETADDR diff --git a/src/core/lib/iomgr/sockaddr_win32.h b/src/core/lib/iomgr/sockaddr_windows.h index 02aeae7619..971db5b32b 100644 --- a/src/core/lib/iomgr/sockaddr_win32.h +++ b/src/core/lib/iomgr/sockaddr_windows.h @@ -31,8 +31,8 @@ * */ -#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_WIN32_H -#define GRPC_CORE_LIB_IOMGR_SOCKADDR_WIN32_H +#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H +#define GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H #include <winsock2.h> #include <ws2tcpip.h> @@ -40,4 +40,4 @@ // must be included after the above #include <mswsock.h> -#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_WIN32_H */ +#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H */ diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c index bd8ef00c85..d2f6261e2a 100644 --- a/src/core/lib/iomgr/socket_utils_common_posix.c +++ b/src/core/lib/iomgr/socket_utils_common_posix.c @@ -49,6 +49,7 @@ #include <sys/types.h> #include <unistd.h> +#include <grpc/support/alloc.h> #include <grpc/support/host_port.h> #include <grpc/support/log.h> #include <grpc/support/port_platform.h> @@ -116,6 +117,20 @@ grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) { return GRPC_ERROR_NONE; } +grpc_error 
*grpc_set_socket_sndbuf(int fd, int buffer_size_bytes) { + return 0 == setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffer_size_bytes, + sizeof(buffer_size_bytes)) + ? GRPC_ERROR_NONE + : GRPC_OS_ERROR(errno, "setsockopt(SO_SNDBUF)"); +} + +grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) { + return 0 == setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buffer_size_bytes, + sizeof(buffer_size_bytes)) + ? GRPC_ERROR_NONE + : GRPC_OS_ERROR(errno, "setsockopt(SO_RCVBUF)"); +} + /* set a socket to close on exec */ grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec) { int oldflags = fcntl(fd, F_GETFD, 0); @@ -238,6 +253,16 @@ static int set_socket_dualstack(int fd) { } } +static grpc_error *error_for_fd(int fd, const struct sockaddr *addr) { + if (fd >= 0) return GRPC_ERROR_NONE; + char *addr_str; + grpc_sockaddr_to_string(&addr_str, addr, 0); + grpc_error *err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"), + GRPC_ERROR_STR_TARGET_ADDRESS, addr_str); + gpr_free(addr_str); + return err; +} + grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type, int protocol, grpc_dualstack_mode *dsmode, @@ -258,7 +283,7 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type, /* If this isn't an IPv4 address, then return whatever we've got. */ if (!grpc_sockaddr_is_v4mapped(addr, NULL)) { *dsmode = GRPC_DSMODE_IPV6; - return GRPC_ERROR_NONE; + return error_for_fd(*newfd, addr); } /* Fall back to AF_INET. */ if (*newfd >= 0) { @@ -268,10 +293,7 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type, } *dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE; *newfd = socket(family, type, protocol); - if (*newfd == -1) { - return GRPC_OS_ERROR(errno, "socket"); - } - return GRPC_ERROR_NONE; + return error_for_fd(*newfd, addr); } #endif diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h index 607fbc7a79..7bcc2219ae 100644 --- a/src/core/lib/iomgr/socket_utils_posix.h +++ b/src/core/lib/iomgr/socket_utils_posix.h @@ -69,20 +69,23 @@ grpc_error *grpc_set_socket_reuse_port(int fd, int reuse); int grpc_ipv6_loopback_available(void); /* Tries to set SO_NOSIGPIPE if available on this platform. - Returns 1 on success, 0 on failure. If SO_NO_SIGPIPE is not available, returns 1. */ grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd); /* Tries to set IP_PKTINFO if available on this platform. - Returns 1 on success, 0 on failure. If IP_PKTINFO is not available, returns 1. */ grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd); /* Tries to set IPV6_RECVPKTINFO if available on this platform. - Returns 1 on success, 0 on failure. If IPV6_RECVPKTINFO is not available, returns 1. */ grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd); +/* Tries to set the socket's send buffer to given size. */ +grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes); + +/* Tries to set the socket's receive buffer to given size. */ +grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes); + /* An enum to keep track of IPv4/IPv6 socket modes. 
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index fbc1bb5301..d7d5f6f157 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -42,7 +42,7 @@
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
 #include <grpc/support/string_util.h>
 
 #include "src/core/lib/iomgr/iocp_windows.h"
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index 6134d4476b..562cb9c6bf 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -35,11 +35,11 @@
 
 #ifdef GPR_WINSOCK_SOCKET
 
-#include "src/core/lib/iomgr/sockaddr_win32.h"
+#include "src/core/lib/iomgr/sockaddr_windows.h"
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
 #include <grpc/support/slice_buffer.h>
 #include <grpc/support/useful.h>
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 2f7c50ce35..2ab45e33ce 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -35,6 +35,7 @@
 
 #ifdef GPR_POSIX_SOCKET
 
+#include "src/core/lib/iomgr/network_status_tracker.h"
 #include "src/core/lib/iomgr/tcp_posix.h"
 
 #include <errno.h>
@@ -152,6 +153,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
 #endif
 
 static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+  grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
   TCP_UNREF(exec_ctx, tcp, "destroy");
 }
@@ -410,7 +412,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 
   if (buf->length == 0) {
     GPR_TIMER_END("tcp_write", 0);
-    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+    grpc_exec_ctx_sched(exec_ctx, cb, grpc_fd_is_shutdown(tcp->em_fd)
+                                          ? GRPC_ERROR_CREATE("EOF")
+                                          : GRPC_ERROR_NONE,
+                        NULL);
     return;
   }
   tcp->outgoing_buffer = buf;
@@ -471,6 +476,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
   tcp->write_closure.cb = tcp_handle_write;
   tcp->write_closure.cb_arg = tcp;
   gpr_slice_buffer_init(&tcp->last_read_buffer);
+  /* Tell network status tracker about new endpoint */
+  grpc_network_status_register_endpoint(&tcp->base);
   return &tcp->base;
 }
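The tcp_write change above means a zero-length write on an already shut-down fd now completes with an "EOF" error instead of GRPC_ERROR_NONE. A minimal sketch (not part of this patch; the callback signature is assumed from the grpc_closure convention used elsewhere in iomgr) of how a caller might react:

/* Hypothetical write-completion callback: treat any non-OK error, including
   the new EOF-after-shutdown case, as "endpoint no longer writable". */
static void on_write_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  if (error != GRPC_ERROR_NONE) {
    /* Stop issuing writes and begin transport teardown here. */
  }
}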
GRPC_ERROR_CREATE("EOF") + : GRPC_ERROR_NONE, + NULL); return; } tcp->outgoing_buffer = buf; @@ -471,6 +476,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, tcp->write_closure.cb = tcp_handle_write; tcp->write_closure.cb_arg = tcp; gpr_slice_buffer_init(&tcp->last_read_buffer); + /* Tell network status tracker about new endpoint */ + grpc_network_status_register_endpoint(&tcp->base); return &tcp->base; } diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index 8c551785c1..eda2585c46 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -595,9 +595,9 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr, if (port == 0 && sp != NULL) { grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port); } - addr = (struct sockaddr *)&wild4; - addr_len = sizeof(wild4); } + addr = (struct sockaddr *)&wild4; + addr_len = sizeof(wild4); } errs[1] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd); diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c index b4f28a4c3b..7b0966704c 100644 --- a/src/core/lib/iomgr/tcp_server_windows.c +++ b/src/core/lib/iomgr/tcp_server_windows.c @@ -41,7 +41,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/log.h> -#include <grpc/support/log_win32.h> +#include <grpc/support/log_windows.h> #include <grpc/support/string_util.h> #include <grpc/support/sync.h> #include <grpc/support/time.h> @@ -103,6 +103,7 @@ struct grpc_tcp_server { /* Public function. Allocates the proper data structures to hold a grpc_tcp_server. */ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, + const grpc_channel_args *args, grpc_tcp_server **server) { grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server)); gpr_ref_init(&s->refs, 1); @@ -396,7 +397,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock, size_t addr_len, unsigned port_index, grpc_tcp_listener **listener) { grpc_tcp_listener *sp = NULL; - int port; + int port = -1; int status; GUID guid = WSAID_ACCEPTEX; DWORD ioctl_num_bytes; diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c index b2003675be..37ab59021e 100644 --- a/src/core/lib/iomgr/tcp_windows.c +++ b/src/core/lib/iomgr/tcp_windows.c @@ -37,11 +37,12 @@ #include <limits.h> -#include "src/core/lib/iomgr/sockaddr_win32.h" +#include "src/core/lib/iomgr/network_status_tracker.h" +#include "src/core/lib/iomgr/sockaddr_windows.h" #include <grpc/support/alloc.h> #include <grpc/support/log.h> -#include <grpc/support/log_win32.h> +#include <grpc/support/log_windows.h> #include <grpc/support/slice_buffer.h> #include <grpc/support/string_util.h> #include <grpc/support/useful.h> @@ -378,6 +379,7 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { } static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { + grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; TCP_UNREF(tcp, "destroy"); } @@ -401,6 +403,9 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { grpc_closure_init(&tcp->on_read, on_read, tcp); grpc_closure_init(&tcp->on_write, on_write, tcp); tcp->peer_string = gpr_strdup(peer_string); + /* Tell network status tracking code about the new endpoint */ + grpc_network_status_register_endpoint(&tcp->base); + return &tcp->base; } diff --git a/src/core/lib/iomgr/timer.c b/src/core/lib/iomgr/timer.c index 7b0ef550a7..9975fa1671 100644 --- 
diff --git a/src/core/lib/iomgr/timer.c b/src/core/lib/iomgr/timer.c
index 7b0ef550a7..9975fa1671 100644
--- a/src/core/lib/iomgr/timer.c
+++ b/src/core/lib/iomgr/timer.c
@@ -287,8 +287,8 @@ static int refill_queue(shard_type *shard, gpr_timespec now) {
   return !grpc_timer_heap_is_empty(&shard->heap);
 }
 
-/* This pops the next non-cancelled timer with deadline <= now from the queue,
-   or returns NULL if there isn't one.
+/* This pops the next non-cancelled timer with deadline <= now from the
+   queue, or returns NULL if there isn't one.
    REQUIRES: shard->mu locked */
 static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
   grpc_timer *timer;
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 98ffccd59b..1ebccf2ee2 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -210,6 +210,8 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
                           size_t addr_len) {
   struct sockaddr_storage sockname_temp;
   socklen_t sockname_len;
+  /* Set send/receive socket buffers to 1 MB */
+  int buffer_size_bytes = 1024 * 1024;
 
   if (fd < 0) {
     goto error;
@@ -239,6 +241,18 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
     goto error;
   }
 
+  if (!grpc_set_socket_sndbuf(fd, buffer_size_bytes)) {
+    gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes",
+            buffer_size_bytes);
+    goto error;
+  }
+
+  if (!grpc_set_socket_rcvbuf(fd, buffer_size_bytes)) {
+    gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes",
+            buffer_size_bytes);
+    goto error;
+  }
+
   return grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
 
 error:
diff --git a/src/core/lib/iomgr/wakeup_fd_eventfd.c b/src/core/lib/iomgr/wakeup_fd_eventfd.c
index 667b4a5f90..95f6102330 100644
--- a/src/core/lib/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/lib/iomgr/wakeup_fd_eventfd.c
@@ -84,8 +84,10 @@ static void eventfd_destroy(grpc_wakeup_fd* fd_info) {
 }
 
 static int eventfd_check_availability(void) {
-  /* TODO(klempner): Actually check if eventfd is available */
-  return 1;
+  const int efd = eventfd(0, 0);
+  const int is_available = efd >= 0;
+  if (is_available) close(efd);
+  return is_available;
 }
 
 const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index f4a2cfa4fd..ac765375eb 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -44,7 +44,7 @@
 #include "src/core/lib/iomgr/workqueue_posix.h"
 #endif
 
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
 #include "src/core/lib/iomgr/workqueue_windows.h"
 #endif
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index c3c0446a57..275f040b1c 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -33,8 +33,8 @@
 
 #include <grpc/support/port_platform.h>
 
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
 #include "src/core/lib/iomgr/workqueue.h"
 
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
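The wakeup_fd_eventfd.c change above replaces the unconditional "return 1" with a real probe: create an eventfd, record whether that succeeded, and close it again. A standalone sketch of the same technique (not part of this patch; have_eventfd is a hypothetical name, Linux only):

#include <sys/eventfd.h>
#include <unistd.h>

/* Returns 1 if eventfd(2) works on this kernel, 0 otherwise; the fd created
   purely for the probe is closed immediately. */
static int have_eventfd(void) {
  const int efd = eventfd(0, 0);
  if (efd < 0) return 0;
  close(efd);
  return 1;
}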