Diffstat (limited to 'src/core/lib/iomgr')
43 files changed, 1036 insertions, 572 deletions
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index da0ec878a3..509c1ff95d 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -34,6 +34,7 @@
#include "src/core/lib/iomgr/closure.h"
#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include "src/core/lib/profiling/timers.h"
@@ -50,20 +51,22 @@ void grpc_closure_list_init(grpc_closure_list *closure_list) {
  closure_list->head = closure_list->tail = NULL;
}
-void grpc_closure_list_append(grpc_closure_list *closure_list,
+bool grpc_closure_list_append(grpc_closure_list *closure_list,
                              grpc_closure *closure, grpc_error *error) {
  if (closure == NULL) {
    GRPC_ERROR_UNREF(error);
-    return;
+    return false;
  }
  closure->error_data.error = error;
  closure->next_data.next = NULL;
-  if (closure_list->head == NULL) {
+  bool was_empty = (closure_list->head == NULL);
+  if (was_empty) {
    closure_list->head = closure;
  } else {
    closure_list->tail->next_data.next = closure;
  }
  closure_list->tail = closure;
+  return was_empty;
}
void grpc_closure_list_fail_all(grpc_closure_list *list,
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index ee386fbc76..2510d50b42 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -66,6 +66,7 @@ typedef struct grpc_closure_scheduler_vtable {
                grpc_error *error);
  void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                grpc_error *error);
+  const char *name;
} grpc_closure_scheduler_vtable;
/** Abstract type that can schedule closures for execution */
@@ -115,8 +116,9 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
void grpc_closure_list_init(grpc_closure_list *list);
/** add \a closure to the end of \a list
-    and set \a closure's result to \a error */
-void grpc_closure_list_append(grpc_closure_list *list, grpc_closure *closure,
+    and set \a closure's result to \a error
+    Returns true if \a list becomes non-empty */
+bool grpc_closure_list_append(grpc_closure_list *list, grpc_closure *closure,
                              grpc_error *error);
/** force all success bits in \a list to false */
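[Editor's note: the new bool return lets a caller detect the empty -> non-empty transition. A minimal sketch of such a caller, where wake_consumer() is a hypothetical stand-in for whatever mechanism (poller kick, wakeup fd, ...) drains the list; it is not part of this change.]

static void enqueue_work(grpc_closure_list *list, grpc_closure *c,
                         grpc_error *error) {
  if (grpc_closure_list_append(list, c, error)) {
    /* only the first enqueue after the list drains needs a wakeup */
    wake_consumer();
  }
}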
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index c26a73b2b7..fa9966c3a6 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -72,6 +72,7 @@ struct grpc_combiner {
  bool final_list_covered_by_poller;
  grpc_closure_list final_list;
  grpc_closure offload;
+  gpr_refcount refs;
};
static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
@@ -86,13 +87,17 @@ static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
                                          grpc_error *error);
static const grpc_closure_scheduler_vtable scheduler_uncovered = {
-    combiner_exec_uncovered, combiner_exec_uncovered};
+    combiner_exec_uncovered, combiner_exec_uncovered,
+    "combiner:immediately:uncovered"};
static const grpc_closure_scheduler_vtable scheduler_covered = {
-    combiner_exec_covered, combiner_exec_covered};
+    combiner_exec_covered, combiner_exec_covered,
+    "combiner:immediately:covered"};
static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
-    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered};
+    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
+    "combiner:finally:uncovered"};
static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
-    combiner_finally_exec_covered, combiner_finally_exec_covered};
+    combiner_finally_exec_covered, combiner_finally_exec_covered,
+    "combiner:finally:covered"};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
@@ -122,6 +127,7 @@ static bool is_covered_by_poller(grpc_combiner *lock) {
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
  grpc_combiner *lock = gpr_malloc(sizeof(*lock));
+  gpr_ref_init(&lock->refs, 1);
  lock->next_combiner_on_this_exec_ctx = NULL;
  lock->time_to_execute_final_list = false;
  lock->optional_workqueue = optional_workqueue;
@@ -148,7 +154,7 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  gpr_free(lock);
}
-void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
@@ -157,6 +163,30 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  }
}
+#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
+#define GRPC_COMBINER_DEBUG_SPAM(op, delta)                                 \
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,                               \
+          "combiner[%p] %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op),   \
+          gpr_atm_no_barrier_load(&lock->refs.count),                       \
+          gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason);
+#else
+#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
+#endif
+
+void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
+                         grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) {
+  GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
+  if (gpr_unref(&lock->refs)) {
+    start_destroy(exec_ctx, lock);
+  }
+}
+
+grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) {
+  GRPC_COMBINER_DEBUG_SPAM(" REF", 1);
+  gpr_ref(&lock->refs);
+  return lock;
+}
+
static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
                                  grpc_combiner *lock) {
  lock->next_combiner_on_this_exec_ctx = NULL;
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 81dff85d40..75dcb0b70a 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -48,8 +48,27 @@
// Initialize the lock, with an optional workqueue to shift load to when
// necessary
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
-// Destroy the lock
-void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
+
+//#define GRPC_COMBINER_REFCOUNT_DEBUG
+#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
+#define GRPC_COMBINER_DEBUG_ARGS \
+  , const char *file, int line, const char *reason
+#define GRPC_COMBINER_REF(combiner, reason) \
+  grpc_combiner_ref((combiner), __FILE__, __LINE__, (reason))
+#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \
+  grpc_combiner_unref((exec_ctx), (combiner), __FILE__, __LINE__, (reason))
+#else
+#define GRPC_COMBINER_DEBUG_ARGS
+#define GRPC_COMBINER_REF(combiner, reason) grpc_combiner_ref((combiner))
+#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \
+  grpc_combiner_unref((exec_ctx), (combiner))
+#endif
+
+// Ref/unref the lock, for when we're sharing the lock ownership
+// Prefer to use the macros above
+grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
+void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
+                         grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
// Fetch a scheduler to schedule closures against
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
                                                bool covered_by_poller);
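[Editor's note: with combiners now reference counted, shared owners take and drop refs through the macros above; under GRPC_COMBINER_REFCOUNT_DEBUG each operation logs file, line and reason. A hedged usage sketch — the reason strings are illustrative:]

static void share_combiner(grpc_exec_ctx *exec_ctx) {
  grpc_combiner *lock = grpc_combiner_create(NULL);
  /* a second owner takes a ref */
  grpc_combiner *shared = GRPC_COMBINER_REF(lock, "second_owner");
  /* ... both owners schedule closures via grpc_combiner_scheduler(lock, ...) ... */
  GRPC_COMBINER_UNREF(exec_ctx, shared, "second_owner");
  GRPC_COMBINER_UNREF(exec_ctx, lock, "creator"); /* last unref starts destroy */
}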
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index 2d300f4560..bf6e98146a 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -54,8 +54,9 @@ void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
  ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set);
}
-void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
-  ep->vtable->shutdown(exec_ctx, ep);
+void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+                            grpc_error* why) {
+  ep->vtable->shutdown(exec_ctx, ep, why);
}
void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 1609b64f2b..740357ecc5 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -57,7 +57,7 @@ struct grpc_endpoint_vtable {
                      grpc_pollset *pollset);
  void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                             grpc_pollset_set *pollset);
-  void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+  void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *why);
  void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
  grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
  char *(*get_peer)(grpc_endpoint *ep);
@@ -96,7 +96,8 @@ void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* Causes any pending and future read/write callbacks to run immediately with
   success==0 */
-void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+                            grpc_error *why);
void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
/* Add an endpoint to a pollset, so that when the pollset is polled, events from
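[Editor's note: call sites now pass an explicit shutdown reason; ownership of 'why' passes to the endpoint. A minimal sketch of the calling convention — the error text is illustrative:]

static void abandon_endpoint(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  /* the error propagates into whatever read/write callbacks fail as a result */
  grpc_endpoint_shutdown(exec_ctx, ep, GRPC_ERROR_CREATE("Channel destroyed"));
}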
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index f6bb3a0477..dbe5b139f9 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -33,13 +33,10 @@
#include "src/core/lib/iomgr/error.h"
-#include <inttypes.h>
-#include <stdbool.h>
#include <string.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/avl.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -48,6 +45,7 @@
#include <grpc/support/log_windows.h>
#endif
+#include "src/core/lib/iomgr/error_internal.h"
#include "src/core/lib/profiling/timers.h"
static void destroy_integer(void *key) {}
@@ -128,6 +126,10 @@ static const char *error_int_name(grpc_error_ints key) {
static const char *error_str_name(grpc_error_strs key) {
  switch (key) {
+    case GRPC_ERROR_STR_KEY:
+      return "key";
+    case GRPC_ERROR_STR_VALUE:
+      return "value";
    case GRPC_ERROR_STR_DESCRIPTION:
      return "description";
    case GRPC_ERROR_STR_OS_ERROR:
@@ -160,16 +162,7 @@ static const char *error_time_name(grpc_error_times key) {
  GPR_UNREACHABLE_CODE(return "unknown");
}
-struct grpc_error {
-  gpr_refcount refs;
-  gpr_avl ints;
-  gpr_avl strs;
-  gpr_avl times;
-  gpr_avl errs;
-  uintptr_t next_err;
-};
-
-static bool is_special(grpc_error *err) {
+bool grpc_error_is_special(grpc_error *err) {
  return err == GRPC_ERROR_NONE || err == GRPC_ERROR_OOM ||
         err == GRPC_ERROR_CANCELLED;
}
@@ -177,7 +170,7 @@ static bool is_special(grpc_error *err) {
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
                           const char *func) {
-  if (is_special(err)) return err;
+  if (grpc_error_is_special(err)) return err;
  gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
          err->refs.count, err->refs.count + 1, file, line, func);
  gpr_ref(&err->refs);
@@ -185,25 +178,26 @@ grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
}
#else
grpc_error *grpc_error_ref(grpc_error *err) {
-  if (is_special(err)) return err;
+  if (grpc_error_is_special(err)) return err;
  gpr_ref(&err->refs);
  return err;
}
#endif
static void error_destroy(grpc_error *err) {
-  GPR_ASSERT(!is_special(err));
+  GPR_ASSERT(!grpc_error_is_special(err));
  gpr_avl_unref(err->ints);
  gpr_avl_unref(err->strs);
  gpr_avl_unref(err->errs);
  gpr_avl_unref(err->times);
+  gpr_free((void *)gpr_atm_acq_load(&err->error_string));
  gpr_free(err);
}
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
void grpc_error_unref(grpc_error *err, const char *file, int line,
                      const char *func) {
-  if (is_special(err)) return;
+  if (grpc_error_is_special(err)) return;
  gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
          err->refs.count, err->refs.count - 1, file, line, func);
  if (gpr_unref(&err->refs)) {
@@ -212,7 +206,7 @@ void grpc_error_unref(grpc_error *err, const char *file, int line,
}
#else
void grpc_error_unref(grpc_error *err) {
-  if (is_special(err)) return;
+  if (grpc_error_is_special(err)) return;
  if (gpr_unref(&err->refs)) {
    error_destroy(err);
  }
@@ -247,6 +241,7 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
  err->times = gpr_avl_add(gpr_avl_create(&avl_vtable_times),
                           (void *)(uintptr_t)GRPC_ERROR_TIME_CREATED,
                           box_time(gpr_now(GPR_CLOCK_REALTIME)));
+  gpr_atm_no_barrier_store(&err->error_string, 0);
  gpr_ref_init(&err->refs, 1);
  GPR_TIMER_END("grpc_error_create", 0);
  return err;
@@ -255,9 +250,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
  GPR_TIMER_BEGIN("copy_error_and_unref", 0);
  grpc_error *out;
-  if (is_special(in)) {
+  if (grpc_error_is_special(in)) {
    if (in == GRPC_ERROR_NONE)
-      out = GRPC_ERROR_CREATE("no error");
+      out = grpc_error_set_int(GRPC_ERROR_CREATE("no error"),
+                               GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK);
    else if (in == GRPC_ERROR_OOM)
      out = GRPC_ERROR_CREATE("oom");
    else if (in == GRPC_ERROR_CANCELLED)
@@ -275,6 +271,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
  out->strs = gpr_avl_ref(in->strs);
  out->errs = gpr_avl_ref(in->errs);
  out->times = gpr_avl_ref(in->times);
+  gpr_atm_no_barrier_store(&out->error_string, 0);
  out->next_err = in->next_err;
  gpr_ref_init(&out->refs, 1);
  GRPC_ERROR_UNREF(in);
@@ -292,14 +289,29 @@ grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
  return new;
}
+typedef struct {
+  grpc_error *error;
+  grpc_status_code code;
+  const char *msg;
+} special_error_status_map;
+static special_error_status_map error_status_map[] = {
+    {GRPC_ERROR_NONE, GRPC_STATUS_OK, NULL},
+    {GRPC_ERROR_CANCELLED, GRPC_STATUS_CANCELLED, "Cancelled"},
+    {GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"},
+};
+
bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
  GPR_TIMER_BEGIN("grpc_error_get_int", 0);
  void *pp;
-  if (is_special(err)) {
-    if (err == GRPC_ERROR_CANCELLED && which == GRPC_ERROR_INT_GRPC_STATUS) {
-      *p = GRPC_STATUS_CANCELLED;
-      GPR_TIMER_END("grpc_error_get_int", 0);
-      return true;
+  if (grpc_error_is_special(err)) {
+    if (which == GRPC_ERROR_INT_GRPC_STATUS) {
+      for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) {
+        if (error_status_map[i].error == err) {
+          if (p != NULL) *p = error_status_map[i].code;
+          GPR_TIMER_END("grpc_error_get_int", 0);
+          return true;
+        }
+      }
    }
    GPR_TIMER_END("grpc_error_get_int", 0);
    return false;
@@ -324,66 +336,17 @@ grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
}
const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) {
-  if (is_special(err)) return NULL;
-  return gpr_avl_get(err->strs, (void *)(uintptr_t)which);
-}
-
-typedef struct {
-  grpc_error *error;
-  grpc_status_code code;
-  const char *msg;
-} special_error_status_map;
-static special_error_status_map error_status_map[] = {
-    {GRPC_ERROR_NONE, GRPC_STATUS_OK, ""},
-    {GRPC_ERROR_CANCELLED, GRPC_STATUS_CANCELLED, "RPC cancelled"},
-    {GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"},
-};
-
-static grpc_error *recursively_find_error_with_status(grpc_error *error,
-                                                      intptr_t *status) {
-  // If the error itself has a status code, return it.
-  if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, status)) {
-    return error;
-  }
-  // Otherwise, search through its children.
-  intptr_t key = 0;
-  while (true) {
-    grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++);
-    if (child_error == NULL) break;
-    grpc_error *result =
-        recursively_find_error_with_status(child_error, status);
-    if (result != NULL) return result;
-  }
-  return NULL;
-}
-
-void grpc_error_get_status(grpc_error *error, grpc_status_code *code,
-                           const char **msg) {
-  // Handle special errors via the static map.
-  for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); ++i) {
-    if (error == error_status_map[i].error) {
-      *code = error_status_map[i].code;
-      *msg = error_status_map[i].msg;
-      return;
+  if (grpc_error_is_special(err)) {
+    if (which == GRPC_ERROR_STR_GRPC_MESSAGE) {
+      for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) {
+        if (error_status_map[i].error == err) {
+          return error_status_map[i].msg;
+        }
+      }
    }
+    return NULL;
  }
-  // Populate code.
-  // Start with the parent error and recurse through the tree of children
-  // until we find the first one that has a status code.
-  intptr_t status = GRPC_STATUS_UNKNOWN;  // Default in case we don't find one.
-  grpc_error *found_error = recursively_find_error_with_status(error, &status);
-  *code = (grpc_status_code)status;
-  // Now populate msg.
-  // If we found an error with a status code above, use that; otherwise,
-  // fall back to using the parent error.
-  if (found_error == NULL) found_error = error;
-  // If the error has a status message, use it. Otherwise, fall back to
-  // the error description.
-  *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE);
-  if (*msg == NULL) {
-    *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION);
-    if (*msg == NULL) *msg = "uknown error";  // Just in case.
-  }
+  return gpr_avl_get(err->strs, (void *)(uintptr_t)which);
}
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
@@ -535,7 +498,6 @@ static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap,
  *first = false;
  const char *e = grpc_error_string(n->value);
  append_str(e, s, sz, cap);
-  grpc_error_free_string(e);
  add_errs(n->right, s, sz, cap, first);
}
@@ -557,7 +519,7 @@ static int cmp_kvs(const void *a, const void *b) {
  return strcmp(ka->key, kb->key);
}
-static const char *finish_kvs(kv_pairs *kvs) {
+static char *finish_kvs(kv_pairs *kvs) {
  char *s = NULL;
  size_t sz = 0;
  size_t cap = 0;
@@ -578,19 +540,18 @@ static const char *finish_kvs(kv_pairs *kvs) {
  return s;
}
-void grpc_error_free_string(const char *str) {
-  if (str == no_error_string) return;
-  if (str == oom_error_string) return;
-  if (str == cancelled_error_string) return;
-  gpr_free((char *)str);
-}
-
const char *grpc_error_string(grpc_error *err) {
  GPR_TIMER_BEGIN("grpc_error_string", 0);
  if (err == GRPC_ERROR_NONE) return no_error_string;
  if (err == GRPC_ERROR_OOM) return oom_error_string;
  if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string;
+  void *p = (void *)gpr_atm_acq_load(&err->error_string);
+  if (p != NULL) {
+    GPR_TIMER_END("grpc_error_string", 0);
+    return p;
+  }
+
  kv_pairs kvs;
  memset(&kvs, 0, sizeof(kvs));
@@ -603,7 +564,13 @@ const char *grpc_error_string(grpc_error *err) {
  qsort(kvs.kvs, kvs.num_kvs, sizeof(kv_pair), cmp_kvs);
-  const char *out = finish_kvs(&kvs);
+  char *out = finish_kvs(&kvs);
+
+  if (!gpr_atm_rel_cas(&err->error_string, 0, (gpr_atm)out)) {
+    gpr_free(out);
+    out = (char *)gpr_atm_no_barrier_load(&err->error_string);
+  }
+
  GPR_TIMER_END("grpc_error_string", 0);
  return out;
}
@@ -638,7 +605,6 @@ bool grpc_log_if_error(const char *what, grpc_error *error, const char *file,
  if (error == GRPC_ERROR_NONE) return true;
  const char *msg = grpc_error_string(error);
  gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "%s: %s", what, msg);
-  grpc_error_free_string(msg);
  GRPC_ERROR_UNREF(error);
  return false;
}
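[Editor's note: grpc_error_string now caches its result inside the error itself (published with a release CAS, freed in error_destroy), so callers must not free it and repeated calls are cheap. A sketch of the new call-site pattern:]

static void log_error_once(grpc_error *error) {
  const char *msg = grpc_error_string(error); /* owned by 'error'; do NOT free */
  gpr_log(GPR_DEBUG, "error: %s", msg);
  /* the cached pointer is stable across calls on the same error */
  GPR_ASSERT(msg == grpc_error_string(error));
}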
const char *grpc_error_get_str(grpc_error *error, grpc_error_strs which);
-/// A utility function to get the status code and message to be returned
-/// to the application. If not set in the top-level message, looks
-/// through child errors until it finds the first one with these attributes.
-void grpc_error_get_status(grpc_error *error, grpc_status_code *code,
-                           const char **msg);
-
/// Add a child error: an error that is believed to have contributed to this
/// error occurring. Allows root causing high level errors from lower level
/// errors that contributed to them.
diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h
new file mode 100644
index 0000000000..1c89ead4ed
--- /dev/null
+++ b/src/core/lib/iomgr/error_internal.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H
+#define GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <grpc/support/avl.h>
+
+struct grpc_error {
+  gpr_refcount refs;
+  gpr_avl ints;
+  gpr_avl strs;
+  gpr_avl times;
+  gpr_avl errs;
+  uintptr_t next_err;
+  gpr_atm error_string;
+};
+
+bool grpc_error_is_special(grpc_error *err);
+
+#endif /* GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H */
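[Editor's note: exposing struct grpc_error is what lets the polling engines treat a grpc_error * as a taggable word: heap pointers are word-aligned and the special errors (NULL, 2, 4) are deliberately even, so bit 0 is always free. An illustrative sketch of the encode/decode idiom used below in ev_epoll_linux.c; the names here are made up for the example:]

#define SHUTDOWN_BIT ((gpr_atm)1) /* illustrative name */

static gpr_atm encode_shutdown(grpc_error *why) {
  return (gpr_atm)why | SHUTDOWN_BIT; /* bit 0 of 'why' is known to be 0 */
}
static grpc_error *decode_error(gpr_atm v) {
  return (grpc_error *)(v & ~SHUTDOWN_BIT); /* lossless round-trip */
}
static bool is_shutdown(gpr_atm v) { return (v & SHUTDOWN_BIT) != 0; }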
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index 715d057c51..11208b9ad1 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -68,7 +68,7 @@ static int grpc_polling_trace = 0; /* Disabled by default */
    gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
  }
-/* Uncomment the following enable extra checks on poll_object operations */
+/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
static int grpc_wakeup_signal = -1;
@@ -140,23 +140,61 @@ struct grpc_fd {
     Ref/Unref by two to avoid altering the orphaned bit */
  gpr_atm refst;
-  /* Indicates that the fd is shutdown and that any pending read/write closures
-     should fail */
-  bool shutdown;
+  /* Internally stores data of type (grpc_error *). If the FD is shutdown, this
+     contains the reason for shutdown (i.e. a pointer to grpc_error) ORed with
+     FD_SHUTDOWN_BIT. Since address allocations are word-aligned, the lower bit
+     of (grpc_error *) addresses is guaranteed to be zero. Even if the
+     (grpc_error *) is of a special type like GRPC_ERROR_NONE, GRPC_ERROR_OOM
+     etc, the lower bit is guaranteed to be zero.
-  /* The fd is either closed or we relinquished control of it. In either cases,
-     this indicates that the 'fd' on this structure is no longer valid */
+
+     Once an fd is shutdown, any pending or future read/write closures on the
+     fd should fail */
+  gpr_atm shutdown_error;
+
+  /* The fd is either closed or we relinquished control of it. In either
+     case, this indicates that the 'fd' on this structure is no longer
+     valid */
  bool orphaned;
-  /* TODO: sreek - Move this to a lockfree implementation */
-  grpc_closure *read_closure;
-  grpc_closure *write_closure;
+  /* Closures to call when the fd is readable or writable respectively. These
+     fields contain one of the following values:
+       CLOSURE_READY     : The fd has an I/O event of interest but there is no
+                           closure yet to execute
+
+       CLOSURE_NOT_READY : The fd has no I/O event of interest
+
+       closure ptr       : The closure to be executed when the fd has an I/O
+                           event of interest
+
+       shutdown_error | FD_SHUTDOWN_BIT :
+                           'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+                           This indicates that the fd is shutdown. Since all
+                           memory allocations are word-aligned, the lower two
+                           bits of the shutdown_error pointer are always 0. So
+                           it is safe to OR these with FD_SHUTDOWN_BIT
+
+     Valid state transitions:
+
+       <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+         |  |                         ^   |    ^                         |  |
+         |  |                         |   |    |                         |  |
+         |  +--------------4----------+   6    +---------2---------------+  |
+         |                                |                                 |
+         |                                v                                 |
+         +-----5------->  [shutdown_error | FD_SHUTDOWN_BIT]  <----7--------+
+
+     For 1, 4 : See set_ready() function
+     For 2, 3 : See notify_on() function
+     For 5,6,7: See set_shutdown() function */
+  gpr_atm read_closure;
+  gpr_atm write_closure;
  struct grpc_fd *freelist_next;
  grpc_closure *on_done_closure;
-  /* The pollset that last noticed that the fd is readable */
-  grpc_pollset *read_notifier_pollset;
+  /* The pollset that last noticed that the fd is readable. The actual type
+   * stored in this is (grpc_pollset *) */
+  gpr_atm read_notifier_pollset;
  grpc_iomgr_object iomgr_object;
};
@@ -179,8 +217,10 @@ static void fd_unref(grpc_fd *fd);
static void fd_global_init(void);
static void fd_global_shutdown(void);
-#define CLOSURE_NOT_READY ((grpc_closure *)0)
-#define CLOSURE_READY ((grpc_closure *)1)
+#define CLOSURE_NOT_READY ((gpr_atm)0)
+#define CLOSURE_READY ((gpr_atm)2)
+
+#define FD_SHUTDOWN_BIT 1
/*******************************************************************************
 * Polling island Declarations
@@ -321,7 +361,7 @@ gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
-    workqueue_enqueue, workqueue_enqueue};
+    workqueue_enqueue, workqueue_enqueue, "workqueue"};
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
@@ -908,6 +948,11 @@ static void unref_by(grpc_fd *fd, int n) {
    fd_freelist = fd;
    grpc_iomgr_unregister_object(&fd->iomgr_object);
+    grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
+    /* Clear the least significant bit if it is set (in case fd was shutdown) */
+    err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT);
+    GRPC_ERROR_UNREF(err);
+
    gpr_mu_unlock(&fd_freelist_mu);
  } else {
    GPR_ASSERT(old > n);
@@ -970,13 +1015,14 @@ static grpc_fd *fd_create(int fd, const char *name) {
  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
-  new_fd->shutdown = false;
+  gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE);
  new_fd->orphaned = false;
-  new_fd->read_closure = CLOSURE_NOT_READY;
-  new_fd->write_closure = CLOSURE_NOT_READY;
+  gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY);
+  gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY);
+  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
+
  new_fd->freelist_next = NULL;
  new_fd->on_done_closure = NULL;
-  new_fd->read_notifier_pollset = NULL;
  gpr_mu_unlock(&new_fd->po.mu);
@@ -1058,98 +1104,206 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
  GRPC_ERROR_UNREF(error);
}
-static grpc_error *fd_shutdown_error(bool shutdown) {
-  if (!shutdown) {
-    return GRPC_ERROR_NONE;
-  } else {
-    return GRPC_ERROR_CREATE("FD shutdown");
+static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
+                      grpc_closure *closure) {
+  while (true) {
+    /* Fast-path: CLOSURE_NOT_READY -> <closure>.
+       The 'release' cas here matches the 'acquire' load in set_ready and
+       set_shutdown ensuring that the closure (scheduled by set_ready or
+       set_shutdown) happens-after the I/O event on the fd */
+    if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+      return; /* Fast-path successful. Return */
+    }
+
+    /* Slowpath. The 'acquire' load matches the 'release' cas in set_ready and
+       set_shutdown */
+    gpr_atm curr = gpr_atm_acq_load(state);
+    switch (curr) {
+      case CLOSURE_NOT_READY: {
+        break; /* retry */
+      }
+
+      case CLOSURE_READY: {
+        /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+           successful. If not, the state most likely transitioned to shutdown.
+           We should retry.
+
+           This can be a no-barrier cas since the state is being transitioned
+           to CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+           closure when transitioning out of CLOSURE_NOT_READY state (i.e.
+           there is no other code that needs to 'happen-after' this) */
+        if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+          grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+          return; /* Slow-path successful. Return */
+        }
+
+        break; /* retry */
+      }
+
+      default: {
+        /* 'curr' is either a closure or the fd is shutdown (in which case
+           'curr' contains a pointer to the shutdown-error). If the fd is
+           shutdown, schedule the closure with the shutdown error */
+        if ((curr & FD_SHUTDOWN_BIT) > 0) {
+          grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
+          grpc_closure_sched(
+              exec_ctx, closure,
+              GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+          return;
+        }

+        /* There is already a closure! This indicates a bug in the code */
+        gpr_log(GPR_ERROR,
+                "notify_on called with a previous callback still pending");
+        abort();
+      }
+    }
  }
+
+  GPR_UNREACHABLE_CODE(return );
}
-static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
-                             grpc_closure **st, grpc_closure *closure) {
-  if (fd->shutdown) {
-    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
-  } else if (*st == CLOSURE_NOT_READY) {
-    /* not ready ==> switch to a waiting state by setting the closure */
-    *st = closure;
-  } else if (*st == CLOSURE_READY) {
-    /* already ready ==> queue the closure to run immediately */
-    *st = CLOSURE_NOT_READY;
-    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
-  } else {
-    /* upcallptr was set to a different closure. This is an error! */
-    gpr_log(GPR_ERROR,
-            "User called a notify_on function with a previous callback still "
-            "pending");
-    abort();
+static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
+                         grpc_error *shutdown_err) {
+  /* Try the fast-path first (i.e. expect the current value to be
+     CLOSURE_NOT_READY) */
+  gpr_atm curr = CLOSURE_NOT_READY;
+  gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+
+  while (true) {
+    /* The 'release' cas here matches the 'acquire' load in notify_on to ensure
+       that the closure it schedules 'happens-after' the set_shutdown is called
+       on the fd */
+    if (gpr_atm_rel_cas(state, curr, new_state)) {
+      return; /* Fast-path successful. Return */
+    }
+
+    /* Fallback to slowpath. This 'acquire' load matches the 'release' cas in
+       notify_on and set_ready */
+    curr = gpr_atm_acq_load(state);
+    switch (curr) {
+      case CLOSURE_READY: {
+        break; /* retry */
+      }
+
+      case CLOSURE_NOT_READY: {
+        break; /* retry */
+      }
+
+      default: {
+        /* 'curr' is either a closure or the fd is already shutdown */
+
+        /* If fd is already shutdown, we are done */
+        if ((curr & FD_SHUTDOWN_BIT) > 0) {
+          return;
+        }
+
+        /* Fd is not shutdown. Schedule the closure and move the state to
+           shutdown state. The 'release' cas here matches the 'acquire' load in
+           notify_on to ensure that the closure it schedules 'happens-after'
+           the set_shutdown is called on the fd */
+        if (gpr_atm_rel_cas(state, curr, new_state)) {
+          grpc_closure_sched(
+              exec_ctx, (grpc_closure *)curr,
+              GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+          return;
+        }
+
+        /* 'curr' was a closure but now changed to a different state. We will
+           have to retry */
+        break;
+      }
+    }
  }
+
+  GPR_UNREACHABLE_CODE(return );
}
-/* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
-                            grpc_closure **st) {
-  if (*st == CLOSURE_READY) {
-    /* duplicate ready ==> ignore */
-    return 0;
-  } else if (*st == CLOSURE_NOT_READY) {
-    /* not ready, and not waiting ==> flag ready */
-    *st = CLOSURE_READY;
-    return 0;
-  } else {
-    /* waiting ==> queue closure */
-    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
-    *st = CLOSURE_NOT_READY;
-    return 1;
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) {
+  /* Try an optimistic case first (i.e. assume current state is
+     CLOSURE_NOT_READY).
+
+     This 'release' cas matches the 'acquire' load in notify_on ensuring that
+     any closure (scheduled by notify_on) 'happens-after' the return from
+     epoll_pwait */
+  if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+    return; /* early out */
+  }
+
+  /* The 'acquire' load here matches the 'release' cas in notify_on and
+     set_shutdown */
+  gpr_atm curr = gpr_atm_acq_load(state);
+  switch (curr) {
+    case CLOSURE_READY: {
+      /* Already ready. We are done here */
+      break;
+    }
+
+    case CLOSURE_NOT_READY: {
+      /* The state was not CLOSURE_NOT_READY when we checked initially at the
+         beginning of this function but now it is CLOSURE_NOT_READY again.
+         This is only possible if the state transitioned out of
+         CLOSURE_NOT_READY to either CLOSURE_READY or <some closure> and then
+         back to CLOSURE_NOT_READY again (i.e. after we entered this function,
+         the fd became "ready" and the necessary actions were already done).
+         So there is no need to make the state CLOSURE_READY now */
+      break;
+    }
+
+    default: {
+      /* 'curr' is either a closure or the fd is shutdown */
+      if ((curr & FD_SHUTDOWN_BIT) > 0) {
+        /* The fd is shutdown. Do nothing */
+      } else if (gpr_atm_no_barrier_cas(state, curr, CLOSURE_NOT_READY)) {
+        /* The cas above was no-barrier since the state is being transitioned
+           to CLOSURE_NOT_READY; notify_on and set_shutdown do not schedule any
+           closures when transitioning out of CLOSURE_NOT_READY state (i.e.
+           there is no other code that needs to 'happen-after' this) */
+
+        grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+      }
+      /* else the state changed again (only possible by a racing set_ready or
+         set_shutdown). In both these cases, the closure would have been
+         scheduled for execution. So we are done here */
+      break;
+    }
  }
}
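[Editor's note: taken together, notify_on/set_ready/set_shutdown make registration order irrelevant. A sketch of the two legal interleavings for the read path — same callback, same outcome, 'on_read' runs exactly once:]

/* Interleaving A: closure registered before the I/O event */
notify_on(exec_ctx, fd, &fd->read_closure, on_read); /* state: <on_read> */
set_ready(exec_ctx, fd, &fd->read_closure); /* schedules on_read; -> NOT_READY */

/* Interleaving B: I/O event arrives before registration */
set_ready(exec_ctx, fd, &fd->read_closure);          /* state: CLOSURE_READY */
notify_on(exec_ctx, fd, &fd->read_closure, on_read); /* schedules on_read; -> NOT_READY */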
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
-  grpc_pollset *notifier = NULL;
-
-  gpr_mu_lock(&fd->po.mu);
-  notifier = fd->read_notifier_pollset;
-  gpr_mu_unlock(&fd->po.mu);
-
-  return notifier;
+  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
+  return (grpc_pollset *)notifier;
}
static bool fd_is_shutdown(grpc_fd *fd) {
-  gpr_mu_lock(&fd->po.mu);
-  const bool r = fd->shutdown;
-  gpr_mu_unlock(&fd->po.mu);
-  return r;
+  grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
+  return (((intptr_t)err & FD_SHUTDOWN_BIT) > 0);
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
-  gpr_mu_lock(&fd->po.mu);
-  /* Do the actual shutdown only once */
-  if (!fd->shutdown) {
-    fd->shutdown = true;
-
+static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+  /* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */
+  if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE,
+                      (gpr_atm)why | FD_SHUTDOWN_BIT)) {
    shutdown(fd->fd, SHUT_RDWR);
-    /* Flush any pending read and write closures. Since fd->shutdown is 'true'
-       at this point, the closures would be called with 'success = false' */
-    set_ready_locked(exec_ctx, fd, &fd->read_closure);
-    set_ready_locked(exec_ctx, fd, &fd->write_closure);
+
+    set_shutdown(exec_ctx, fd, &fd->read_closure, why);
+    set_shutdown(exec_ctx, fd, &fd->write_closure, why);
+  } else {
+    /* Shutdown already called */
+    GRPC_ERROR_UNREF(why);
  }
-  gpr_mu_unlock(&fd->po.mu);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
-  gpr_mu_lock(&fd->po.mu);
-  notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
-  gpr_mu_unlock(&fd->po.mu);
+  notify_on(exec_ctx, fd, &fd->read_closure, closure);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
-  gpr_mu_lock(&fd->po.mu);
-  notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
-  gpr_mu_unlock(&fd->po.mu);
+  notify_on(exec_ctx, fd, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
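[Editor's note: fd_shutdown now takes ownership of 'why' exactly once — the first call's CAS wins and the error lives, tagged with FD_SHUTDOWN_BIT, until unref_by releases it; any later call's error is unreffed immediately. A sketch of the resulting idempotent behavior, with illustrative error texts:]

fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE("remote close")); /* stored */
fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE("local abort"));  /* dropped: unreffed */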
@@ -1338,18 +1492,19 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
-  /* Need the fd->po.mu since we might be racing with fd_notify_on_read */
-  gpr_mu_lock(&fd->po.mu);
-  set_ready_locked(exec_ctx, fd, &fd->read_closure);
-  fd->read_notifier_pollset = notifier;
-  gpr_mu_unlock(&fd->po.mu);
+  set_ready(exec_ctx, fd, &fd->read_closure);
+
+  /* Note, it is possible that fd_become_readable might be called twice with
+     different 'notifier's when an fd becomes readable and it is in two epoll
+     sets (This can happen briefly during polling island merges). In such cases
+     it does not really matter which notifier is set as the
+     read_notifier_pollset (They would both point to the same polling island
+     anyway) */
+  /* Use release store to match with acquire load in fd_get_read_notifier */
+  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
-  /* Need the fd->po.mu since we might be racing with fd_notify_on_write */
-  gpr_mu_lock(&fd->po.mu);
-  set_ready_locked(exec_ctx, fd, &fd->write_closure);
-  gpr_mu_unlock(&fd->po.mu);
+  set_ready(exec_ctx, fd, &fd->write_closure);
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@@ -1400,16 +1555,6 @@ static void pollset_destroy(grpc_pollset *pollset) {
  gpr_mu_destroy(&pollset->po.mu);
}
-static void pollset_reset(grpc_pollset *pollset) {
-  GPR_ASSERT(pollset->shutting_down);
-  GPR_ASSERT(!pollset_has_workers(pollset));
-  pollset->shutting_down = false;
-  pollset->finish_shutdown_called = false;
-  pollset->kicked_without_pollers = false;
-  pollset->shutdown_done = NULL;
-  GPR_ASSERT(pollset->po.pi == NULL);
-}
-
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
                                    polling_island *pi) {
  if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
@@ -1527,7 +1672,8 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
        append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                     err_desc);
      } else if (data_ptr == &pi->workqueue_wakeup_fd) {
-        append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
+        append_error(error,
+                     grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
                     err_desc);
        maybe_do_workqueue_work(exec_ctx, pi);
      } else if (data_ptr == &polling_island_wakeup_fd) {
@@ -1741,7 +1887,7 @@ retry:
               (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
               (void *)bag);
    /* No need to lock 'pi_new' here since this is a new polling island
-     * and no one has a reference to it yet */
+       and no one has a reference to it yet */
    polling_island_remove_all_fds_locked(pi_new, true, &error);
    /* Ref and unref so that the polling island gets deleted during unref
@@ -1846,13 +1992,12 @@ static grpc_pollset_set *pollset_set_create(void) {
  return pss;
}
-static void pollset_set_destroy(grpc_pollset_set *pss) {
+static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+                                grpc_pollset_set *pss) {
  gpr_mu_destroy(&pss->po.mu);
  if (pss->po.pi != NULL) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    PI_UNREF(&exec_ctx, pss->po.pi, "pss_destroy");
-    grpc_exec_ctx_finish(&exec_ctx);
+    PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
  }
  gpr_free(pss);
@@ -1952,7 +2097,6 @@ static const grpc_event_engine_vtable vtable = {
    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
-    .pollset_reset = pollset_reset,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 9477ac3688..5ddd5313e2 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -82,6 +82,7 @@ struct grpc_fd {
  int shutdown;
  int closed;
  int released;
+  grpc_error *shutdown_error;
  /* The watcher list.
@@ -148,7 +149,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
static bool fd_is_orphaned(grpc_fd *fd);
/* Reference counting for fds */
-/*#define GRPC_FD_REF_COUNT_DEBUG*/
+//#define GRPC_FD_REF_COUNT_DEBUG
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
@@ -190,6 +191,7 @@ struct grpc_pollset {
  int kicked_without_pollers;
  grpc_closure *shutdown_done;
  grpc_closure_list idle_jobs;
+  int pollset_set_count;
  /* all polled fds */
  size_t fd_count;
  size_t fd_capacity;
@@ -227,7 +229,7 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
/* Return 1 if the pollset has active threads in pollset_work (pollset must
 * be locked) */
-static int pollset_has_workers(grpc_pollset *pollset);
+static bool pollset_has_workers(grpc_pollset *pollset);
/*******************************************************************************
 * pollset_set definitions
@@ -281,8 +283,8 @@ cv_fd_table g_cvfds;
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p   ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
-          gpr_atm_no_barrier_load(&fd->refst),
-          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+          (int)gpr_atm_no_barrier_load(&fd->refst),
+          (int)gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -296,8 +298,8 @@ static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
-          gpr_atm_no_barrier_load(&fd->refst),
-          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+          (int)gpr_atm_no_barrier_load(&fd->refst),
+          (int)gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
@@ -306,6 +308,7 @@ static void unref_by(grpc_fd *fd, int n) {
  if (old == n) {
    gpr_mu_destroy(&fd->mu);
    grpc_iomgr_unregister_object(&fd->iomgr_object);
+    if (fd->shutdown) GRPC_ERROR_UNREF(fd->shutdown_error);
    gpr_free(fd);
  } else {
    GPR_ASSERT(old > n);
@@ -444,11 +447,11 @@ static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
-static grpc_error *fd_shutdown_error(bool shutdown) {
-  if (!shutdown) {
+static grpc_error *fd_shutdown_error(grpc_fd *fd) {
+  if (!fd->shutdown) {
    return GRPC_ERROR_NONE;
  } else {
-    return GRPC_ERROR_CREATE("FD shutdown");
+    return GRPC_ERROR_CREATE_REFERENCING("FD shutdown", &fd->shutdown_error, 1);
  }
}
@@ -462,7 +465,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
  } else if (*st == CLOSURE_READY) {
    /* already ready ==> queue the closure to run immediately */
    *st = CLOSURE_NOT_READY;
-    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
+    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd));
    maybe_wake_one_watcher_locked(fd);
  } else {
    /* upcallptr was set to a different closure. This is an error! */
@@ -485,7 +488,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
    return 0;
  } else {
    /* waiting ==> queue closure */
-    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
+    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd));
    *st = CLOSURE_NOT_READY;
    return 1;
  }
@@ -496,15 +499,18 @@ static void set_read_notifier_pollset_locked(
  fd->read_notifier_pollset = read_notifier_pollset;
}
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  gpr_mu_lock(&fd->mu);
  /* only shutdown once */
  if (!fd->shutdown) {
    fd->shutdown = 1;
+    fd->shutdown_error = why;
    /* signal read/write closed to OS so that future operations fail */
    shutdown(fd->fd, SHUT_RDWR);
    set_ready_locked(exec_ctx, fd, &fd->read_closure);
    set_ready_locked(exec_ctx, fd, &fd->write_closure);
+  } else {
+    GRPC_ERROR_UNREF(why);
  }
  gpr_mu_unlock(&fd->mu);
}
@@ -653,10 +659,18 @@ static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
  worker->next->prev = worker->prev;
}
-static int pollset_has_workers(grpc_pollset *p) {
+static bool pollset_has_workers(grpc_pollset *p) {
  return p->root_worker.next != &p->root_worker;
}
+static bool pollset_in_pollset_sets(grpc_pollset *p) {
+  return p->pollset_set_count;
+}
+
+static bool pollset_has_observers(grpc_pollset *p) {
+  return pollset_has_workers(p) || pollset_in_pollset_sets(p);
+}
+
static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
  if (pollset_has_workers(p)) {
    grpc_pollset_worker *w = p->root_worker.next;
@@ -795,6 +809,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  pollset->fd_count = 0;
  pollset->fd_capacity = 0;
  pollset->fds = NULL;
+  pollset->pollset_set_count = 0;
}
static void pollset_destroy(grpc_pollset *pollset) {
@@ -810,16 +825,6 @@ static void pollset_destroy(grpc_pollset *pollset) {
  gpr_mu_destroy(&pollset->mu);
}
-static void pollset_reset(grpc_pollset *pollset) {
-  GPR_ASSERT(pollset->shutting_down);
-  GPR_ASSERT(!pollset_has_workers(pollset));
-  GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
-  GPR_ASSERT(pollset->fd_count == 0);
-  pollset->shutting_down = 0;
-  pollset->called_shutdown = 0;
-  pollset->kicked_without_pollers = 0;
-}
-
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  gpr_mu_lock(&pollset->mu);
@@ -1066,7 +1071,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
  if (pollset->shutting_down) {
    if (pollset_has_workers(pollset)) {
      pollset_kick(pollset, NULL);
-    } else if (!pollset->called_shutdown) {
+    } else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(exec_ctx, pollset);
@@ -1098,7 +1103,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
  if (!pollset_has_workers(pollset)) {
    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
  }
-  if (!pollset->called_shutdown && !pollset_has_workers(pollset)) {
+  if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
    pollset->called_shutdown = 1;
    finish_shutdown(exec_ctx, pollset);
  }
@@ -1126,18 +1131,32 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
 */
static grpc_pollset_set *pollset_set_create(void) {
-  grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
-  memset(pollset_set, 0, sizeof(*pollset_set));
+  grpc_pollset_set *pollset_set = gpr_zalloc(sizeof(*pollset_set));
  gpr_mu_init(&pollset_set->mu);
  return pollset_set;
}
-static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
+static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+                                grpc_pollset_set *pollset_set) {
  size_t i;
  gpr_mu_destroy(&pollset_set->mu);
  for (i = 0; i < pollset_set->fd_count; i++) {
    GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
  }
+  for (i = 0; i < pollset_set->pollset_count; i++) {
+    grpc_pollset *pollset = pollset_set->pollsets[i];
+    gpr_mu_lock(&pollset->mu);
+    pollset->pollset_set_count--;
+    /* check shutdown */
+    if (pollset->shutting_down && !pollset->called_shutdown &&
+        !pollset_has_observers(pollset)) {
+      pollset->called_shutdown = 1;
+      gpr_mu_unlock(&pollset->mu);
+      finish_shutdown(exec_ctx, pollset);
+    } else {
+      gpr_mu_unlock(&pollset->mu);
+    }
+  }
  gpr_free(pollset_set->pollsets);
  gpr_free(pollset_set->pollset_sets);
  gpr_free(pollset_set->fds);
@@ -1148,6 +1167,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pollset_set,
                                    grpc_pollset *pollset) {
  size_t i, j;
+  gpr_mu_lock(&pollset->mu);
+  pollset->pollset_set_count++;
+  gpr_mu_unlock(&pollset->mu);
  gpr_mu_lock(&pollset_set->mu);
  if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
    pollset_set->pollset_capacity =
@@ -1183,6 +1205,17 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
    }
  }
  gpr_mu_unlock(&pollset_set->mu);
+  gpr_mu_lock(&pollset->mu);
+  pollset->pollset_set_count--;
+  /* check shutdown */
+  if (pollset->shutting_down && !pollset->called_shutdown &&
+      !pollset_has_observers(pollset)) {
+    pollset->called_shutdown = 1;
+    gpr_mu_unlock(&pollset->mu);
+    finish_shutdown(exec_ctx, pollset);
+  } else {
+    gpr_mu_unlock(&pollset->mu);
+  }
}
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -1509,7 +1542,6 @@ static const grpc_event_engine_vtable vtable = {
    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
-    .pollset_reset = pollset_reset,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
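[Editor's note: the "check shutdown" block above now appears in three places (pollset_work, pollset_set_del_pollset, pollset_set_destroy). A sketch of the shared shape with a hypothetical helper name — it must be entered with p->mu held, and it guarantees finish_shutdown runs at most once, only after no worker or pollset_set can still reach the pollset:]

static void maybe_finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
                                         grpc_pollset *p) {
  if (p->shutting_down && !p->called_shutdown && !pollset_has_observers(p)) {
    p->called_shutdown = 1; /* claim the right to finish, exactly once */
    gpr_mu_unlock(&p->mu);
    finish_shutdown(exec_ctx, p);
  } else {
    gpr_mu_unlock(&p->mu);
  }
}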
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 2975d619e1..b5be5504b9 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -52,6 +52,8 @@
 * tests */
grpc_poll_function_type grpc_poll_function = poll;
+grpc_wakeup_fd grpc_global_wakeup_fd;
+
static const grpc_event_engine_vtable *g_event_engine;
static const char *g_poll_strategy_name = NULL;
@@ -160,8 +162,8 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
  g_event_engine->fd_orphan(exec_ctx, fd, on_done, release_fd, reason);
}
-void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
-  g_event_engine->fd_shutdown(exec_ctx, fd);
+void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+  g_event_engine->fd_shutdown(exec_ctx, fd, why);
}
bool grpc_fd_is_shutdown(grpc_fd *fd) {
@@ -189,10 +191,6 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
  g_event_engine->pollset_shutdown(exec_ctx, pollset, closure);
}
-void grpc_pollset_reset(grpc_pollset *pollset) {
-  g_event_engine->pollset_reset(pollset);
-}
-
void grpc_pollset_destroy(grpc_pollset *pollset) {
  g_event_engine->pollset_destroy(pollset);
}
@@ -217,8 +215,9 @@ grpc_pollset_set *grpc_pollset_set_create(void) {
  return g_event_engine->pollset_set_create();
}
-void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
-  g_event_engine->pollset_set_destroy(pollset_set);
+void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+                              grpc_pollset_set *pollset_set) {
+  g_event_engine->pollset_set_destroy(exec_ctx, pollset_set);
}
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 1068a4bad5..1a9e5c115a 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -51,7 +51,7 @@ typedef struct grpc_event_engine_vtable {
  int (*fd_wrapped_fd)(grpc_fd *fd);
  void (*fd_orphan)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
                    int *release_fd, const char *reason);
-  void (*fd_shutdown)(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
+  void (*fd_shutdown)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why);
  void (*fd_notify_on_read)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                            grpc_closure *closure);
  void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
@@ -64,7 +64,6 @@ typedef struct grpc_event_engine_vtable {
  void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu);
  void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure);
-  void (*pollset_reset)(grpc_pollset *pollset);
  void (*pollset_destroy)(grpc_pollset *pollset);
  grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                              grpc_pollset_worker **worker, gpr_timespec now,
@@ -75,7 +74,8 @@ typedef struct grpc_event_engine_vtable {
                     struct grpc_fd *fd);
  grpc_pollset_set *(*pollset_set_create)(void);
-  void (*pollset_set_destroy)(grpc_pollset_set *pollset_set);
+  void (*pollset_set_destroy)(grpc_exec_ctx *exec_ctx,
+                              grpc_pollset_set *pollset_set);
  void (*pollset_set_add_pollset)(grpc_exec_ctx *exec_ctx,
                                  grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset);
@@ -140,7 +140,7 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
bool grpc_fd_is_shutdown(grpc_fd *fd);
/* Cause any current and future callbacks to fail. */
-void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
+void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why);
/* Register read interest, causing read_cb to be called once when fd becomes
   readable, on deadline specified by deadline, or on shutdown triggered by
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 6aa788f8e5..83bb436bd0 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -42,11 +42,16 @@
#include "src/core/lib/profiling/timers.h"
bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
-  if (!exec_ctx->cached_ready_to_finish) {
-    exec_ctx->cached_ready_to_finish = exec_ctx->check_ready_to_finish(
-        exec_ctx, exec_ctx->check_ready_to_finish_arg);
+  if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
+    if (exec_ctx->check_ready_to_finish(exec_ctx,
+                                        exec_ctx->check_ready_to_finish_arg)) {
+      exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
+      return true;
+    }
+    return false;
+  } else {
+    return true;
  }
-  return exec_ctx->cached_ready_to_finish;
}
bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
@@ -82,7 +87,7 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
}
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
-  exec_ctx->cached_ready_to_finish = true;
+  exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
  grpc_exec_ctx_flush(exec_ctx);
}
@@ -101,6 +106,6 @@ void grpc_exec_ctx_global_init(void) {}
void grpc_exec_ctx_global_shutdown(void) {}
static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
-    exec_ctx_run, exec_ctx_sched};
+    exec_ctx_run, exec_ctx_sched, "exec_ctx"};
static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index e566f1b3e8..f99a0fee5f 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -43,6 +43,13 @@ typedef struct grpc_workqueue grpc_workqueue;
typedef struct grpc_combiner grpc_combiner;
+/* This exec_ctx is ready to return: either pre-populated, or cached as soon as
+   the finish_check returns true */
+#define GRPC_EXEC_CTX_FLAG_IS_FINISHED 1
+/* The exec_ctx's thread is (potentially) owned by a call or channel: care
+   should be given to not delete said call/channel from this exec_ctx */
+#define GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP 2
+
/** Execution context.
 *  A bag of data that collects information along a callstack.
 *  Generally created at public API entry points, and passed down as
@@ -63,36 +70,26 @@ typedef struct grpc_combiner grpc_combiner;
 *  - Instances are always passed as the first argument to a function that
 *    takes it, and always as a pointer (grpc_exec_ctx is never copied).
 */
-#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
  grpc_closure_list closure_list;
  /** currently active combiner: updated only via combiner.c */
  grpc_combiner *active_combiner;
  /** last active combiner in the active combiner list */
  grpc_combiner *last_combiner;
-  bool cached_ready_to_finish;
+  uintptr_t flags;
  void *check_ready_to_finish_arg;
  bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
/* initializer for grpc_exec_ctx:
   prefer to use GRPC_EXEC_CTX_INIT whenever possible */
-#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
-  { GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check }
-#else
-struct grpc_exec_ctx {
-  bool cached_ready_to_finish;
-  void *check_ready_to_finish_arg;
-  bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
-};
-#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
-  { false, finish_check_arg, finish_check }
-#endif
+#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
+  { GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, finish_check_arg, finish_check }
/* initialize an execution context at the top level of an API call into grpc
   (this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
-  GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
+  GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL)
extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
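[Editor's note: the flags word replaces the old bool so more than one bit of state fits in the context. Sketches of the two initializer forms, using the macros defined above:]

/* Pre-finished context, as at a public API entry point (old behavior): */
grpc_exec_ctx top_level = GRPC_EXEC_CTX_INIT;

/* A context that keeps running until its finish-check says otherwise: */
grpc_exec_ctx background =
    GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);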
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 852775564f..a5b62aa888 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -158,7 +158,7 @@ void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
  gpr_mu_destroy(&g_executor.mu);
}
-static const grpc_closure_scheduler_vtable executor_vtable = {executor_push,
-                                                              executor_push};
+static const grpc_closure_scheduler_vtable executor_vtable = {
+    executor_push, executor_push, "executor"};
static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c
index 217bc5da59..f40c8b28cc 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.c
@@ -47,7 +47,7 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
                           grpc_slice *output) {
  unsigned char *contents = NULL;
  size_t contents_size = 0;
-  grpc_slice result = gpr_empty_slice();
+  grpc_slice result = grpc_empty_slice();
  FILE *file;
  size_t bytes_read = 0;
  grpc_error *error = GRPC_ERROR_NONE;
diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c
index a5ca9ed2c3..4104bf927a 100644
--- a/src/core/lib/iomgr/network_status_tracker.c
+++ b/src/core/lib/iomgr/network_status_tracker.c
@@ -31,94 +31,18 @@
 *
 */
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
#include "src/core/lib/iomgr/endpoint.h"
-typedef struct endpoint_ll_node {
-  grpc_endpoint *ep;
-  struct endpoint_ll_node *next;
-} endpoint_ll_node;
-
-static endpoint_ll_node *head = NULL;
-static gpr_mu g_endpoint_mutex;
-
-void grpc_network_status_shutdown(void) {
-  if (head != NULL) {
-    gpr_log(GPR_ERROR,
-            "Memory leaked as not all network endpoints were shut down");
-  }
-  gpr_mu_destroy(&g_endpoint_mutex);
-}
+void grpc_network_status_shutdown(void) {}
void grpc_network_status_init(void) {
-  gpr_mu_init(&g_endpoint_mutex);
TODO(makarandd): Install callback with OS to monitor network status. } -void grpc_destroy_network_status_monitor() { - for (endpoint_ll_node *curr = head; curr != NULL;) { - endpoint_ll_node *next = curr->next; - gpr_free(curr); - curr = next; - } - gpr_mu_destroy(&g_endpoint_mutex); -} - -void grpc_network_status_register_endpoint(grpc_endpoint *ep) { - gpr_mu_lock(&g_endpoint_mutex); - if (head == NULL) { - head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); - head->ep = ep; - head->next = NULL; - } else { - endpoint_ll_node *prev_head = head; - head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); - head->ep = ep; - head->next = prev_head; - } - gpr_mu_unlock(&g_endpoint_mutex); -} +void grpc_destroy_network_status_monitor() {} -void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { - gpr_mu_lock(&g_endpoint_mutex); - GPR_ASSERT(head); - bool found = false; - endpoint_ll_node *prev = head; - // if we're unregistering the head, just move head to the next - if (ep == head->ep) { - head = head->next; - gpr_free(prev); - found = true; - } else { - for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) { - if (ep == curr->ep) { - prev->next = curr->next; - gpr_free(curr); - found = true; - break; - } - prev = curr; - } - } - gpr_mu_unlock(&g_endpoint_mutex); - GPR_ASSERT(found); -} +void grpc_network_status_register_endpoint(grpc_endpoint *ep) { (void)ep; } -// Walk the linked-list from head and execute shutdown. It is possible that -// other threads might be in the process of shutdown as well, but that has -// no side effect since endpoint shutdown is idempotent. -void grpc_network_status_shutdown_all_endpoints() { - gpr_mu_lock(&g_endpoint_mutex); - if (head == NULL) { - gpr_mu_unlock(&g_endpoint_mutex); - return; - } - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { (void)ep; } - for (endpoint_ll_node *curr = head; curr != NULL; curr = curr->next) { - curr->ep->vtable->shutdown(&exec_ctx, curr->ep); - } - gpr_mu_unlock(&g_endpoint_mutex); - grpc_exec_ctx_finish(&exec_ctx); -} +void grpc_network_status_shutdown_all_endpoints() {} diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h index 8d9edc8406..e19ce697b8 100644 --- a/src/core/lib/iomgr/pollset.h +++ b/src/core/lib/iomgr/pollset.h @@ -53,14 +53,12 @@ typedef struct grpc_pollset grpc_pollset; typedef struct grpc_pollset_worker grpc_pollset_worker; size_t grpc_pollset_size(void); +/* Initialize a pollset: assumes *pollset contains all zeros */ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu); /* Begin shutting down the pollset, and call closure when done. * pollset's mutex must be held */ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure); -/** Reset the pollset to its initial state (perhaps with some cached objects); - * must have been previously shutdown */ -void grpc_pollset_reset(grpc_pollset *pollset); void grpc_pollset_destroy(grpc_pollset *pollset); /* Do some work on a pollset. 
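
[Editor's note] The exec_ctx.c/exec_ctx.h hunks above replace the cached_ready_to_finish boolean with a uintptr_t flags word: GRPC_EXEC_CTX_FLAG_IS_FINISHED acts as a latch that is set the first time check_ready_to_finish succeeds, so later calls short-circuit, and GRPC_EXEC_CTX_INIT now pre-sets that bit instead of installing grpc_always_ready_to_finish. A minimal standalone C sketch of the latching pattern (toy names, not the real gRPC types):

#include <stdbool.h>
#include <stdint.h>

#define FLAG_IS_FINISHED ((uintptr_t)1) /* mirrors GRPC_EXEC_CTX_FLAG_IS_FINISHED */

typedef struct {
  uintptr_t flags;
  bool (*check_ready_to_finish)(void *arg);
  void *check_ready_to_finish_arg;
} toy_exec_ctx;

/* Same shape as grpc_exec_ctx_ready_to_finish after the diff: the first
   successful check latches the bit, so every later call returns true
   without invoking the callback again. */
static bool toy_ready_to_finish(toy_exec_ctx *ctx) {
  if (ctx->flags & FLAG_IS_FINISHED) return true;
  if (ctx->check_ready_to_finish(ctx->check_ready_to_finish_arg)) {
    ctx->flags |= FLAG_IS_FINISHED;
    return true;
  }
  return false;
}

static bool never_ready(void *arg) { (void)arg; return false; }

int main(void) {
  /* Pre-finished, like the new GRPC_EXEC_CTX_INIT with a NULL finish check. */
  toy_exec_ctx ctx = {FLAG_IS_FINISHED, never_ready, NULL};
  return toy_ready_to_finish(&ctx) ? 0 : 1;
}

Packing the state into a flags word is also what makes room for GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP alongside it without growing the struct.
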
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h index 34bb728c41..d11801d63b 100644 --- a/src/core/lib/iomgr/pollset_set.h +++ b/src/core/lib/iomgr/pollset_set.h @@ -44,7 +44,8 @@ typedef struct grpc_pollset_set grpc_pollset_set; grpc_pollset_set *grpc_pollset_set_create(void); -void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set); +void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set); void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pollset_set, grpc_pollset *pollset); diff --git a/src/core/lib/iomgr/pollset_set_uv.c b/src/core/lib/iomgr/pollset_set_uv.c index e5ef8b29e0..836cfee4ef 100644 --- a/src/core/lib/iomgr/pollset_set_uv.c +++ b/src/core/lib/iomgr/pollset_set_uv.c @@ -41,7 +41,8 @@ grpc_pollset_set* grpc_pollset_set_create(void) { return (grpc_pollset_set*)((intptr_t)0xdeafbeef); } -void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} +void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, + grpc_pollset_set* pollset_set) {} void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pollset_set, diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c index 645650db9b..ae18c8a3ce 100644 --- a/src/core/lib/iomgr/pollset_set_windows.c +++ b/src/core/lib/iomgr/pollset_set_windows.c @@ -42,7 +42,8 @@ grpc_pollset_set* grpc_pollset_set_create(void) { return (grpc_pollset_set*)((intptr_t)0xdeafbeef); } -void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} +void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, + grpc_pollset_set* pollset_set) {} void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pollset_set, diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c index ed3edeee94..af33949c69 100644 --- a/src/core/lib/iomgr/pollset_uv.c +++ b/src/core/lib/iomgr/pollset_uv.c @@ -57,24 +57,35 @@ int grpc_pollset_work_run_loop; gpr_mu grpc_polling_mu; +/* This is used solely to kick the uv loop, by setting a callback to be run + immediately in the next loop iteration. 
+ Note: If in the future there is a bug that involves missing wakeups, try + adding a uv_async_t to kick the loop differently */ +uv_timer_t dummy_uv_handle; + size_t grpc_pollset_size() { return sizeof(grpc_pollset); } +void dummy_timer_cb(uv_timer_t *handle) {} + void grpc_pollset_global_init(void) { gpr_mu_init(&grpc_polling_mu); + uv_timer_init(uv_default_loop(), &dummy_uv_handle); grpc_pollset_work_run_loop = 1; } -void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); } +static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } + +void grpc_pollset_global_shutdown(void) { + gpr_mu_destroy(&grpc_polling_mu); + uv_close((uv_handle_t *)&dummy_uv_handle, timer_close_cb); +} void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { *mu = &grpc_polling_mu; - memset(pollset, 0, sizeof(grpc_pollset)); uv_timer_init(uv_default_loop(), &pollset->timer); pollset->shutting_down = 0; } -static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } - void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(!pollset->shutting_down); @@ -82,6 +93,9 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (grpc_pollset_work_run_loop) { // Drain any pending UV callbacks without blocking uv_run(uv_default_loop(), UV_RUN_NOWAIT); + } else { + // kick the loop once + uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0); } grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); } @@ -97,11 +111,6 @@ void grpc_pollset_destroy(grpc_pollset *pollset) { } } -void grpc_pollset_reset(grpc_pollset *pollset) { - GPR_ASSERT(pollset->shutting_down); - pollset->shutting_down = 0; -} - static void timer_run_cb(uv_timer_t *timer) {} grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, @@ -136,6 +145,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { + uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0); return GRPC_ERROR_NONE; }
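
[Editor's note] The pollset_uv.c change above relies on a libuv idiom: starting a timer with a 0 ms timeout and no repeat forces the loop to wake and run one more iteration, which is how grpc_pollset_kick now interrupts a blocked uv_run. A compilable sketch of the same kick against plain libuv (illustrative names, not the gRPC API; link with -luv):

#include <stdio.h>
#include <uv.h>

static uv_timer_t kick_timer; /* plays the role of dummy_uv_handle */

/* The callback body is irrelevant: waking the loop is the whole point,
   exactly as with dummy_timer_cb in the diff above. */
static void kick_cb(uv_timer_t *handle) {
  (void)handle;
  printf("loop kicked\n");
}

int main(void) {
  uv_loop_t *loop = uv_default_loop();
  uv_timer_init(loop, &kick_timer);
  uv_timer_start(&kick_timer, kick_cb, 0 /* timeout ms */, 0 /* no repeat */);
  uv_run(loop, UV_RUN_DEFAULT); /* returns once the timer has fired */
  uv_close((uv_handle_t *)&kick_timer, NULL);
  uv_run(loop, UV_RUN_NOWAIT); /* let the close callback complete */
  return 0;
}

The diff's own note about uv_async_t points at libuv's more conventional wakeup primitive; the timer variant works because a due timer likewise forces the loop through one iteration.
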
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c index 2a45e708df..17043c1ea1 100644 --- a/src/core/lib/iomgr/pollset_windows.c +++ b/src/core/lib/iomgr/pollset_windows.c @@ -98,7 +98,6 @@ size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); } void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { *mu = &grpc_polling_mu; - memset(pollset, 0, sizeof(*pollset)); pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next = pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev = &pollset->root_worker; @@ -117,16 +116,6 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, void grpc_pollset_destroy(grpc_pollset *pollset) {} -void grpc_pollset_reset(grpc_pollset *pollset) { - GPR_ASSERT(pollset->shutting_down); - GPR_ASSERT( - !has_workers(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET)); - pollset->shutting_down = 0; - pollset->is_iocp_worker = 0; - pollset->kicked_without_pollers = 0; - pollset->on_shutdown = NULL; -} - grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, gpr_timespec now, gpr_timespec deadline) { diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c index 31590cd53b..511ffdcdf1 100644 --- a/src/core/lib/iomgr/resource_quota.c +++ b/src/core/lib/iomgr/resource_quota.c @@ -33,6 +33,8 @@ #include "src/core/lib/iomgr/resource_quota.h" +#include <limits.h> +#include <stdint.h> #include <string.h> #include <grpc/support/alloc.h> @@ -44,6 +46,8 @@ int grpc_resource_quota_trace = 0; +#define MEMORY_USAGE_ESTIMATION_MAX 65536 + /* Internal linked list pointers for a resource user */ typedef struct { grpc_resource_user *next; @@ -126,9 +130,12 @@ struct grpc_resource_quota { /* refcount */ gpr_refcount refs; + /* estimate of current memory usage + scaled to the range [0..MEMORY_USAGE_ESTIMATION_MAX] */ + gpr_atm memory_usage_estimation; + /* Master combiner lock: all activity on a quota executes under this combiner - * (so no mutex is needed for this data structure) - */ + * (so no mutex is needed for this data structure) */ grpc_combiner *combiner; /* Size of the resource quota */ int64_t size; @@ -269,6 +276,16 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx, GRPC_ERROR_NONE); } +/* update the atomically available resource estimate - use no barriers since + timeliness of delivery really doesn't matter much */ +static void rq_update_estimate(grpc_resource_quota *resource_quota) { + gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, + (gpr_atm)((1.0 - + ((double)resource_quota->free_pool) / + ((double)resource_quota->size)) * + MEMORY_USAGE_ESTIMATION_MAX)); +} + /* returns true if all allocations are completed */ static bool rq_alloc(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) { @@ -281,6 +298,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx, int64_t amt = -resource_user->free_pool; resource_user->free_pool = 0; resource_quota->free_pool -= amt; + rq_update_estimate(resource_quota); if (grpc_resource_quota_trace) { gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64 " bytes; rq_free_pool -> %" PRId64, @@ -315,6 +333,7 @@ static bool rq_reclaim_from_per_user_free_pool( int64_t amt = resource_user->free_pool; resource_user->free_pool = 0; resource_quota->free_pool += amt; + rq_update_estimate(resource_quota); if (grpc_resource_quota_trace) { gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64 " bytes; rq_free_pool -> %" PRId64, @@ -378,11 +397,15 @@ static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { } } +static const grpc_slice_refcount_vtable ru_slice_vtable = { + ru_slice_ref, ru_slice_unref, grpc_slice_default_eq_impl, + grpc_slice_default_hash_impl}; + static grpc_slice ru_slice_create(grpc_resource_user *resource_user, size_t size) { ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size); - rc->base.ref = ru_slice_ref; - rc->base.unref = ru_slice_unref; + rc->base.vtable = &ru_slice_vtable; + rc->base.sub_refcount = &rc->base; gpr_ref_init(&rc->refs, 1); rc->resource_user = resource_user; rc->size = size; @@ -527,6 +550,7 @@ static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) { int64_t delta = a->size - a->resource_quota->size; a->resource_quota->size += delta; a->resource_quota->free_pool += delta; + rq_update_estimate(a->resource_quota); rq_step_sched(exec_ctx, a->resource_quota); grpc_resource_quota_unref_internal(exec_ctx, a->resource_quota); gpr_free(a); @@ -553,6 +577,7 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) { resource_quota->size = INT64_MAX; resource_quota->step_scheduled = false; resource_quota->reclaiming = false; + gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0); if (name != NULL) { resource_quota->name = gpr_strdup(name); } else { @@ -574,7 +599,7 @@ grpc_resource_quota
*grpc_resource_quota_create(const char *name) { void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) { if (gpr_unref(&resource_quota->refs)) { - grpc_combiner_destroy(exec_ctx, resource_quota->combiner); + GRPC_COMBINER_UNREF(exec_ctx, resource_quota->combiner, "resource_quota"); gpr_free(resource_quota->name); gpr_free(resource_quota); } @@ -598,6 +623,13 @@ void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) { grpc_resource_quota_ref_internal(resource_quota); } +double grpc_resource_quota_get_memory_pressure( + grpc_resource_quota *resource_quota) { + return ((double)(gpr_atm_no_barrier_load( + &resource_quota->memory_usage_estimation))) / + ((double)MEMORY_USAGE_ESTIMATION_MAX); +} + /* Public API */ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota, size_t size) { @@ -804,12 +836,10 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, void grpc_resource_user_slice_allocator_init( grpc_resource_user_slice_allocator *slice_allocator, grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) { - grpc_closure_init( - &slice_allocator->on_allocated, ru_allocated_slices, slice_allocator, - grpc_combiner_scheduler(resource_user->resource_quota->combiner, false)); - grpc_closure_init( - &slice_allocator->on_done, cb, p, - grpc_combiner_scheduler(resource_user->resource_quota->combiner, false)); + grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices, + slice_allocator, grpc_schedule_on_exec_ctx); + grpc_closure_init(&slice_allocator->on_done, cb, p, + grpc_schedule_on_exec_ctx); slice_allocator->resource_user = resource_user; } diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index d1127ce9ea..b9f62cbf83 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -84,6 +84,12 @@ void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, grpc_resource_quota *grpc_resource_quota_from_channel_args( const grpc_channel_args *channel_args); +/* Return a number indicating current memory pressure: + 0.0 ==> no memory usage + 1.0 ==> maximum memory usage */ +double grpc_resource_quota_get_memory_pressure( + grpc_resource_quota *resource_quota); + typedef struct grpc_resource_user grpc_resource_user; grpc_resource_user *grpc_resource_user_create( diff --git a/src/core/lib/iomgr/sockaddr_utils.c b/src/core/lib/iomgr/sockaddr_utils.c index f23f9888d5..9d2732666b 100644 --- a/src/core/lib/iomgr/sockaddr_utils.c +++ b/src/core/lib/iomgr/sockaddr_utils.c @@ -200,31 +200,37 @@ int grpc_sockaddr_to_string(char **out, } char *grpc_sockaddr_to_uri(const grpc_resolved_address *resolved_addr) { - char *temp; - char *result; grpc_resolved_address addr_normalized; - const struct sockaddr *addr; - if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) { resolved_addr = &addr_normalized; } + const char *scheme = grpc_sockaddr_get_uri_scheme(resolved_addr); + if (scheme == NULL || strcmp("unix", scheme) == 0) { + return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr); + } + char *path = NULL; + char *uri_str = NULL; + if (grpc_sockaddr_to_string(&path, resolved_addr, + false /* normalize */) && + scheme != NULL) { + gpr_asprintf(&uri_str, "%s:%s", scheme, path); + } + gpr_free(path); + return uri_str != NULL ?
uri_str : NULL; +} - addr = (const struct sockaddr *)resolved_addr->addr; - +const char *grpc_sockaddr_get_uri_scheme( + const grpc_resolved_address *resolved_addr) { + const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; switch (addr->sa_family) { case AF_INET: - grpc_sockaddr_to_string(&temp, resolved_addr, 0); - gpr_asprintf(&result, "ipv4:%s", temp); - gpr_free(temp); - return result; + return "ipv4"; case AF_INET6: - grpc_sockaddr_to_string(&temp, resolved_addr, 0); - gpr_asprintf(&result, "ipv6:%s", temp); - gpr_free(temp); - return result; - default: - return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr); + return "ipv6"; + case AF_UNIX: + return "unix"; } + return NULL; } int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) { diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h index 5371e360c5..2b22f11b49 100644 --- a/src/core/lib/iomgr/sockaddr_utils.h +++ b/src/core/lib/iomgr/sockaddr_utils.h @@ -84,6 +84,10 @@ int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port); int grpc_sockaddr_to_string(char **out, const grpc_resolved_address *addr, int normalize); +/* Returns the URI string corresponding to \a addr */ char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr); +/* Returns the URI scheme corresponding to \a addr */ +const char *grpc_sockaddr_get_uri_scheme(const grpc_resolved_address *addr); + #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */ diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c index 9a77c92016..0144192b71 100644 --- a/src/core/lib/iomgr/tcp_client_posix.c +++ b/src/core/lib/iomgr/tcp_client_posix.c @@ -118,11 +118,11 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str, str); - grpc_error_free_string(str); } gpr_mu_lock(&ac->mu); if (ac->fd != NULL) { - grpc_fd_shutdown(exec_ctx, ac->fd); + grpc_fd_shutdown(exec_ctx, ac->fd, + GRPC_ERROR_CREATE("connect() timed out")); } done = (--ac->refs == 0); gpr_mu_unlock(&ac->mu); @@ -178,7 +178,6 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s", ac->addr_str, str); - grpc_error_free_string(str); } gpr_mu_lock(&ac->mu); diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index 3de0795187..ae66577caf 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -144,8 +144,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, } } - connect = gpr_malloc(sizeof(grpc_uv_tcp_connect)); - memset(connect, 0, sizeof(grpc_uv_tcp_connect)); + connect = gpr_zalloc(sizeof(grpc_uv_tcp_connect)); connect->closure = closure; connect->endpoint = ep; connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t)); diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c index 1e84ec3a1e..c8dc9e64bd 100644 --- a/src/core/lib/iomgr/tcp_client_windows.c +++ b/src/core/lib/iomgr/tcp_client_windows.c @@ -135,12 +135,10 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { /* Tries to issue one async connection, then schedules both an IOCP notification request for the connection, and one timeout alert. 
*/ -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done, - grpc_endpoint **endpoint, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) { +static void tcp_client_connect_impl( + grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint, + grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, + const grpc_resolved_address *addr, gpr_timespec deadline) { SOCKET sock = INVALID_SOCKET; BOOL success; int status; @@ -252,4 +250,21 @@ failure: grpc_closure_sched(exec_ctx, on_done, final_error); } +// overridden by api_fuzzer.c +void (*grpc_tcp_client_connect_impl)( + grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, + grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, + const grpc_resolved_address *addr, + gpr_timespec deadline) = tcp_client_connect_impl; + +void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, + grpc_endpoint **ep, + grpc_pollset_set *interested_parties, + const grpc_channel_args *channel_args, + const grpc_resolved_address *addr, + gpr_timespec deadline) { + grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, + channel_args, addr, deadline); +} + #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index ece44978b0..a4381f8fc9 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -119,9 +119,10 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, grpc_error *error); -static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_fd_shutdown(exec_ctx, tcp->em_fd); + grpc_fd_shutdown(exec_ctx, tcp->em_fd, why); grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); } @@ -181,7 +182,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, size_t i; const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "read: error=%s", str); - grpc_error_free_string(str); + for (i = 0; i < tcp->incoming_buffer->count; i++) { char *dump = grpc_dump_slice(tcp->incoming_buffer->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); @@ -435,7 +436,6 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, if (grpc_tcp_trace) { const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "write: %s", str); - grpc_error_free_string(str); } grpc_closure_run(exec_ctx, cb, error); @@ -485,7 +485,6 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (grpc_tcp_trace) { const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "write: %s", str); - grpc_error_free_string(str); } grpc_closure_sched(exec_ctx, cb, error); } diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index 20efb678b2..36f878fdd4 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -44,6 +44,7 @@ #include <errno.h> #include <fcntl.h> +#include <ifaddrs.h> #include <limits.h> #include <netinet/in.h> #include <netinet/tcp.h> @@ -115,6 +116,8 @@ struct grpc_tcp_server { bool shutdown; /* use SO_REUSEPORT */ bool so_reuseport; + /* expand wildcard addresses to a list of all local addresses */ + bool expand_wildcard_addrs; /* linked 
list of server ports */ grpc_tcp_listener *head; @@ -161,6 +164,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server)); s->so_reuseport = has_so_reuseport; s->resource_quota = grpc_resource_quota_create(NULL); + s->expand_wildcard_addrs = false; for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) { if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) { if (args->args[i].type == GRPC_ARG_INTEGER) { @@ -183,6 +187,15 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool"); } + } else if (0 == strcmp(GRPC_ARG_EXPAND_WILDCARD_ADDRS, args->args[i].key)) { + if (args->args[i].type == GRPC_ARG_INTEGER) { + s->expand_wildcard_addrs = (args->args[i].value.integer != 0); + } else { + grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); + gpr_free(s); + return GRPC_ERROR_CREATE(GRPC_ARG_EXPAND_WILDCARD_ADDRS + " must be an integer"); + } } } gpr_ref_init(&s->refs, 1); @@ -276,7 +289,8 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (s->active_ports) { grpc_tcp_listener *sp; for (sp = s->head; sp; sp = sp->next) { - grpc_fd_shutdown(exec_ctx, sp->emfd); + grpc_fd_shutdown(exec_ctx, sp->emfd, + GRPC_ERROR_CREATE("Server destroyed")); } gpr_mu_unlock(&s->mu); } else { @@ -503,9 +517,224 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, return err; } -/* Insert count new listeners after listener. Every new listener will have the - same listen address as listener (SO_REUSEPORT must be enabled). Every new - listener is a sibling of listener. */ +/* If successful, add a listener to s for addr, set *dsmode for the socket, and + return the *listener. */ +static grpc_error *add_addr_to_server(grpc_tcp_server *s, + const grpc_resolved_address *addr, + unsigned port_index, unsigned fd_index, + grpc_dualstack_mode *dsmode, + grpc_tcp_listener **listener) { + grpc_resolved_address addr4_copy; + int fd; + grpc_error *err = + grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd); + if (err != GRPC_ERROR_NONE) { + return err; + } + if (*dsmode == GRPC_DSMODE_IPV4 && + grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { + addr = &addr4_copy; + } + return add_socket_to_server(s, fd, addr, port_index, fd_index, listener); +} + +/* Bind to "::" to get a port number not used by any address. */ +static grpc_error *get_unused_port(int *port) { + grpc_resolved_address wild; + grpc_sockaddr_make_wildcard6(0, &wild); + grpc_dualstack_mode dsmode; + int fd; + grpc_error *err = + grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd); + if (err != GRPC_ERROR_NONE) { + return err; + } + if (dsmode == GRPC_DSMODE_IPV4) { + grpc_sockaddr_make_wildcard4(0, &wild); + } + if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) { + err = GRPC_OS_ERROR(errno, "bind"); + close(fd); + return err; + } + if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) != + 0) { + err = GRPC_OS_ERROR(errno, "getsockname"); + close(fd); + return err; + } + close(fd); + *port = grpc_sockaddr_get_port(&wild); + return *port <= 0 ? GRPC_ERROR_CREATE("Bad port") : GRPC_ERROR_NONE; +} + +/* Return the listener in s with address addr or NULL. 
*/ +static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s, + grpc_resolved_address *addr) { + grpc_tcp_listener *l; + gpr_mu_lock(&s->mu); + for (l = s->head; l != NULL; l = l->next) { + if (l->addr.len != addr->len) { + continue; + } + if (memcmp(l->addr.addr, addr->addr, addr->len) == 0) { + break; + } + } + gpr_mu_unlock(&s->mu); + return l; +} + +/* Get all addresses assigned to network interfaces on the machine and create a + listener for each. requested_port is the port to use for every listener, or 0 + to select one random port that will be used for every listener. Set *out_port + to the port selected. Return GRPC_ERROR_NONE only if all listeners were + added. */ +static grpc_error *add_all_local_addrs_to_server(grpc_tcp_server *s, + unsigned port_index, + int requested_port, + int *out_port) { + struct ifaddrs *ifa = NULL; + struct ifaddrs *ifa_it; + unsigned fd_index = 0; + grpc_tcp_listener *sp = NULL; + grpc_error *err = GRPC_ERROR_NONE; + if (requested_port == 0) { + /* Note: There could be a race where some local addrs can listen on the + selected port and some can't. The sane way to handle this would be to + retry by recreating the whole grpc_tcp_server. Backing out individual + listeners and orphaning the FDs looks like too much trouble. */ + if ((err = get_unused_port(&requested_port)) != GRPC_ERROR_NONE) { + return err; + } else if (requested_port <= 0) { + return GRPC_ERROR_CREATE("Bad get_unused_port()"); + } + gpr_log(GPR_DEBUG, "Picked unused port %d", requested_port); + } + if (getifaddrs(&ifa) != 0 || ifa == NULL) { + return GRPC_OS_ERROR(errno, "getifaddrs"); + } + for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) { + grpc_resolved_address addr; + char *addr_str = NULL; + grpc_dualstack_mode dsmode; + grpc_tcp_listener *new_sp = NULL; + const char *ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : "<unknown>"); + if (ifa_it->ifa_addr == NULL) { + continue; + } else if (ifa_it->ifa_addr->sa_family == AF_INET) { + addr.len = sizeof(struct sockaddr_in); + } else if (ifa_it->ifa_addr->sa_family == AF_INET6) { + addr.len = sizeof(struct sockaddr_in6); + } else { + continue; + } + memcpy(addr.addr, ifa_it->ifa_addr, addr.len); + if (!grpc_sockaddr_set_port(&addr, requested_port)) { + /* Should never happen, because we check sa_family above. */ + err = GRPC_ERROR_CREATE("Failed to set port"); + break; + } + if (grpc_sockaddr_to_string(&addr_str, &addr, 0) < 0) { + addr_str = gpr_strdup("<error>"); + } + gpr_log(GPR_DEBUG, + "Adding local addr from interface %s flags 0x%x to server: %s", + ifa_name, ifa_it->ifa_flags, addr_str); + /* We could have multiple interfaces with the same address (e.g., bonding), + so look for duplicates. 
*/ + if (find_listener_with_addr(s, &addr) != NULL) { + gpr_log(GPR_DEBUG, "Skipping duplicate addr %s on interface %s", addr_str, + ifa_name); + gpr_free(addr_str); + continue; + } + if ((err = add_addr_to_server(s, &addr, port_index, fd_index, &dsmode, + &new_sp)) != GRPC_ERROR_NONE) { + char *err_str = NULL; + grpc_error *root_err; + if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) { + err_str = gpr_strdup("Failed to add listener"); + } + root_err = GRPC_ERROR_CREATE(err_str); + gpr_free(err_str); + gpr_free(addr_str); + err = grpc_error_add_child(root_err, err); + break; + } else { + GPR_ASSERT(requested_port == new_sp->port); + ++fd_index; + if (sp != NULL) { + new_sp->is_sibling = 1; + sp->sibling = new_sp; + } + sp = new_sp; + } + gpr_free(addr_str); + } + freeifaddrs(ifa); + if (err != GRPC_ERROR_NONE) { + return err; + } else if (sp == NULL) { + return GRPC_ERROR_CREATE("No local addresses"); + } else { + *out_port = sp->port; + return GRPC_ERROR_NONE; + } +} + +/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ +static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, + unsigned port_index, + int requested_port, + int *out_port) { + grpc_resolved_address wild4; + grpc_resolved_address wild6; + unsigned fd_index = 0; + grpc_dualstack_mode dsmode; + grpc_tcp_listener *sp = NULL; + grpc_tcp_listener *sp2 = NULL; + grpc_error *v6_err = GRPC_ERROR_NONE; + grpc_error *v4_err = GRPC_ERROR_NONE; + *out_port = -1; + if (s->expand_wildcard_addrs) { + return add_all_local_addrs_to_server(s, port_index, requested_port, + out_port); + } + grpc_sockaddr_make_wildcards(requested_port, &wild4, &wild6); + /* Try listening on IPv6 first. */ + if ((v6_err = add_addr_to_server(s, &wild6, port_index, fd_index, &dsmode, + &sp)) == GRPC_ERROR_NONE) { + ++fd_index; + requested_port = *out_port = sp->port; + if (dsmode == GRPC_DSMODE_DUALSTACK || dsmode == GRPC_DSMODE_IPV4) { + return GRPC_ERROR_NONE; + } + } + /* If we got a v6-only socket or nothing, try adding 0.0.0.0. 
*/ + grpc_sockaddr_set_port(&wild4, requested_port); + if ((v4_err = add_addr_to_server(s, &wild4, port_index, fd_index, &dsmode, + &sp2)) == GRPC_ERROR_NONE) { + *out_port = sp2->port; + if (sp != NULL) { + sp2->is_sibling = 1; + sp->sibling = sp2; + } + } + if (*out_port > 0) { + GRPC_LOG_IF_ERROR("Failed to add :: listener", v6_err); + GRPC_LOG_IF_ERROR("Failed to add 0.0.0.0 listener", v4_err); + return GRPC_ERROR_NONE; + } else { + grpc_error *root_err = + GRPC_ERROR_CREATE("Failed to add any wildcard listeners"); + GPR_ASSERT(v6_err != GRPC_ERROR_NONE && v4_err != GRPC_ERROR_NONE); + root_err = grpc_error_add_child(root_err, v6_err); + root_err = grpc_error_add_child(root_err, v4_err); + return root_err; + } +} + static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) { grpc_tcp_listener *sp = NULL; char *addr_str; @@ -558,19 +787,13 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const grpc_resolved_address *addr, int *out_port) { grpc_tcp_listener *sp; - grpc_tcp_listener *sp2 = NULL; - int fd; - grpc_dualstack_mode dsmode; - grpc_resolved_address addr6_v4mapped; - grpc_resolved_address wild4; - grpc_resolved_address wild6; - grpc_resolved_address addr4_copy; - grpc_resolved_address *allocated_addr = NULL; grpc_resolved_address sockname_temp; - int port; + grpc_resolved_address addr6_v4mapped; + int requested_port = grpc_sockaddr_get_port(addr); unsigned port_index = 0; - unsigned fd_index = 0; - grpc_error *errs[2] = {GRPC_ERROR_NONE, GRPC_ERROR_NONE}; + grpc_dualstack_mode dsmode; + grpc_error *err; + *out_port = -1; if (s->tail != NULL) { port_index = s->tail->port_index + 1; } @@ -578,85 +801,34 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, /* Check if this is a wildcard port, and if so, try to keep the port the same as some previously created listener. */ - if (grpc_sockaddr_get_port(addr) == 0) { + if (requested_port == 0) { for (sp = s->head; sp; sp = sp->next) { sockname_temp.len = sizeof(struct sockaddr_storage); - if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr, + if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp.addr, (socklen_t *)&sockname_temp.len)) { - port = grpc_sockaddr_get_port(&sockname_temp); - if (port > 0) { - allocated_addr = gpr_malloc(sizeof(grpc_resolved_address)); - memcpy(allocated_addr, addr, addr->len); - grpc_sockaddr_set_port(allocated_addr, port); - addr = allocated_addr; + int used_port = grpc_sockaddr_get_port(&sockname_temp); + if (used_port > 0) { + memcpy(&sockname_temp, addr, sizeof(grpc_resolved_address)); + grpc_sockaddr_set_port(&sockname_temp, used_port); + requested_port = used_port; + addr = &sockname_temp; break; } } } } - - sp = NULL; - + if (grpc_sockaddr_is_wildcard(addr, &requested_port)) { + return add_wildcard_addrs_to_server(s, port_index, requested_port, + out_port); + } if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { addr = &addr6_v4mapped; } - - /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ - if (grpc_sockaddr_is_wildcard(addr, &port)) { - grpc_sockaddr_make_wildcards(port, &wild4, &wild6); - - /* Try listening on IPv6 first. */ - addr = &wild6; - errs[0] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd); - if (errs[0] == GRPC_ERROR_NONE) { - errs[0] = add_socket_to_server(s, fd, addr, port_index, fd_index, &sp); - if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) { - goto done; - } - if (sp != NULL) { - ++fd_index; - } - /* If we didn't get a dualstack socket, also listen on 0.0.0.0. 
*/ - if (port == 0 && sp != NULL) { - grpc_sockaddr_set_port(&wild4, sp->port); - } - } - addr = &wild4; - } - - errs[1] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd); - if (errs[1] == GRPC_ERROR_NONE) { - if (dsmode == GRPC_DSMODE_IPV4 && - grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { - addr = &addr4_copy; - } - sp2 = sp; - errs[1] = add_socket_to_server(s, fd, addr, port_index, fd_index, &sp); - if (sp2 != NULL && sp != NULL) { - sp2->sibling = sp; - sp->is_sibling = 1; - } - } - -done: - gpr_free(allocated_addr); - if (sp != NULL) { + if ((err = add_addr_to_server(s, addr, port_index, 0, &dsmode, &sp)) == + GRPC_ERROR_NONE) { *out_port = sp->port; - GRPC_ERROR_UNREF(errs[0]); - GRPC_ERROR_UNREF(errs[1]); - return GRPC_ERROR_NONE; - } else { - *out_port = -1; - char *addr_str = grpc_sockaddr_to_uri(addr); - grpc_error *err = grpc_error_set_str( - GRPC_ERROR_CREATE_REFERENCING("Failed to add port to server", errs, - GPR_ARRAY_SIZE(errs)), - GRPC_ERROR_STR_TARGET_ADDRESS, addr_str); - GRPC_ERROR_UNREF(errs[0]); - GRPC_ERROR_UNREF(errs[1]); - gpr_free(addr_str); - return err; } + return err; } /* Return listener at port_index or NULL. Should only be called with s->mu @@ -773,7 +945,8 @@ void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, if (s->active_ports) { grpc_tcp_listener *sp; for (sp = s->head; sp; sp = sp->next) { - grpc_fd_shutdown(exec_ctx, sp->emfd); + grpc_fd_shutdown(exec_ctx, sp->emfd, + GRPC_ERROR_CREATE("Server shutdown")); } } gpr_mu_unlock(&s->mu); diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c index dafe851ce8..bd4b9b2df1 100644 --- a/src/core/lib/iomgr/tcp_server_windows.c +++ b/src/core/lib/iomgr/tcp_server_windows.c @@ -343,7 +343,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { if (error != GRPC_ERROR_NONE) { const char *msg = grpc_error_string(error); gpr_log(GPR_INFO, "Skipping on_accept due to error: %s", msg); - grpc_error_free_string(msg); + gpr_mu_unlock(&sp->server->mu); return; } diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c index 72955631b5..5541c62068 100644 --- a/src/core/lib/iomgr/tcp_uv.c +++ b/src/core/lib/iomgr/tcp_uv.c @@ -162,7 +162,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread, size_t i; const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "read: error=%s", str); - grpc_error_free_string(str); + for (i = 0; i < tcp->read_slices->count; i++) { char *dump = grpc_dump_slice(tcp->read_slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); @@ -303,13 +303,15 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, static void shutdown_callback(uv_shutdown_t *req, int status) {} -static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; if (!tcp->shutting_down) { tcp->shutting_down = true; uv_shutdown_t *req = &tcp->shutdown_req; uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback); } + GRPC_ERROR_UNREF(why); } static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c index 84f791ba07..6c413971e3 100644 --- a/src/core/lib/iomgr/tcp_windows.c +++ b/src/core/lib/iomgr/tcp_windows.c @@ -116,6 +116,7 @@ typedef struct grpc_tcp { to protect ourselves when requesting a shutdown. 
*/ gpr_mu mu; int shutting_down; + grpc_error *shutdown_error; char *peer_string; } grpc_tcp; @@ -125,6 +126,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { gpr_mu_destroy(&tcp->mu); gpr_free(tcp->peer_string); grpc_resource_user_unref(exec_ctx, tcp->resource_user); + if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error); gpr_free(tcp); } @@ -182,7 +184,10 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { grpc_slice_buffer_add(tcp->read_slices, sub); } else { grpc_slice_unref_internal(exec_ctx, tcp->read_slice); - error = GRPC_ERROR_CREATE("End of TCP stream"); + error = tcp->shutting_down + ? GRPC_ERROR_CREATE_REFERENCING("TCP stream shutting down", + &tcp->shutdown_error, 1) + : GRPC_ERROR_CREATE("End of TCP stream"); } } } @@ -203,8 +208,9 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, WSABUF buffer; if (tcp->shutting_down) { - grpc_closure_sched(exec_ctx, cb, - GRPC_ERROR_CREATE("TCP socket is shutting down")); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_REFERENCING( + "TCP socket is shutting down", + &tcp->shutdown_error, 1)); return; } @@ -291,8 +297,9 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, size_t len; if (tcp->shutting_down) { - grpc_closure_sched(exec_ctx, cb, - GRPC_ERROR_CREATE("TCP socket is shutting down")); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_REFERENCING( + "TCP socket is shutting down", + &tcp->shutdown_error, 1)); return; } @@ -373,12 +380,18 @@ static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, we're not going to protect against these. However the IO Completion Port callback will happen from another thread, so we need to protect against concurrent access of the data structure in that regard. */ -static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; gpr_mu_lock(&tcp->mu); /* At that point, what may happen is that we're already inside the IOCP callback. See the comments in on_read and on_write. 
*/ - tcp->shutting_down = 1; + if (!tcp->shutting_down) { + tcp->shutting_down = 1; + tcp->shutdown_error = why; + } else { + GRPC_ERROR_UNREF(why); + } grpc_winsocket_shutdown(tcp->socket); gpr_mu_unlock(&tcp->mu); grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 40c8351472..d4df96c214 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -42,6 +42,7 @@ #include <grpc/support/useful.h> #include "src/core/lib/iomgr/time_averaged_stats.h" #include "src/core/lib/iomgr/timer_heap.h" +#include "src/core/lib/support/spinlock.h" #define INVALID_HEAP_INDEX 0xffffffffu @@ -69,7 +70,7 @@ typedef struct { /* Protects g_shard_queue */ static gpr_mu g_mu; /* Allow only one run_some_expired_timers at once */ -static gpr_mu g_checker_mu; +static gpr_spinlock g_checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER; static gpr_clock_type g_clock_type; static shard_type g_shards[NUM_SHARDS]; /* Protected by g_mu */ @@ -90,7 +91,6 @@ void grpc_timer_list_init(gpr_timespec now) { g_initialized = true; gpr_mu_init(&g_mu); - gpr_mu_init(&g_checker_mu); g_clock_type = now.clock_type; for (i = 0; i < NUM_SHARDS; i++) { @@ -117,16 +117,9 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { grpc_timer_heap_destroy(&shard->heap); } gpr_mu_destroy(&g_mu); - gpr_mu_destroy(&g_checker_mu); g_initialized = false; } -/* This is a cheap, but good enough, pointer hash for sharding the tasks: */ -static size_t shard_idx(const grpc_timer *info) { - size_t x = (size_t)info; - return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1); -} - static double ts_to_dbl(gpr_timespec ts) { return (double)ts.tv_sec + 1e-9 * ts.tv_nsec; } @@ -181,30 +174,30 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_timespec deadline, grpc_closure *closure, gpr_timespec now) { int is_first_timer = 0; - shard_type *shard = &g_shards[shard_idx(timer)]; + shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; GPR_ASSERT(deadline.clock_type == g_clock_type); GPR_ASSERT(now.clock_type == g_clock_type); timer->closure = closure; timer->deadline = deadline; - timer->triggered = 0; if (!g_initialized) { - timer->triggered = 1; + timer->pending = false; grpc_closure_sched( exec_ctx, timer->closure, GRPC_ERROR_CREATE("Attempt to create timer before initialization")); return; } + gpr_mu_lock(&shard->mu); + timer->pending = true; if (gpr_time_cmp(deadline, now) <= 0) { - timer->triggered = 1; + timer->pending = false; grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE); + gpr_mu_unlock(&shard->mu); + /* early out */ return; } - /* TODO(ctiller): check deadline expired */ - - gpr_mu_lock(&shard->mu); grpc_time_averaged_stats_add_sample(&shard->stats, ts_to_dbl(gpr_time_sub(deadline, now))); if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) { @@ -247,11 +240,11 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { return; } - shard_type *shard = &g_shards[shard_idx(timer)]; + shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; gpr_mu_lock(&shard->mu); - if (!timer->triggered) { + if (timer->pending) { grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); - timer->triggered = 1; + timer->pending = false; if (timer->heap_index == INVALID_HEAP_INDEX) { list_remove(timer); } else { @@ -302,7 +295,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) { } timer = grpc_timer_heap_top(&shard->heap); if 
(gpr_time_cmp(timer->deadline, now) > 0) return NULL; - timer->triggered = 1; + timer->pending = false; grpc_timer_heap_pop(&shard->heap); return timer; } @@ -330,7 +323,7 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now, /* TODO(ctiller): verify that there are any timers (atomically) here */ - if (gpr_mu_trylock(&g_checker_mu)) { + if (gpr_spinlock_trylock(&g_checker_mu)) { gpr_mu_lock(&g_mu); while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) { @@ -356,7 +349,7 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now, } gpr_mu_unlock(&g_mu); - gpr_mu_unlock(&g_checker_mu); + gpr_spinlock_unlock(&g_checker_mu); } else if (next != NULL) { /* TODO(ctiller): this forces calling code to do an short poll, and then retry the timer check (because this time through the timer list was diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h index 9d901c7e68..1608dce9fb 100644 --- a/src/core/lib/iomgr/timer_generic.h +++ b/src/core/lib/iomgr/timer_generic.h @@ -40,7 +40,7 @@ struct grpc_timer { gpr_timespec deadline; uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */ - int triggered; + bool pending; struct grpc_timer *next; struct grpc_timer *prev; grpc_closure *closure; diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c index fa2cdee964..f28a14405d 100644 --- a/src/core/lib/iomgr/timer_uv.c +++ b/src/core/lib/iomgr/timer_uv.c @@ -53,8 +53,8 @@ static void stop_uv_timer(uv_timer_t *handle) { void run_expired_timer(uv_timer_t *handle) { grpc_timer *timer = (grpc_timer *)handle->data; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GPR_ASSERT(!timer->triggered); - timer->triggered = 1; + GPR_ASSERT(timer->pending); + timer->pending = 0; grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE); stop_uv_timer(handle); grpc_exec_ctx_finish(&exec_ctx); @@ -67,11 +67,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, uv_timer_t *uv_timer; timer->closure = closure; if (gpr_time_cmp(deadline, now) <= 0) { - timer->triggered = 1; + timer->pending = 0; grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE); return; } - timer->triggered = 0; + timer->pending = 1; timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now)); uv_timer = gpr_malloc(sizeof(uv_timer_t)); uv_timer_init(uv_default_loop(), uv_timer); @@ -81,8 +81,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, } void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { - if (!timer->triggered) { - timer->triggered = 1; + if (timer->pending) { + timer->pending = 0; grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); stop_uv_timer((uv_timer_t *)timer->uv_timer); } diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h index 13cf8bd4fa..9870cd4a5c 100644 --- a/src/core/lib/iomgr/timer_uv.h +++ b/src/core/lib/iomgr/timer_uv.h @@ -41,7 +41,7 @@ struct grpc_timer { /* This is actually a uv_timer_t*, but we want to keep platform-specific types out of headers */ void *uv_timer; - int triggered; + int pending; }; #endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */ diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c index dfbd295c91..2a1c8d39fa 100644 --- a/src/core/lib/iomgr/udp_server.c +++ b/src/core/lib/iomgr/udp_server.c @@ -76,8 +76,10 @@ struct grpc_udp_listener { grpc_udp_server *server; grpc_resolved_address addr; grpc_closure read_closure; + grpc_closure write_closure; grpc_closure destroyed_closure; 
grpc_udp_server_read_cb read_cb; + grpc_udp_server_write_cb write_cb; grpc_udp_server_orphan_cb orphan_cb; struct grpc_udp_listener *next; @@ -176,7 +178,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { /* Call the orphan_cb to signal that the FD is about to be closed and * should no longer be used. */ GPR_ASSERT(sp->orphan_cb); - sp->orphan_cb(sp->emfd); + sp->orphan_cb(exec_ctx, sp->emfd); grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, "udp_listener_shutdown"); @@ -202,8 +204,9 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, if (s->active_ports) { for (sp = s->head; sp; sp = sp->next) { GPR_ASSERT(sp->orphan_cb); - sp->orphan_cb(sp->emfd); - grpc_fd_shutdown(exec_ctx, sp->emfd); + sp->orphan_cb(exec_ctx, sp->emfd); + grpc_fd_shutdown(exec_ctx, sp->emfd, + GRPC_ERROR_CREATE("Server destroyed")); } gpr_mu_unlock(&s->mu); } else { @@ -303,9 +306,33 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { gpr_mu_unlock(&sp->server->mu); } +static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { + grpc_udp_listener *sp = arg; + + gpr_mu_lock(&(sp->server->mu)); + if (error != GRPC_ERROR_NONE) { + if (0 == --sp->server->active_ports) { + gpr_mu_unlock(&sp->server->mu); + deactivated_all_ports(exec_ctx, sp->server); + } else { + gpr_mu_unlock(&sp->server->mu); + } + return; + } + + /* Tell the registered callback that the socket is writeable. */ + GPR_ASSERT(sp->write_cb); + sp->write_cb(exec_ctx, sp->emfd); + + /* Re-arm the notification event so we get another chance to write. */ + grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); + gpr_mu_unlock(&sp->server->mu); +} + static int add_socket_to_server(grpc_udp_server *s, int fd, const grpc_resolved_address *addr, grpc_udp_server_read_cb read_cb, + grpc_udp_server_write_cb write_cb, grpc_udp_server_orphan_cb orphan_cb) { grpc_udp_listener *sp; int port; @@ -332,6 +359,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd, sp->emfd = grpc_fd_create(fd, name); memcpy(&sp->addr, addr, sizeof(grpc_resolved_address)); sp->read_cb = read_cb; + sp->write_cb = write_cb; sp->orphan_cb = orphan_cb; GPR_ASSERT(sp->emfd); gpr_mu_unlock(&s->mu); @@ -344,6 +372,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd, int grpc_udp_server_add_port(grpc_udp_server *s, const grpc_resolved_address *addr, grpc_udp_server_read_cb read_cb, + grpc_udp_server_write_cb write_cb, grpc_udp_server_orphan_cb orphan_cb) { grpc_udp_listener *sp; int allocated_port1 = -1; @@ -390,7 +419,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s, // TODO(rjshade): Test and propagate the returned grpc_error*: GRPC_ERROR_UNREF(grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd)); - allocated_port1 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb); + allocated_port1 = + add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb); if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) { goto done; } @@ -412,7 +442,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s, grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { addr = &addr4_copy; } - allocated_port2 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb); + allocated_port2 = + add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb); done: gpr_free(allocated_addr); @@ -450,6 +481,10 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, grpc_schedule_on_exec_ctx); grpc_fd_notify_on_read(exec_ctx, sp->emfd, 
&sp->read_closure); + grpc_closure_init(&sp->write_closure, on_write, sp, + grpc_schedule_on_exec_ctx); + grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); + s->active_ports++; sp = sp->next; } diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h index f3c466a031..ed63fa7d81 100644 --- a/src/core/lib/iomgr/udp_server.h +++ b/src/core/lib/iomgr/udp_server.h @@ -49,8 +49,13 @@ typedef struct grpc_udp_server grpc_udp_server; typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, struct grpc_server *server); +/* Called when the socket is writeable. */ +typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, + grpc_fd *emfd); + /* Called when the grpc_fd is about to be orphaned (and the FD closed). */ -typedef void (*grpc_udp_server_orphan_cb)(grpc_fd *emfd); +typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx, + grpc_fd *emfd); /* Create a server, initially not bound to any ports */ grpc_udp_server *grpc_udp_server_create(void); @@ -75,6 +80,7 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index); int grpc_udp_server_add_port(grpc_udp_server *s, const grpc_resolved_address *addr, grpc_udp_server_read_cb read_cb, + grpc_udp_server_write_cb write_cb, grpc_udp_server_orphan_cb orphan_cb); void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server, diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.c index 030acd9811..1233cec04e 100644 --- a/src/core/lib/iomgr/unix_sockets_posix.c +++ b/src/core/lib/iomgr/unix_sockets_posix.c @@ -45,6 +45,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/log.h> +#include <grpc/support/useful.h> void grpc_create_socketpair_if_unix(int sv[2]) { GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0); @@ -53,7 +54,16 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name, grpc_resolved_addresses **addrs) { struct sockaddr_un *un; - + if (strlen(name) > GPR_ARRAY_SIZE(((struct sockaddr_un *)0)->sun_path) - 1) { + char *err_msg; + grpc_error *err; + gpr_asprintf(&err_msg, + "Path name should not have more than %" PRIuPTR " characters.", + GPR_ARRAY_SIZE(un->sun_path) - 1); + err = GRPC_ERROR_CREATE(err_msg); + gpr_free(err_msg); + return err; + } *addrs = gpr_malloc(sizeof(grpc_resolved_addresses)); (*addrs)->naddrs = 1; (*addrs)->addrs = gpr_malloc(sizeof(grpc_resolved_address));
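
[Editor's note] The new length guard in unix_sockets_posix.c is safe despite the null pointer: GPR_ARRAY_SIZE expands to a sizeof expression, and sizeof does not evaluate its operand, so ((struct sockaddr_un *)0)->sun_path is only used to name the array's type. A self-contained sketch of the same bound check (plain POSIX, hypothetical helper names):

#include <stdio.h>
#include <string.h>
#include <sys/un.h>

/* sun_path must hold the name plus a terminating NUL; taking sizeof
   through a null pointer is legal because sizeof never evaluates its
   operand, exactly as in the diff above. */
#define SUN_PATH_CAPACITY (sizeof(((struct sockaddr_un *)0)->sun_path))

static int unix_path_fits(const char *name) {
  return strlen(name) <= SUN_PATH_CAPACITY - 1;
}

int main(void) {
  char too_long[SUN_PATH_CAPACITY + 8];
  memset(too_long, 'a', sizeof(too_long) - 1);
  too_long[sizeof(too_long) - 1] = '\0';
  printf("short path fits: %d\n", unix_path_fits("/tmp/grpc.sock")); /* 1 */
  printf("oversized path fits: %d\n", unix_path_fits(too_long));     /* 0 */
  return 0;
}

On Linux sun_path is typically 108 bytes and on the BSDs 104, which is why the check is written against the struct rather than a hard-coded constant.
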