From d848280d025a2cbc2ab2fe687a5a0051004ce026 Mon Sep 17 00:00:00 2001 From: Carter Sande Date: Wed, 27 Jun 2018 17:48:16 +0000 Subject: tcp_posix.cc: adhere to IOV_MAX in tcp_flush --- src/core/lib/iomgr/tcp_posix.cc | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc index 9df2e206b2..b53ffbf01c 100644 --- a/src/core/lib/iomgr/tcp_posix.cc +++ b/src/core/lib/iomgr/tcp_posix.cc @@ -26,6 +26,7 @@ #include "src/core/lib/iomgr/tcp_posix.h" #include +#include #include #include #include @@ -513,7 +514,11 @@ static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer, } /* returns true if done, false if pending; if returning true, *error is set */ +#if defined(IOV_MAX) && IOV_MAX < 1000 +#define MAX_WRITE_IOVEC IOV_MAX +#else #define MAX_WRITE_IOVEC 1000 +#endif static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) { struct msghdr msg; struct iovec iov[MAX_WRITE_IOVEC]; -- cgit v1.2.3 From 37d8bbc32dd7929682f9dacd4b7041f76f169877 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 10 Jul 2018 13:30:57 -0700 Subject: resolver and default executors --- src/core/lib/iomgr/executor.cc | 114 ++++++++++++++++++-------- src/core/lib/iomgr/executor.h | 40 ++++++++- src/core/lib/iomgr/resolve_address_posix.cc | 5 +- src/core/lib/iomgr/resolve_address_windows.cc | 5 +- 4 files changed, 121 insertions(+), 43 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc index 1ad13b831d..d87eb4fbf8 100644 --- a/src/core/lib/iomgr/executor.cc +++ b/src/core/lib/iomgr/executor.cc @@ -44,7 +44,7 @@ grpc_core::TraceFlag executor_trace(false, "executor"); GPR_TLS_DECL(g_this_thread_state); -GrpcExecutor::GrpcExecutor(const char* executor_name) : name_(executor_name) { +GrpcExecutor::GrpcExecutor(const char* name) : name_(name) { adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER; gpr_atm_no_barrier_store(&num_threads_, 0); max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores()); @@ -298,60 +298,104 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error, } while (retry_push); } -static GrpcExecutor* global_executor; +static GrpcExecutor* executors[GRPC_NUM_EXECUTORS]; -void enqueue_long(grpc_closure* closure, grpc_error* error) { - global_executor->Enqueue(closure, error, false /* is_short */); +void default_enqueue_short(grpc_closure* closure, grpc_error* error) { + executors[GRPC_DEFAULT_EXECUTOR]->Enqueue(closure, error, + true /* is_short */); } -void enqueue_short(grpc_closure* closure, grpc_error* error) { - global_executor->Enqueue(closure, error, true /* is_short */); +void default_enqueue_long(grpc_closure* closure, grpc_error* error) { + executors[GRPC_DEFAULT_EXECUTOR]->Enqueue(closure, error, + false /* is_short */); } -// Short-Job executor scheduler -static const grpc_closure_scheduler_vtable global_executor_vtable_short = { - enqueue_short, enqueue_short, "executor-short"}; -static grpc_closure_scheduler global_scheduler_short = { - &global_executor_vtable_short}; +void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) { + executors[GRPC_RESOLVER_EXECUTOR]->Enqueue(closure, error, + true /* is_short */); +} + +void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) { + executors[GRPC_RESOLVER_EXECUTOR]->Enqueue(closure, error, + false /* is_short */); +} -// Long-job executor scheduler -static const grpc_closure_scheduler_vtable global_executor_vtable_long = { - 
enqueue_long, enqueue_long, "executor-long"}; -static grpc_closure_scheduler global_scheduler_long = { - &global_executor_vtable_long}; +static const grpc_closure_scheduler_vtable vtables_[] = { + {&default_enqueue_short, &default_enqueue_short, "def-ex-short"}, + {&default_enqueue_long, &default_enqueue_long, "def-ex-long"}, + {&resolver_enqueue_short, &resolver_enqueue_short, "res-ex-short"}, + {&resolver_enqueue_long, &resolver_enqueue_long, "res-ex-long"}}; + +static grpc_closure_scheduler schedulers_[] = { + {&vtables_[0]}, // Default short + {&vtables_[1]}, // Default long + {&vtables_[2]}, // Resolver short + {&vtables_[3]} // Resolver long +}; + +const char* executor_name(GrpcExecutorType executor_type) { + switch (executor_type) { + case GRPC_DEFAULT_EXECUTOR: + return "default-executor"; + case GRPC_RESOLVER_EXECUTOR: + return "resolver-executor"; + default: + GPR_UNREACHABLE_CODE(return "unknown"); + } + GPR_UNREACHABLE_CODE(return "unknown"); +} // grpc_executor_init() and grpc_executor_shutdown() functions are called in the // the grpc_init() and grpc_shutdown() code paths which are protected by a // global mutex. So it is okay to assume that these functions are thread-safe void grpc_executor_init() { - if (global_executor != nullptr) { - // grpc_executor_init() already called once (and grpc_executor_shutdown() - // wasn't called) - return; + for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { + // Return if grpc_executor_init() already called earlier + if (executors[i] != nullptr) { + GPR_ASSERT(i == 0); + break; + } + + executors[i] = grpc_core::New( + executor_name(static_cast(i))); + executors[i]->Init(); } +} - global_executor = grpc_core::New("global-executor"); - global_executor->Init(); +grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type, + GrpcExecutorJobType job_type) { + return &schedulers_[(executor_type * GRPC_NUM_EXECUTORS) + job_type]; +} + +grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) { + return grpc_executor_scheduler(GRPC_DEFAULT_EXECUTOR, job_type); } void grpc_executor_shutdown() { - // Shutdown already called - if (global_executor == nullptr) { - return; - } + for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { + // Return if grpc_executor_shutdown() is already called earlier + if (executors[i] == nullptr) { + GPR_ASSERT(i == 0); + break; + } - global_executor->Shutdown(); - grpc_core::Delete(global_executor); - global_executor = nullptr; + executors[i]->Shutdown(); + grpc_core::Delete(executors[i]); + executors[i] = nullptr; + } } -bool grpc_executor_is_threaded() { return global_executor->IsThreaded(); } +bool grpc_executor_is_threaded(GrpcExecutorType executor_type) { + GPR_ASSERT(executor_type < GRPC_NUM_EXECUTORS); + return executors[executor_type]->IsThreaded(); +} -void grpc_executor_set_threading(bool enable) { - global_executor->SetThreading(enable); +bool grpc_executor_is_threaded() { + return grpc_executor_is_threaded(GRPC_DEFAULT_EXECUTOR); } -grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) { - return job_type == GRPC_EXECUTOR_SHORT ? 
&global_scheduler_short - : &global_scheduler_long; +void grpc_executor_set_threading(bool enable) { + for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { + executors[i]->SetThreading(enable); + } } diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h index 395fc52863..bb2c2d82b9 100644 --- a/src/core/lib/iomgr/executor.h +++ b/src/core/lib/iomgr/executor.h @@ -36,7 +36,11 @@ typedef struct { grpc_core::Thread thd; } ThreadState; -typedef enum { GRPC_EXECUTOR_SHORT, GRPC_EXECUTOR_LONG } GrpcExecutorJobType; +typedef enum { + GRPC_EXECUTOR_SHORT = 0, + GRPC_EXECUTOR_LONG, + GRPC_NUM_EXECUTOR_JOB_TYPES // Add new values above this +} GrpcExecutorJobType; class GrpcExecutor { public: @@ -70,14 +74,42 @@ class GrpcExecutor { // == Global executor functions == +typedef enum { + GRPC_DEFAULT_EXECUTOR = 0, + GRPC_RESOLVER_EXECUTOR, + + GRPC_NUM_EXECUTORS // Add new values above this +} GrpcExecutorType; + +// TODO(sreek): Currently we have two executors (available globally): The +// default executor and the resolver executor. +// +// Some of the functions below operate on the DEFAULT executor only while some +// operate of ALL the executors. This is a bit confusing and should be cleaned +// up in future (where we make all the following functions take executor_type +// and/or job_type) + +// Initialize ALL the executors void grpc_executor_init(); +// Shutdown ALL the executors +void grpc_executor_shutdown(); + +// Set the threading mode for ALL the executors +void grpc_executor_set_threading(bool enable); + +// Get the DEFAULT executor scheduler for the given job_type grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type); -void grpc_executor_shutdown(); +// Get the executor scheduler for a given executor_type and a job_type +grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type, + GrpcExecutorJobType job_type); -bool grpc_executor_is_threaded(); +// Return if a given executor is running in threaded mode (i.e if +// grpc_executor_set_threading(true) was called previously on that executor) +bool grpc_executor_is_threaded(GrpcExecutorType executor_type); -void grpc_executor_set_threading(bool enable); +// Return if the DEFAULT executor is threaded +bool grpc_executor_is_threaded(); #endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */ diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc index 7a825643e1..c285d7eca6 100644 --- a/src/core/lib/iomgr/resolve_address_posix.cc +++ b/src/core/lib/iomgr/resolve_address_posix.cc @@ -166,8 +166,9 @@ static void posix_resolve_address(const char* name, const char* default_port, grpc_closure* on_done, grpc_resolved_addresses** addrs) { request* r = static_cast(gpr_malloc(sizeof(request))); - GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r, - grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)); + GRPC_CLOSURE_INIT( + &r->request_closure, do_request_thread, r, + grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT)); r->name = gpr_strdup(name); r->default_port = gpr_strdup(default_port); r->on_done = on_done; diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc index 71c92615ad..3e977dca2d 100644 --- a/src/core/lib/iomgr/resolve_address_windows.cc +++ b/src/core/lib/iomgr/resolve_address_windows.cc @@ -151,8 +151,9 @@ static void windows_resolve_address(const char* name, const char* default_port, grpc_closure* on_done, grpc_resolved_addresses** addresses) { request* r = 
(request*)gpr_malloc(sizeof(request)); - GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r, - grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)); + GRPC_CLOSURE_INIT( + &r->request_closure, do_request_thread, r, + grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT)); r->name = gpr_strdup(name); r->default_port = gpr_strdup(default_port); r->on_done = on_done; -- cgit v1.2.3 From 00476fd2b8876f0e33f5bec4f14cd73edbc9fe8b Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 16 Jul 2018 18:09:27 -0700 Subject: Fix tsan issue --- src/core/lib/iomgr/executor.cc | 122 +++++++++++++++++++++++++++-------------- src/core/lib/iomgr/executor.h | 5 +- 2 files changed, 84 insertions(+), 43 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc index d87eb4fbf8..3c3a784966 100644 --- a/src/core/lib/iomgr/executor.cc +++ b/src/core/lib/iomgr/executor.cc @@ -40,19 +40,25 @@ gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \ } +#define EXECUTOR_TRACE0(str) \ + if (executor_trace.enabled()) { \ + gpr_log(GPR_INFO, "EXECUTOR " str); \ + } + grpc_core::TraceFlag executor_trace(false, "executor"); GPR_TLS_DECL(g_this_thread_state); GrpcExecutor::GrpcExecutor(const char* name) : name_(name) { adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER; - gpr_atm_no_barrier_store(&num_threads_, 0); + gpr_atm_rel_store(&num_threads_, 0); max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores()); } void GrpcExecutor::Init() { SetThreading(true); } -size_t GrpcExecutor::RunClosures(grpc_closure_list list) { +size_t GrpcExecutor::RunClosures(const char* executor_name, + grpc_closure_list list) { size_t n = 0; grpc_closure* c = list.head; @@ -60,11 +66,11 @@ size_t GrpcExecutor::RunClosures(grpc_closure_list list) { grpc_closure* next = c->next_data.next; grpc_error* error = c->error_data.error; #ifndef NDEBUG - EXECUTOR_TRACE("run %p [created by %s:%d]", c, c->file_created, - c->line_created); + EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c, + c->file_created, c->line_created); c->scheduled = false; #else - EXECUTOR_TRACE("run %p", c); + EXECUTOR_TRACE("(%s) run %p", executor_name, c); #endif c->cb(c->cb_arg, error); GRPC_ERROR_UNREF(error); @@ -77,17 +83,21 @@ size_t GrpcExecutor::RunClosures(grpc_closure_list list) { } bool GrpcExecutor::IsThreaded() const { - return gpr_atm_no_barrier_load(&num_threads_) > 0; + return gpr_atm_acq_load(&num_threads_) > 0; } void GrpcExecutor::SetThreading(bool threading) { - gpr_atm curr_num_threads = gpr_atm_no_barrier_load(&num_threads_); + gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_); + EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading); if (threading) { - if (curr_num_threads > 0) return; + if (curr_num_threads > 0) { + EXECUTOR_TRACE("(%s) SetThreading(true). 
curr_num_threads == 0", name_); + return; + } GPR_ASSERT(num_threads_ == 0); - gpr_atm_no_barrier_store(&num_threads_, 1); + gpr_atm_rel_store(&num_threads_, 1); gpr_tls_init(&g_this_thread_state); thd_state_ = static_cast( gpr_zalloc(sizeof(ThreadState) * max_threads_)); @@ -96,6 +106,7 @@ void GrpcExecutor::SetThreading(bool threading) { gpr_mu_init(&thd_state_[i].mu); gpr_cv_init(&thd_state_[i].cv); thd_state_[i].id = i; + thd_state_[i].name = name_; thd_state_[i].thd = grpc_core::Thread(); thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT; } @@ -104,7 +115,10 @@ void GrpcExecutor::SetThreading(bool threading) { grpc_core::Thread(name_, &GrpcExecutor::ThreadMain, &thd_state_[0]); thd_state_[0].thd.Start(); } else { // !threading - if (curr_num_threads == 0) return; + if (curr_num_threads == 0) { + EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_); + return; + } for (size_t i = 0; i < max_threads_; i++) { gpr_mu_lock(&thd_state_[i].mu); @@ -121,20 +135,22 @@ void GrpcExecutor::SetThreading(bool threading) { curr_num_threads = gpr_atm_no_barrier_load(&num_threads_); for (gpr_atm i = 0; i < curr_num_threads; i++) { thd_state_[i].thd.Join(); - EXECUTOR_TRACE(" Thread %" PRIdPTR " of %" PRIdPTR " joined", i, - curr_num_threads); + EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_, + i + 1, curr_num_threads); } - gpr_atm_no_barrier_store(&num_threads_, 0); + gpr_atm_rel_store(&num_threads_, 0); for (size_t i = 0; i < max_threads_; i++) { gpr_mu_destroy(&thd_state_[i].mu); gpr_cv_destroy(&thd_state_[i].cv); - RunClosures(thd_state_[i].elems); + RunClosures(thd_state_[i].name, thd_state_[i].elems); } gpr_free(thd_state_); gpr_tls_destroy(&g_this_thread_state); } + + EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading); } void GrpcExecutor::Shutdown() { SetThreading(false); } @@ -147,8 +163,8 @@ void GrpcExecutor::ThreadMain(void* arg) { size_t subtract_depth = 0; for (;;) { - EXECUTOR_TRACE("[%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")", ts->id, - subtract_depth); + EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")", + ts->name, ts->id, subtract_depth); gpr_mu_lock(&ts->mu); ts->depth -= subtract_depth; @@ -159,7 +175,7 @@ void GrpcExecutor::ThreadMain(void* arg) { } if (ts->shutdown) { - EXECUTOR_TRACE("[%" PRIdPTR "]: shutdown", ts->id); + EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id); gpr_mu_unlock(&ts->mu); break; } @@ -169,10 +185,10 @@ void GrpcExecutor::ThreadMain(void* arg) { ts->elems = GRPC_CLOSURE_LIST_INIT; gpr_mu_unlock(&ts->mu); - EXECUTOR_TRACE("[%" PRIdPTR "]: execute", ts->id); + EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id); grpc_core::ExecCtx::Get()->InvalidateNow(); - subtract_depth = RunClosures(closures); + subtract_depth = RunClosures(ts->name, closures); } } @@ -188,16 +204,16 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error, do { retry_push = false; size_t cur_thread_count = - static_cast(gpr_atm_no_barrier_load(&num_threads_)); + static_cast(gpr_atm_acq_load(&num_threads_)); // If the number of threads is zero(i.e either the executor is not threaded // or already shutdown), then queue the closure on the exec context itself if (cur_thread_count == 0) { #ifndef NDEBUG - EXECUTOR_TRACE("schedule %p (created %s:%d) inline", closure, + EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure, closure->file_created, closure->line_created); #else - EXECUTOR_TRACE("schedule %p inline", closure); + EXECUTOR_TRACE("(%s) schedule %p 
inline", name_, closure); #endif grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), closure, error); @@ -213,18 +229,18 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error, } ThreadState* orig_ts = ts; - bool try_new_thread = false; + for (;;) { #ifndef NDEBUG EXECUTOR_TRACE( - "try to schedule %p (%s) (created %s:%d) to thread " + "(%s) try to schedule %p (%s) (created %s:%d) to thread " "%" PRIdPTR, - closure, is_short ? "short" : "long", closure->file_created, + name_, closure, is_short ? "short" : "long", closure->file_created, closure->line_created, ts->id); #else - EXECUTOR_TRACE("try to schedule %p (%s) to thread %" PRIdPTR, closure, - is_short ? "short" : "long", ts->id); + EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_, + closure, is_short ? "short" : "long", ts->id); #endif gpr_mu_lock(&ts->mu); @@ -236,18 +252,22 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error, size_t idx = ts->id; ts = &thd_state_[(idx + 1) % cur_thread_count]; if (ts == orig_ts) { - // We cycled through all the threads. Retry enqueue again (by creating - // a new thread) + // We cycled through all the threads. Retry enqueue again by creating + // a new thread + // + // TODO (sreek): There is a potential issue here. We are + // unconditionally setting try_new_thread to true here. What if the + // executor is shutdown OR if cur_thread_count is already equal to + // max_threads ? + // (Fortunately, this is not an issue yet (as of july 2018) because + // there is only one instance of long job in gRPC and hence we will + // not hit this code path) retry_push = true; - // TODO (sreek): What if the executor is shutdown OR if - // cur_thread_count is already equal to max_threads ? (currently - as - // of July 2018, we do not run in to this issue because there is only - // one instance of long job in gRPC. This has to be fixed soon) try_new_thread = true; break; } - continue; + continue; // Try the next thread-state } // == Found the thread state (i.e thread) to enqueue this closure! == @@ -277,13 +297,11 @@ void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error, } if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) { - cur_thread_count = - static_cast(gpr_atm_no_barrier_load(&num_threads_)); + cur_thread_count = static_cast(gpr_atm_acq_load(&num_threads_)); if (cur_thread_count < max_threads_) { - // Increment num_threads (Safe to do a no_barrier_store instead of a - // cas because we always increment num_threads under the - // 'adding_thread_lock') - gpr_atm_no_barrier_store(&num_threads_, cur_thread_count + 1); + // Increment num_threads (safe to do a store instead of a cas because we + // always increment num_threads under the 'adding_thread_lock') + gpr_atm_rel_store(&num_threads_, cur_thread_count + 1); thd_state_[cur_thread_count].thd = grpc_core::Thread( name_, &GrpcExecutor::ThreadMain, &thd_state_[cur_thread_count]); @@ -349,9 +367,12 @@ const char* executor_name(GrpcExecutorType executor_type) { // the grpc_init() and grpc_shutdown() code paths which are protected by a // global mutex. So it is okay to assume that these functions are thread-safe void grpc_executor_init() { + EXECUTOR_TRACE0("grpc_executor_init() enter"); for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { // Return if grpc_executor_init() already called earlier if (executors[i] != nullptr) { + // Ideally we should also assert that all executors i.e executor[0] to + // executor[GRPC_NUM_EXECUTORS-1] are != nullptr too. 
GPR_ASSERT(i == 0); break; } @@ -360,6 +381,7 @@ void grpc_executor_init() { executor_name(static_cast(i))); executors[i]->Init(); } + EXECUTOR_TRACE0("grpc_executor_init() done"); } grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type, @@ -372,17 +394,34 @@ grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) { } void grpc_executor_shutdown() { + EXECUTOR_TRACE0("grpc_executor_shutdown() enter"); for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { // Return if grpc_executor_shutdown() is already called earlier if (executors[i] == nullptr) { + // Ideally we should also assert that all executors i.e executor[0] to + // executor[GRPC_NUM_EXECUTORS-1] are nullptr too. GPR_ASSERT(i == 0); break; } - executors[i]->Shutdown(); + } + + // Delete the executor objects. + // + // NOTE: It is important to do this in a separate loop (i.e ONLY after all the + // executors are 'Shutdown' first) because it is possible for one executor + // (that is not shutdown yet) to call Enqueue() on a different executor which + // is already shutdown. This is legal and in such cases, the Enqueue() + // operation effectively "fails" and enqueues that closure on the calling + // thread's exec_ctx. + // + // By ensuring that all executors are shutdown first, we are also ensuring + // that no thread is active across all executors. + for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { grpc_core::Delete(executors[i]); executors[i] = nullptr; } + EXECUTOR_TRACE0("grpc_executor_shutdown() done"); } bool grpc_executor_is_threaded(GrpcExecutorType executor_type) { @@ -395,6 +434,7 @@ bool grpc_executor_is_threaded() { } void grpc_executor_set_threading(bool enable) { + EXECUTOR_TRACE("grpc_executor_set_threading(%d) called", enable); for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { executors[i]->SetThreading(enable); } diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h index bb2c2d82b9..8829138c5f 100644 --- a/src/core/lib/iomgr/executor.h +++ b/src/core/lib/iomgr/executor.h @@ -27,7 +27,8 @@ typedef struct { gpr_mu mu; - size_t id; // For debugging purposes + size_t id; // For debugging purposes + const char* name; // Thread state name gpr_cv cv; grpc_closure_list elems; size_t depth; // Number of closures in the closure list @@ -62,7 +63,7 @@ class GrpcExecutor { void Enqueue(grpc_closure* closure, grpc_error* error, bool is_short); private: - static size_t RunClosures(grpc_closure_list list); + static size_t RunClosures(const char* executor_name, grpc_closure_list list); static void ThreadMain(void* arg); const char* name_; -- cgit v1.2.3 From 67bb4e30302cec45c9e05144a64ee6a38c0f9559 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 11 Jun 2018 08:54:30 -0700 Subject: Initial scaffolding --- include/grpc/grpc.h | 4 ++++ include/grpcpp/resource_quota.h | 10 ++++++++++ src/core/lib/iomgr/resource_quota.cc | 27 +++++++++++++++++++++++++++ src/core/lib/iomgr/resource_quota.h | 16 ++++++++++++++++ src/cpp/common/resource_quota_cc.cc | 4 ++++ 5 files changed, 61 insertions(+) (limited to 'src/core/lib/iomgr') diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index c129a66949..bc3bc5fbbf 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -450,6 +450,10 @@ GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota* resource_quota); GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, size_t new_size); +/** Update the size of the maximum number of threads allowed */ +GRPCAPI void grpc_resource_quota_set_max_threads( + 
grpc_resource_quota* resource_quota, int new_max_threads); + /** Fetch a vtable for a grpc_channel_arg that points to a grpc_resource_quota */ GRPCAPI const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void); diff --git a/include/grpcpp/resource_quota.h b/include/grpcpp/resource_quota.h index 554437a40d..77cdd48dcc 100644 --- a/include/grpcpp/resource_quota.h +++ b/include/grpcpp/resource_quota.h @@ -44,6 +44,16 @@ class ResourceQuota final : private GrpcLibraryCodegen { /// No time bound is given for this to occur however. ResourceQuota& Resize(size_t new_size); + /// Set the max number of threads that can be allocated from this + /// ResourceQuota object. + /// + /// If the new_max_threads value is smaller than the current value, no new + /// threads are allocated until the number of active threads fall below + /// new_max_threads. There is no time bound on when this may happen i.e none + /// of the current threads are forcefully destroyed and all threads run their + /// normal course. + ResourceQuota& SetMaxThreads(int new_max_threads); + grpc_resource_quota* c_resource_quota() const { return impl_; } private: diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index 539bc120ce..b50b2f2e46 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -96,6 +96,9 @@ struct grpc_resource_user { list, false otherwise */ bool added_to_free_pool; + /* The number of threads currently allocated to this resource user */ + gpr_atm num_threads; + /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer */ grpc_closure* reclaimers[2]; @@ -135,12 +138,21 @@ struct grpc_resource_quota { gpr_atm last_size; + /* Max number of threads allowed */ + int max_threads; + + /* Number of threads currently allocated via this resource_quota object */ + gpr_atm num_threads; + /* Has rq_step been scheduled to occur? 
*/ bool step_scheduled; + /* Are we currently reclaiming memory */ bool reclaiming; + /* Closure around rq_step */ grpc_closure rq_step_closure; + /* Closure around rq_reclamation_done */ grpc_closure rq_reclamation_done_closure; @@ -594,6 +606,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { resource_quota->free_pool = INT64_MAX; resource_quota->size = INT64_MAX; gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX); + resource_quota->max_threads = INT_MAX; + gpr_atm_no_barrier_store(&resource_quota->num_threads, 0); resource_quota->step_scheduled = false; resource_quota->reclaiming = false; gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0); @@ -646,6 +660,10 @@ double grpc_resource_quota_get_memory_pressure( (static_cast(MEMORY_USAGE_ESTIMATION_MAX)); } +/* Public API */ +void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota, + int new_max_threads) {} + /* Public API */ void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, size_t size) { @@ -731,6 +749,7 @@ grpc_resource_user* grpc_resource_user_create( grpc_closure_list_init(&resource_user->on_allocated); resource_user->allocating = false; resource_user->added_to_free_pool = false; + gpr_atm_no_barrier_store(&resource_user->num_threads, 0); resource_user->reclaimers[0] = nullptr; resource_user->reclaimers[1] = nullptr; resource_user->new_reclaimers[0] = nullptr; @@ -785,6 +804,14 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) { } } +bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user, + int thd_count) { + return true; +} + +void grpc_resource_user_free_threads(grpc_resource_user* resource_user, + int thd_count) {} + void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size, grpc_closure* optional_on_done) { gpr_mu_lock(&resource_user->mu); diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index 937daf8728..a111ebb4d8 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -93,6 +93,22 @@ void grpc_resource_user_ref(grpc_resource_user* resource_user); void grpc_resource_user_unref(grpc_resource_user* resource_user); void grpc_resource_user_shutdown(grpc_resource_user* resource_user); +/* Attempts to get quota (from the resource_user) to create 'thd_count' number + * of threads. Returns true if successful (i.e the caller is now free to create + * 'thd_count' number of threads or false if quota is not available */ +bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user, + int thd_count); +/* Releases 'thd_count' worth of quota back to the resource user. The quota + * should have been previously obtained successfully by calling + * grpc_resource_user_alloc_threads(). + * + * Note: There need not be an exact one-to-one correspondence between + * grpc_resource_user_alloc_threads() and grpc_resource_user_free_threads() + * calls. The only requirement is that the number of threads allocated should + * all be eventually released */ +void grpc_resource_user_free_threads(grpc_resource_user* resource_user, + int thd_count); + /* Allocate from the resource user (and its quota). If optional_on_done is NULL, then allocate immediately. This may push the quota over-limit, at which point reclamation will kick in. 
diff --git a/src/cpp/common/resource_quota_cc.cc b/src/cpp/common/resource_quota_cc.cc index daeb0ba171..276e5f7954 100644 --- a/src/cpp/common/resource_quota_cc.cc +++ b/src/cpp/common/resource_quota_cc.cc @@ -33,4 +33,8 @@ ResourceQuota& ResourceQuota::Resize(size_t new_size) { return *this; } +ResourceQuota& ResourceQuota::SetMaxThreads(int new_max_threads) { + grpc_resource_quota_set_max_threads(impl_, new_max_threads); + return *this; +} } // namespace grpc -- cgit v1.2.3 From 913f9b930a7fb6a5377c1b5e15ec47f5645828e7 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 12 Jun 2018 16:17:54 -0700 Subject: Add Core resource quota implementation --- include/grpcpp/resource_quota.h | 6 ++--- src/core/lib/iomgr/resource_quota.cc | 48 ++++++++++++++++++++++++++++++++---- src/core/lib/iomgr/resource_quota.h | 2 +- 3 files changed, 47 insertions(+), 9 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/include/grpcpp/resource_quota.h b/include/grpcpp/resource_quota.h index 77cdd48dcc..50bd1cb849 100644 --- a/include/grpcpp/resource_quota.h +++ b/include/grpcpp/resource_quota.h @@ -26,10 +26,10 @@ struct grpc_resource_quota; namespace grpc { -/// ResourceQuota represents a bound on memory usage by the gRPC library. -/// A ResourceQuota can be attached to a server (via \a ServerBuilder), +/// ResourceQuota represents a bound on memory and thread usage by the gRPC +/// library. A ResourceQuota can be attached to a server (via \a ServerBuilder), /// or a client channel (via \a ChannelArguments). -/// gRPC will attempt to keep memory used by all attached entities +/// gRPC will attempt to keep memory and threads used by all attached entities /// below the ResourceQuota bound. class ResourceQuota final : private GrpcLibraryCodegen { public: diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index b50b2f2e46..a30688bd87 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -138,11 +138,22 @@ struct grpc_resource_quota { gpr_atm last_size; + /* Mutex to protect max_threads and num_threads */ + /* Note: We could have used gpr_atm for max_threads and num_threads and avoid + * having this mutex; but in that case, each invocation of the function + * grpc_resource_user_alloc_threads() will have to do atleast two atomic loads + * (for max_threads and num_threads) followed by a CAS (on num_threads). + * Moreover, we expect grpc_resource_user_alloc_threads() to be often called + * concurrently thereby increasing the chances of failing the CAS operation. + * This additional complexity is not worth the tiny perf gain we may (or may + * not) have by using atomics */ + gpr_mu thd_mu; + /* Max number of threads allowed */ int max_threads; /* Number of threads currently allocated via this resource_quota object */ - gpr_atm num_threads; + int num_threads; /* Has rq_step been scheduled to occur? 
*/ bool step_scheduled; @@ -606,8 +617,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { resource_quota->free_pool = INT64_MAX; resource_quota->size = INT64_MAX; gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX); + gpr_mu_init(&resource_quota->thd_mu); resource_quota->max_threads = INT_MAX; - gpr_atm_no_barrier_store(&resource_quota->num_threads, 0); + resource_quota->num_threads = 0; resource_quota->step_scheduled = false; resource_quota->reclaiming = false; gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0); @@ -662,7 +674,11 @@ double grpc_resource_quota_get_memory_pressure( /* Public API */ void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota, - int new_max_threads) {} + int new_max_threads) { + gpr_mu_lock(&resource_quota->thd_mu); + resource_quota->max_threads = new_max_threads; + gpr_mu_unlock(&resource_quota->thd_mu); +} /* Public API */ void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, @@ -806,11 +822,33 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) { bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user, int thd_count) { - return true; + bool is_success = false; + gpr_mu_lock(&resource_user->resource_quota->thd_mu); + grpc_resource_quota* rq = resource_user->resource_quota; + if (rq->num_threads + thd_count <= rq->max_threads) { + rq->num_threads += thd_count; + gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, thd_count); + is_success = true; + } + gpr_mu_unlock(&resource_user->resource_quota->thd_mu); + return is_success; } void grpc_resource_user_free_threads(grpc_resource_user* resource_user, - int thd_count) {} + int thd_count) { + gpr_mu_lock(&resource_user->resource_quota->thd_mu); + grpc_resource_quota* rq = resource_user->resource_quota; + rq->num_threads -= thd_count; + int old_cnt = static_cast( + gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, -thd_count)); + if (old_cnt < thd_count || rq->num_threads < 0) { + gpr_log(GPR_ERROR, + "Releasing more threads (%d) that currently allocated (rq threads: " + "%d, ru threads: %d)", + thd_count, old_cnt, rq->num_threads + thd_count); + } + gpr_mu_unlock(&resource_user->resource_quota->thd_mu); +} void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size, grpc_closure* optional_on_done) { diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index a111ebb4d8..7342ef84c8 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -95,7 +95,7 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user); /* Attempts to get quota (from the resource_user) to create 'thd_count' number * of threads. Returns true if successful (i.e the caller is now free to create - * 'thd_count' number of threads or false if quota is not available */ + * 'thd_count' number of threads) or false if quota is not available */ bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user, int thd_count); /* Releases 'thd_count' worth of quota back to the resource user. 
The quota -- cgit v1.2.3 From 68ad431864c59faa378df78faf280606296e3a6e Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 17 Jul 2018 20:26:29 -0700 Subject: Store schedulers_ and vtables_ in a 2D array --- src/core/lib/iomgr/executor.cc | 102 +++++++++++++++++++---------------------- 1 file changed, 47 insertions(+), 55 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc index 3c3a784966..45d96b80eb 100644 --- a/src/core/lib/iomgr/executor.cc +++ b/src/core/lib/iomgr/executor.cc @@ -338,55 +338,46 @@ void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) { false /* is_short */); } -static const grpc_closure_scheduler_vtable vtables_[] = { - {&default_enqueue_short, &default_enqueue_short, "def-ex-short"}, - {&default_enqueue_long, &default_enqueue_long, "def-ex-long"}, - {&resolver_enqueue_short, &resolver_enqueue_short, "res-ex-short"}, - {&resolver_enqueue_long, &resolver_enqueue_long, "res-ex-long"}}; - -static grpc_closure_scheduler schedulers_[] = { - {&vtables_[0]}, // Default short - {&vtables_[1]}, // Default long - {&vtables_[2]}, // Resolver short - {&vtables_[3]} // Resolver long -}; - -const char* executor_name(GrpcExecutorType executor_type) { - switch (executor_type) { - case GRPC_DEFAULT_EXECUTOR: - return "default-executor"; - case GRPC_RESOLVER_EXECUTOR: - return "resolver-executor"; - default: - GPR_UNREACHABLE_CODE(return "unknown"); - } - GPR_UNREACHABLE_CODE(return "unknown"); -} +static const grpc_closure_scheduler_vtable + vtables_[GRPC_NUM_EXECUTORS][GRPC_NUM_EXECUTOR_JOB_TYPES] = { + {{&default_enqueue_short, &default_enqueue_short, "def-ex-short"}, + {&default_enqueue_long, &default_enqueue_long, "def-ex-long"}}, + {{&resolver_enqueue_short, &resolver_enqueue_short, "res-ex-short"}, + {&resolver_enqueue_long, &resolver_enqueue_long, "res-ex-long"}}}; + +static grpc_closure_scheduler + schedulers_[GRPC_NUM_EXECUTORS][GRPC_NUM_EXECUTOR_JOB_TYPES] = { + {{&vtables_[GRPC_DEFAULT_EXECUTOR][GRPC_EXECUTOR_SHORT]}, + {&vtables_[GRPC_DEFAULT_EXECUTOR][GRPC_EXECUTOR_LONG]}}, + {{&vtables_[GRPC_RESOLVER_EXECUTOR][GRPC_EXECUTOR_SHORT]}, + {&vtables_[GRPC_RESOLVER_EXECUTOR][GRPC_EXECUTOR_LONG]}}}; // grpc_executor_init() and grpc_executor_shutdown() functions are called in the // the grpc_init() and grpc_shutdown() code paths which are protected by a // global mutex. So it is okay to assume that these functions are thread-safe void grpc_executor_init() { EXECUTOR_TRACE0("grpc_executor_init() enter"); - for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { - // Return if grpc_executor_init() already called earlier - if (executors[i] != nullptr) { - // Ideally we should also assert that all executors i.e executor[0] to - // executor[GRPC_NUM_EXECUTORS-1] are != nullptr too. 
- GPR_ASSERT(i == 0); - break; - } - executors[i] = grpc_core::New( - executor_name(static_cast(i))); - executors[i]->Init(); + // Return if grpc_executor_init() is already called earlier + if (executors[GRPC_DEFAULT_EXECUTOR] != nullptr) { + GPR_ASSERT(executors[GRPC_RESOLVER_EXECUTOR] != nullptr); + return; } + + executors[GRPC_DEFAULT_EXECUTOR] = + grpc_core::New("default-executor"); + executors[GRPC_RESOLVER_EXECUTOR] = + grpc_core::New("resolver-executor"); + + executors[GRPC_DEFAULT_EXECUTOR]->Init(); + executors[GRPC_RESOLVER_EXECUTOR]->Init(); + EXECUTOR_TRACE0("grpc_executor_init() done"); } grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type, GrpcExecutorJobType job_type) { - return &schedulers_[(executor_type * GRPC_NUM_EXECUTORS) + job_type]; + return &schedulers_[executor_type][job_type]; } grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) { @@ -395,32 +386,33 @@ grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) { void grpc_executor_shutdown() { EXECUTOR_TRACE0("grpc_executor_shutdown() enter"); - for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { - // Return if grpc_executor_shutdown() is already called earlier - if (executors[i] == nullptr) { - // Ideally we should also assert that all executors i.e executor[0] to - // executor[GRPC_NUM_EXECUTORS-1] are nullptr too. - GPR_ASSERT(i == 0); - break; - } - executors[i]->Shutdown(); + + // Return if grpc_executor_shutdown() is already called earlier + if (executors[GRPC_DEFAULT_EXECUTOR] == nullptr) { + GPR_ASSERT(executors[GRPC_RESOLVER_EXECUTOR] == nullptr); + return; } + executors[GRPC_DEFAULT_EXECUTOR]->Shutdown(); + executors[GRPC_RESOLVER_EXECUTOR]->Shutdown(); + // Delete the executor objects. // - // NOTE: It is important to do this in a separate loop (i.e ONLY after all the - // executors are 'Shutdown' first) because it is possible for one executor - // (that is not shutdown yet) to call Enqueue() on a different executor which - // is already shutdown. This is legal and in such cases, the Enqueue() - // operation effectively "fails" and enqueues that closure on the calling - // thread's exec_ctx. + // NOTE: It is important to call Shutdown() on all executors first before + // calling Delete() because it is possible for one executor (that is not + // shutdown yet) to call Enqueue() on a different executor which is already + // shutdown. This is legal and in such cases, the Enqueue() operation + // effectively "fails" and enqueues that closure on the calling thread's + // exec_ctx. // // By ensuring that all executors are shutdown first, we are also ensuring // that no thread is active across all executors. 
- for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { - grpc_core::Delete(executors[i]); - executors[i] = nullptr; - } + + grpc_core::Delete(executors[GRPC_DEFAULT_EXECUTOR]); + grpc_core::Delete(executors[GRPC_RESOLVER_EXECUTOR]); + executors[GRPC_DEFAULT_EXECUTOR] = nullptr; + executors[GRPC_RESOLVER_EXECUTOR] = nullptr; + EXECUTOR_TRACE0("grpc_executor_shutdown() done"); } -- cgit v1.2.3 From 6b2b91cc938b1ce4400ad19106255afbdefd00d7 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 18 Jul 2018 14:43:59 -0700 Subject: Fix a TSAN bug in lockfree_event --- src/core/lib/iomgr/lockfree_event.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc index 5b6b79fa91..8f2c24f464 100644 --- a/src/core/lib/iomgr/lockfree_event.cc +++ b/src/core/lib/iomgr/lockfree_event.cc @@ -89,7 +89,11 @@ void LockfreeEvent::DestroyEvent() { void LockfreeEvent::NotifyOn(grpc_closure* closure) { while (true) { - gpr_atm curr = gpr_atm_no_barrier_load(&state_); + /* This load needs to be an acquire load because this can be a shutdown + * error that we might need to reference. Adding acquire semantics makes + * sure that the shutdown error has been inited properly before us + * referencing it. */ + gpr_atm curr = gpr_atm_acq_load(&state_); if (grpc_polling_trace.enabled()) { gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this, (void*)curr, closure); -- cgit v1.2.3 From d44abfbce5084bbc543b6d9ab02d39f3673273d2 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 18 Jul 2018 15:56:42 -0700 Subject: s/inited/initialized --- src/core/lib/iomgr/lockfree_event.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc index 8f2c24f464..085fea40a4 100644 --- a/src/core/lib/iomgr/lockfree_event.cc +++ b/src/core/lib/iomgr/lockfree_event.cc @@ -91,7 +91,7 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) { while (true) { /* This load needs to be an acquire load because this can be a shutdown * error that we might need to reference. Adding acquire semantics makes - * sure that the shutdown error has been inited properly before us + * sure that the shutdown error has been initialized properly before us * referencing it. 
*/ gpr_atm curr = gpr_atm_acq_load(&state_); if (grpc_polling_trace.enabled()) { -- cgit v1.2.3 From b95772eeb926f78b8ac14e03b36ed3e73b2e1a2c Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 23 Jul 2018 23:38:13 -0700 Subject: Add Tests in Core and C++ and fix a few related bugs in thread_manager.cc --- src/core/lib/iomgr/resource_quota.cc | 7 ++ src/cpp/server/server_cc.cc | 10 +- src/cpp/thread_manager/thread_manager.cc | 70 ++++++------ src/cpp/thread_manager/thread_manager.h | 42 +++++-- test/core/iomgr/resource_quota_test.cc | 96 ++++++++++++++++ test/cpp/thread_manager/thread_manager_test.cc | 147 ++++++++++++++++++------- 6 files changed, 288 insertions(+), 84 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index a30688bd87..67d05aa202 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -547,6 +547,11 @@ static void ru_shutdown(void* ru, grpc_error* error) { static void ru_destroy(void* ru, grpc_error* error) { grpc_resource_user* resource_user = static_cast(ru); GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0); + // Free all the remaining thread quota + grpc_resource_user_free_threads( + resource_user, + static_cast(gpr_atm_no_barrier_load(&resource_user->num_threads))); + for (int i = 0; i < GRPC_RULIST_COUNT; i++) { rulist_remove(resource_user, static_cast(i)); } @@ -642,6 +647,7 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) { if (gpr_unref(&resource_quota->refs)) { + GPR_ASSERT(resource_quota->num_threads == 0); // No outstanding thd quota GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota"); gpr_free(resource_quota->name); gpr_free(resource_quota); @@ -846,6 +852,7 @@ void grpc_resource_user_free_threads(grpc_resource_user* resource_user, "Releasing more threads (%d) that currently allocated (rq threads: " "%d, ru threads: %d)", thd_count, old_cnt, rq->num_threads + thd_count); + abort(); } gpr_mu_unlock(&resource_user->resource_quota->thd_mu); } diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 6e6e0bfffe..786ef44e3e 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -47,6 +47,12 @@ namespace grpc { namespace { +// The default value for maximum number of threads that can be created in the +// sync server. This value of 1500 is empirically chosen. 
To increase the max +// number of threads in a sync server, pass a custom ResourceQuota object (with +// the desired number of max-threads set) to the server builder +#define DEFAULT_MAX_SYNC_SERVER_THREADS 1500 + class DefaultGlobalCallbacks final : public Server::GlobalCallbacks { public: ~DefaultGlobalCallbacks() override {} @@ -395,7 +401,9 @@ Server::Server( if (sync_server_cqs_ != nullptr) { bool default_rq_created = false; if (server_rq == nullptr) { - server_rq = grpc_resource_quota_create("SyncServer-Default"); + server_rq = grpc_resource_quota_create("SyncServer-default-rq"); + grpc_resource_quota_set_max_threads(server_rq, + DEFAULT_MAX_SYNC_SERVER_THREADS); default_rq_created = true; } diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index c0fa98798a..5d367511e2 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -22,8 +22,8 @@ #include #include - #include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/exec_ctx.h" namespace grpc { @@ -55,7 +55,8 @@ ThreadManager::ThreadManager(const char* name, num_pollers_(0), min_pollers_(min_pollers), max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers), - num_threads_(0) { + num_threads_(0), + max_active_threads_sofar_(0) { resource_user_ = grpc_resource_user_create(resource_quota, name); } @@ -65,6 +66,7 @@ ThreadManager::~ThreadManager() { GPR_ASSERT(num_threads_ == 0); } + grpc_core::ExecCtx exec_ctx; // grpc_resource_user_unref needs an exec_ctx grpc_resource_user_unref(resource_user_); CleanupCompletedThreads(); } @@ -86,17 +88,27 @@ bool ThreadManager::IsShutdown() { return shutdown_; } +int ThreadManager::GetMaxActiveThreadsSoFar() { + std::lock_guard list_lock(list_mu_); + return max_active_threads_sofar_; +} + void ThreadManager::MarkAsCompleted(WorkerThread* thd) { { std::lock_guard list_lock(list_mu_); completed_threads_.push_back(thd); } - std::lock_guard lock(mu_); - num_threads_--; - if (num_threads_ == 0) { - shutdown_cv_.notify_one(); + { + std::lock_guard lock(mu_); + num_threads_--; + if (num_threads_ == 0) { + shutdown_cv_.notify_one(); + } } + + // Give a thread back to the resource quota + grpc_resource_user_free_threads(resource_user_, 1); } void ThreadManager::CleanupCompletedThreads() { @@ -111,34 +123,24 @@ void ThreadManager::CleanupCompletedThreads() { } void ThreadManager::Initialize() { + if (!grpc_resource_user_alloc_threads(resource_user_, min_pollers_)) { + gpr_log(GPR_ERROR, + "No thread quota available to even create the minimum required " + "polling threads (i.e %d). Unable to start the thread manager", + min_pollers_); + abort(); + } + { std::unique_lock lock(mu_); num_pollers_ = min_pollers_; num_threads_ = min_pollers_; + max_active_threads_sofar_ = min_pollers_; } for (int i = 0; i < min_pollers_; i++) { - if (!CreateNewThread(this)) { - gpr_log(GPR_ERROR, - "No quota available to create additional threads. 
Created %d (of " - "%d) threads", - i, min_pollers_); - break; - } - } -} - -bool ThreadManager::CreateNewThread(ThreadManager* thd_mgr) { - if (!grpc_resource_user_alloc_threads(thd_mgr->resource_user_, 1)) { - return false; + new WorkerThread(this); } - // Create a new thread (which ends up calling the MainWorkLoop() function - new WorkerThread(thd_mgr); - return true; -} - -void ThreadManager::ReleaseThread(ThreadManager* thd_mgr) { - grpc_resource_user_free_threads(thd_mgr->resource_user_, 1); } void ThreadManager::MainWorkLoop() { @@ -162,14 +164,17 @@ void ThreadManager::MainWorkLoop() { done = true; break; case WORK_FOUND: - // If we got work and there are now insufficient pollers, start a new - // one - if (!shutdown_ && num_pollers_ < min_pollers_) { + // If we got work and there are now insufficient pollers and there is + // quota available to create a new thread,start a new poller thread + if (!shutdown_ && num_pollers_ < min_pollers_ && + grpc_resource_user_alloc_threads(resource_user_, 1)) { num_pollers_++; num_threads_++; + max_active_threads_sofar_ = + std::max(max_active_threads_sofar_, num_threads_); // Drop lock before spawning thread to avoid contention lock.unlock(); - CreateNewThread(this); + new WorkerThread(this); } else { // Drop lock for consistency with above branch lock.unlock(); @@ -219,10 +224,9 @@ void ThreadManager::MainWorkLoop() { } }; - // This thread is exiting. Do some cleanup work (i.e delete already completed - // worker threads and also release 1 thread back to the resource quota) + // This thread is exiting. Do some cleanup work i.e delete already completed + // worker threads CleanupCompletedThreads(); - ReleaseThread(this); // If we are here, either ThreadManager is shutting down or it already has // enough threads. diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h index 23bd38ee4f..8332befed0 100644 --- a/src/cpp/thread_manager/thread_manager.h +++ b/src/cpp/thread_manager/thread_manager.h @@ -86,6 +86,11 @@ class ThreadManager { // all the threads have drained all the outstanding work virtual void Wait(); + // Max number of concurrent threads that were ever active in this thread + // manager so far. This is useful for debugging purposes (and in unit tests) + // to check if resource_quota is properly being enforced. + int GetMaxActiveThreadsSoFar(); + private: // Helper wrapper class around grpc_core::Thread. Takes a ThreadManager object // and starts a new grpc_core::Thread to calls the Run() function. @@ -93,6 +98,23 @@ class ThreadManager { // The Run() function calls ThreadManager::MainWorkLoop() function and once // that completes, it marks the WorkerThread completed by calling // ThreadManager::MarkAsCompleted() + // + // WHY IS THIS NEEDED?: + // When a thread terminates, some other tread *must* call Join() on that + // thread so that the resources are released. Having a WorkerThread wrapper + // will make this easier. 
Once Run() completes, each thread calls the + // following two functions: + // ThreadManager::CleanupCompletedThreads() + // ThreadManager::MarkAsCompleted() + // + // - MarkAsCompleted() puts the WorkerThread object in the ThreadManger's + // completed_threads_ list + // - CleanupCompletedThreads() calls "Join()" on the threads that are already + // in the completed_threads_ list (since a thread cannot call Join() on + // itself, it calls CleanupCompletedThreads() *before* calling + // MarkAsCompleted()) + // TODO: sreek - consider creating the threads 'detached' so that Join() need + // not be called class WorkerThread { public: WorkerThread(ThreadManager* thd_mgr); @@ -113,15 +135,8 @@ class ThreadManager { void MarkAsCompleted(WorkerThread* thd); void CleanupCompletedThreads(); - // Checks the resource quota and if available, creates a thread and returns - // true. If quota is not available, returns false (and thread is not created) - static bool CreateNewThread(ThreadManager* thd_mgr); - - // Give back a thread to the resource quota - static void ReleaseThread(ThreadManager* thd_mgr); - - // Protects shutdown_, num_pollers_ and num_threads_ - // TODO: sreek - Change num_pollers and num_threads_ to atomics + // Protects shutdown_, num_pollers_, num_threads_ and + // max_active_threads_sofar_ std::mutex mu_; bool shutdown_; @@ -142,10 +157,15 @@ class ThreadManager { int min_pollers_; int max_pollers_; - // The total number of threads (includes threads includes the threads that are - // currently polling i.e num_pollers_) + // The total number of threads currently active (includes threads includes the + // threads that are currently polling i.e num_pollers_) int num_threads_; + // See GetMaxActiveThreadsSoFar()'s description. + // To be more specific, this variable tracks the max value num_threads_ was + // ever set so far + int max_active_threads_sofar_; + std::mutex list_mu_; std::list completed_threads_; }; diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc index 059ff7b5f8..573e4010fa 100644 --- a/test/core/iomgr/resource_quota_test.cc +++ b/test/core/iomgr/resource_quota_test.cc @@ -798,6 +798,97 @@ static void test_negative_rq_free_pool(void) { } } +// Simple test to check resource quota thread limits +static void test_thread_limit() { + grpc_core::ExecCtx exec_ctx; + + grpc_resource_quota* rq = grpc_resource_quota_create("test_thread_limit"); + grpc_resource_user* ru1 = grpc_resource_user_create(rq, "ru1"); + grpc_resource_user* ru2 = grpc_resource_user_create(rq, "ru2"); + + // Max threads = 100 + grpc_resource_quota_set_max_threads(rq, 100); + + // Request quota for 100 threads (50 for ru1, 50 for ru2) + GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 10)); + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 10)); + GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 40)); + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 40)); + + // Threads exhaused. 
Next request must fail + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 20)); + + // Free 20 threads from two different users + grpc_resource_user_free_threads(ru1, 10); + grpc_resource_user_free_threads(ru2, 10); + + // Next request to 20 threads must succeed + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 20)); + + // No more thread quota again + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 20)); + + // Free 10 more + grpc_resource_user_free_threads(ru1, 10); + + GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 5)); + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 10)); // Only 5 available + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 5)); + + // Teardown (ru1 and ru2 release all the quota back to rq) + grpc_resource_user_unref(ru1); + grpc_resource_user_unref(ru2); + grpc_resource_quota_unref(rq); +} + +// Change max quota in either directions dynamically +static void test_thread_maxquota_change() { + grpc_core::ExecCtx exec_ctx; + + grpc_resource_quota* rq = + grpc_resource_quota_create("test_thread_maxquota_change"); + grpc_resource_user* ru1 = grpc_resource_user_create(rq, "ru1"); + grpc_resource_user* ru2 = grpc_resource_user_create(rq, "ru2"); + + // Max threads = 100 + grpc_resource_quota_set_max_threads(rq, 100); + + // Request quota for 100 threads (50 for ru1, 50 for ru2) + GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 50)); + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 50)); + + // Threads exhaused. Next request must fail + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 20)); + + // Increase maxquota and retry + // Max threads = 150; + grpc_resource_quota_set_max_threads(rq, 150); + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 20)); // ru2 = 70, ru1 = 50 + + // Decrease maxquota (Note: Quota already given to ru1 and ru2 is unaffected) + // Max threads = 10; + grpc_resource_quota_set_max_threads(rq, 10); + + // New requests will fail until quota is available + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); + + // Make quota available + grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0 + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); // Still not enough + + grpc_resource_user_free_threads(ru2, 70); // ru2 now has 0 + + // Now we can get quota up-to 10, the current max + GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 10)); + // No more thread quota again + GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); + + // Teardown (ru1 and ru2 release all the quota back to rq) + grpc_resource_user_unref(ru1); + grpc_resource_user_unref(ru2); + grpc_resource_quota_unref(rq); +} + int main(int argc, char** argv) { grpc_test_init(argc, argv); grpc_init(); @@ -827,6 +918,11 @@ int main(int argc, char** argv) { test_negative_rq_free_pool(); gpr_mu_destroy(&g_mu); gpr_cv_destroy(&g_cv); + + // Resource quota thread related + test_thread_limit(); + test_thread_maxquota_change(); + grpc_shutdown(); return 0; } diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index cf2cf770e6..a7ed2dd380 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -30,30 +30,44 @@ #include "test/cpp/util/test_config.h" namespace grpc { + +struct ThreadManagerTestSettings { + // The min number of pollers that SHOULD be active in ThreadManager + int min_pollers; + // The max number of pollers that could be active in ThreadManager + int max_pollers; + // The sleep duration in PollForWork() function to simulate "polling" + int 
poll_duration_ms; + // The sleep duration in DoWork() function to simulate "work" + int work_duration_ms; + // Max number of times PollForWork() is called before shutting down + int max_poll_calls; +}; + class ThreadManagerTest final : public grpc::ThreadManager { public: - ThreadManagerTest(const char* name, grpc_resource_quota* rq) - : ThreadManager(name, rq, kMinPollers, kMaxPollers), + ThreadManagerTest(const char* name, grpc_resource_quota* rq, + const ThreadManagerTestSettings& settings) + : ThreadManager(name, rq, settings.min_pollers, settings.max_pollers), + settings_(settings), num_do_work_(0), num_poll_for_work_(0), num_work_found_(0) {} grpc::ThreadManager::WorkStatus PollForWork(void** tag, bool* ok) override; void DoWork(void* tag, bool ok) override; - void PerformTest(); + + // Get number of times PollForWork() returned WORK_FOUND + int GetNumWorkFound(); + // Get number of times DoWork() was called + int GetNumDoWork(); private: void SleepForMs(int sleep_time_ms); - static const int kMinPollers = 2; - static const int kMaxPollers = 10; - - static const int kPollingTimeoutMsec = 10; - static const int kDoWorkDurationMsec = 1; - - // PollForWork will return SHUTDOWN after these many number of invocations - static const int kMaxNumPollForWork = 50; + ThreadManagerTestSettings settings_; + // Counters gpr_atm num_do_work_; // Number of calls to DoWork gpr_atm num_poll_for_work_; // Number of calls to PollForWork gpr_atm num_work_found_; // Number of times WORK_FOUND was returned @@ -69,58 +83,113 @@ void ThreadManagerTest::SleepForMs(int duration_ms) { grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void** tag, bool* ok) { int call_num = gpr_atm_no_barrier_fetch_add(&num_poll_for_work_, 1); - - if (call_num >= kMaxNumPollForWork) { + if (call_num >= settings_.max_poll_calls) { Shutdown(); return SHUTDOWN; } - // Simulate "polling for work" by sleeping for sometime - SleepForMs(kPollingTimeoutMsec); - + SleepForMs(settings_.poll_duration_ms); // Simulate "polling" duration *tag = nullptr; *ok = true; - // Return timeout roughly 1 out of every 3 calls + // Return timeout roughly 1 out of every 3 calls just to make the test a bit + // more interesting if (call_num % 3 == 0) { return TIMEOUT; - } else { - gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); - return WORK_FOUND; } + + gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); + return WORK_FOUND; } void ThreadManagerTest::DoWork(void* tag, bool ok) { gpr_atm_no_barrier_fetch_add(&num_do_work_, 1); - SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping + SleepForMs(settings_.work_duration_ms); // Simulate work by sleeping } -void ThreadManagerTest::PerformTest() { - // Initialize() starts the ThreadManager - Initialize(); - - // Wait for all the threads to gracefully terminate - Wait(); +int ThreadManagerTest::GetNumWorkFound() { + return static_cast(gpr_atm_no_barrier_load(&num_work_found_)); +} - // The number of times DoWork() was called is equal to the number of times - // WORK_FOUND was returned - gpr_log(GPR_DEBUG, "DoWork() called %" PRIdPTR " times", - gpr_atm_no_barrier_load(&num_do_work_)); - GPR_ASSERT(gpr_atm_no_barrier_load(&num_do_work_) == - gpr_atm_no_barrier_load(&num_work_found_)); +int ThreadManagerTest::GetNumDoWork() { + return static_cast(gpr_atm_no_barrier_load(&num_do_work_)); } } // namespace grpc -int main(int argc, char** argv) { - std::srand(std::time(nullptr)); +// Test that the number of times DoWork() is called is equal to the number of +// times PollForWork() 
returned WORK_FOUND +static void TestPollAndWork() { + grpc_resource_quota* rq = grpc_resource_quota_create("Test-poll-and-work"); + grpc::ThreadManagerTestSettings settings = { + 2 /* min_pollers */, 10 /* max_pollers */, 10 /* poll_duration_ms */, + 1 /* work_duration_ms */, 50 /* max_poll_calls */}; - grpc::testing::InitTest(&argc, &argv, true); + grpc::ThreadManagerTest test_thd_mgr("TestThreadManager", rq, settings); + grpc_resource_quota_unref(rq); + + test_thd_mgr.Initialize(); // Start the thread manager + test_thd_mgr.Wait(); // Wait for all threads to finish + + // Verify that the number of times DoWork() was called is equal to the number + // of times WORK_FOUND was returned + gpr_log(GPR_DEBUG, "DoWork() called %d times", test_thd_mgr.GetNumDoWork()); + GPR_ASSERT(test_thd_mgr.GetNumDoWork() == test_thd_mgr.GetNumWorkFound()); +} - grpc_resource_quota* rq = grpc_resource_quota_create("Test"); - grpc::ThreadManagerTest test_rpc_manager("TestThreadManager", rq); +static void TestThreadQuota() { + const int kMaxNumThreads = 3; + grpc_resource_quota* rq = grpc_resource_quota_create("Test-thread-quota"); + grpc_resource_quota_set_max_threads(rq, kMaxNumThreads); + + // Set work_duration_ms to be much greater than poll_duration_ms. This way, + // the thread manager will be forced to create more 'polling' threads to + // honor the min_pollers guarantee + grpc::ThreadManagerTestSettings settings = { + 1 /* min_pollers */, 1 /* max_pollers */, 1 /* poll_duration_ms */, + 10 /* work_duration_ms */, 50 /* max_poll_calls */}; + + // Create two thread managers (but with the same resource quota). This means + // that the max number of active threads across BOTH the thread managers + // cannot be greater than kMaxNumThreads + grpc::ThreadManagerTest test_thd_mgr_1("TestThreadManager-1", rq, settings); + grpc::ThreadManagerTest test_thd_mgr_2("TestThreadManager-2", rq, settings); + // It is ok to unref resource quota before starting thread managers. grpc_resource_quota_unref(rq); - test_rpc_manager.PerformTest(); + // Start both thread managers + test_thd_mgr_1.Initialize(); + test_thd_mgr_2.Initialize(); + + // Wait for both to finish + test_thd_mgr_1.Wait(); + test_thd_mgr_2.Wait(); + + // Now verify that the total number of active threads in either thread manager + // never exceeds kMaxNumThreads + // + // NOTE: Actually the total active threads across *both* thread managers at + // any point in time never exceeds kMaxNumThreads but unfortunately there is + // no easy way to verify it (i.e. we can't just do (max1 + max2 <= k)) + // It's okay to not test this case here.
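As an aside on the bookkeeping behind GetMaxActiveThreadsSoFar(): a minimal sketch of how such a high-water mark can be maintained is shown below. This assumes the counter is updated wherever num_threads_ changes while holding mu_; the hook names OnThreadStarted() and OnThreadFinished() are illustrative only and are not the real ThreadManager methods.

    #include <mutex>

    class ThreadManagerSketch {
     public:
      // Largest value num_threads_ has ever reached.
      int GetMaxActiveThreadsSoFar() {
        std::lock_guard<std::mutex> lock(mu_);
        return max_active_threads_sofar_;
      }

     protected:
      void OnThreadStarted() {
        std::lock_guard<std::mutex> lock(mu_);
        num_threads_++;
        if (num_threads_ > max_active_threads_sofar_) {
          max_active_threads_sofar_ = num_threads_;  // record new high-water mark
        }
      }
      void OnThreadFinished() {
        std::lock_guard<std::mutex> lock(mu_);
        num_threads_--;  // the high-water mark is never decreased
      }

     private:
      std::mutex mu_;
      int num_threads_ = 0;
      int max_active_threads_sofar_ = 0;
    };

Because the high-water mark is never decremented, max1 and max2 in the assertion below remain meaningful even after all worker threads have exited.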
The resource quota c-core tests + // provide enough coverage to resource quota object with multiple resource + // users + int max1 = test_thd_mgr_1.GetMaxActiveThreadsSoFar(); + int max2 = test_thd_mgr_2.GetMaxActiveThreadsSoFar(); + gpr_log( + GPR_DEBUG, + "MaxActiveThreads in TestThreadManager_1: %d, TestThreadManager_2: %d", + max1, max2); + GPR_ASSERT(max1 <= kMaxNumThreads && max2 <= kMaxNumThreads); +} + +int main(int argc, char** argv) { + std::srand(std::time(nullptr)); + grpc::testing::InitTest(&argc, &argv, true); + grpc_init(); + + TestPollAndWork(); + TestThreadQuota(); + grpc_shutdown(); return 0; } -- cgit v1.2.3 From dd45987a5bf976079d9d89fa127740bd6f516a16 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 25 Jul 2018 10:39:16 -0700 Subject: fix comment --- src/core/lib/iomgr/resource_quota.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index 67d05aa202..47b7856e95 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -141,8 +141,8 @@ struct grpc_resource_quota { /* Mutex to protect max_threads and num_threads */ /* Note: We could have used gpr_atm for max_threads and num_threads and avoid * having this mutex; but in that case, each invocation of the function - * grpc_resource_user_alloc_threads() will have to do atleast two atomic loads - * (for max_threads and num_threads) followed by a CAS (on num_threads). + * grpc_resource_user_alloc_threads() would have had to do at least two atomic + * loads (for max_threads and num_threads) followed by a CAS (on num_threads). * Moreover, we expect grpc_resource_user_alloc_threads() to be often called * concurrently thereby increasing the chances of failing the CAS operation. 
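To make the trade-off described in this comment concrete, here is a minimal sketch of the two designs being weighed; the type and member names are illustrative and are not the actual resource_quota.cc symbols.

    #include <atomic>
    #include <mutex>

    // Design kept here: a single mutex-protected check-and-increment.
    struct ThreadQuotaWithMutex {
      std::mutex mu;
      int max_threads = 0;
      int num_threads = 0;
      bool AllocThreads(int thread_count) {
        std::lock_guard<std::mutex> lock(mu);
        if (num_threads + thread_count > max_threads) return false;
        num_threads += thread_count;
        return true;
      }
    };

    // Lock-free alternative: two atomic loads plus a CAS loop, which can spin
    // (and repeatedly fail) when many allocations race.
    struct ThreadQuotaWithAtomics {
      std::atomic<int> max_threads{0};
      std::atomic<int> num_threads{0};
      bool AllocThreads(int thread_count) {
        int max = max_threads.load(std::memory_order_relaxed);
        int current = num_threads.load(std::memory_order_relaxed);
        do {
          if (current + thread_count > max) return false;
        } while (
            !num_threads.compare_exchange_weak(current, current + thread_count));
        return true;
      }
    };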
* This additional complexity is not worth the tiny perf gain we may (or may -- cgit v1.2.3 From 1b1d5e52e7aa0ecdd455ad084aa7930ea89bbfd1 Mon Sep 17 00:00:00 2001 From: Alex Polcyn Date: Sat, 16 Jun 2018 04:08:55 +0000 Subject: Get c-ares to compile and do address sorting on windows --- BUILD | 3 + CMakeLists.txt | 44 ++++-- Makefile | 42 ++++++ build.yaml | 15 ++ config.m4 | 3 + config.w32 | 3 + gRPC-Core.podspec | 3 + grpc.gemspec | 3 + grpc.gyp | 6 + include/grpc/impl/codegen/port_platform.h | 4 - package.xml | 3 + .../resolver/dns/c_ares/dns_resolver_ares.cc | 5 +- .../resolver/dns/c_ares/grpc_ares_ev_driver.cc | 6 +- .../dns/c_ares/grpc_ares_ev_driver_windows.cc | 59 ++++++++ .../resolver/dns/c_ares/grpc_ares_wrapper.cc | 9 +- .../resolver/dns/c_ares/grpc_ares_wrapper.h | 4 + .../resolver/dns/c_ares/grpc_ares_wrapper_posix.cc | 29 ++++ .../dns/c_ares/grpc_ares_wrapper_windows.cc | 29 ++++ src/core/lib/iomgr/socket_windows.cc | 29 ++++ src/core/lib/iomgr/socket_windows.h | 4 + src/python/grpcio/grpc_core_dependencies.py | 3 + test/core/iomgr/BUILD | 13 ++ .../iomgr/grpc_ipv6_loopback_available_test.cc | 48 +++++++ test/cpp/naming/address_sorting_test.cc | 160 ++++++++++++++++----- test/cpp/naming/gen_build_yaml.py | 2 +- third_party/address_sorting/address_sorting.c | 9 +- .../address_sorting/address_sorting_windows.c | 46 +++++- .../include/address_sorting/address_sorting.h | 3 + tools/doxygen/Doxyfile.core.internal | 3 + tools/run_tests/generated/sources_and_headers.json | 22 ++- tools/run_tests/generated/tests.json | 38 ++++- 31 files changed, 577 insertions(+), 73 deletions(-) create mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc create mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc create mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc create mode 100644 test/core/iomgr/grpc_ipv6_loopback_available_test.cc (limited to 'src/core/lib/iomgr') diff --git a/BUILD b/BUILD index ee4b5dfaec..81390dd1aa 100644 --- a/BUILD +++ b/BUILD @@ -1433,7 +1433,10 @@ grpc_cc_library( "src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc", ], hdrs = [ diff --git a/CMakeLists.txt b/CMakeLists.txt index 84e9c08cb5..e8e65d4b71 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -298,6 +298,7 @@ add_dependencies(buildtests_c grpc_completion_queue_test) add_dependencies(buildtests_c grpc_completion_queue_threading_test) add_dependencies(buildtests_c grpc_credentials_test) add_dependencies(buildtests_c grpc_fetch_oauth2) +add_dependencies(buildtests_c grpc_ipv6_loopback_available_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c grpc_json_token_test) endif() @@ -671,12 +672,8 @@ endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) 
add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker) endif() -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx address_sorting_test_unsecure) -endif() -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx address_sorting_test) -endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx cancel_ares_query_test) endif() @@ -1236,8 +1233,11 @@ add_library(grpc src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc src/cpp/ext/filters/census/grpc_context.cc @@ -2538,8 +2538,11 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc @@ -7323,6 +7326,35 @@ target_link_libraries(grpc_fetch_oauth2 gpr ) +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + +add_executable(grpc_ipv6_loopback_available_test + test/core/iomgr/grpc_ipv6_loopback_available_test.cc +) + + +target_include_directories(grpc_ipv6_loopback_available_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${_gRPC_SSL_INCLUDE_DIR} + PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR} + PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR} + PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR} + PRIVATE ${_gRPC_CARES_INCLUDE_DIR} + PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR} + PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR} +) + +target_link_libraries(grpc_ipv6_loopback_available_test + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_test_util + grpc + gpr_test_util + gpr +) + endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) @@ -16351,7 +16383,6 @@ target_link_libraries(resolver_component_tests_runner_invoker endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(address_sorting_test_unsecure 
test/cpp/naming/address_sorting_test.cc @@ -16391,10 +16422,8 @@ target_link_libraries(address_sorting_test_unsecure ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(address_sorting_test test/cpp/naming/address_sorting_test.cc @@ -16434,7 +16463,6 @@ target_link_libraries(address_sorting_test ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) diff --git a/Makefile b/Makefile index bad41975a0..5174ab6719 100644 --- a/Makefile +++ b/Makefile @@ -1022,6 +1022,7 @@ grpc_completion_queue_threading_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_ grpc_create_jwt: $(BINDIR)/$(CONFIG)/grpc_create_jwt grpc_credentials_test: $(BINDIR)/$(CONFIG)/grpc_credentials_test grpc_fetch_oauth2: $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 +grpc_ipv6_loopback_available_test: $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test grpc_json_token_test: $(BINDIR)/$(CONFIG)/grpc_json_token_test grpc_jwt_verifier_test: $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test grpc_print_google_default_creds_token: $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token @@ -1472,6 +1473,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test \ $(BINDIR)/$(CONFIG)/grpc_credentials_test \ $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 \ + $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test \ $(BINDIR)/$(CONFIG)/grpc_json_token_test \ $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test \ $(BINDIR)/$(CONFIG)/grpc_security_connector_test \ @@ -2028,6 +2030,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test || ( echo test grpc_completion_queue_threading_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_credentials_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_credentials_test || ( echo test grpc_credentials_test failed ; exit 1 ) + $(E) "[RUN] Testing grpc_ipv6_loopback_available_test" + $(Q) $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test || ( echo test grpc_ipv6_loopback_available_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_json_token_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_json_token_test || ( echo test grpc_json_token_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_jwt_verifier_test" @@ -3704,8 +3708,11 @@ LIBGRPC_SRC = \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/cpp/ext/filters/census/grpc_context.cc \ @@ -4972,8 +4979,11 @@ LIBGRPC_UNSECURE_SRC = \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ 
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ @@ -12365,6 +12375,38 @@ endif endif +GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_SRC = \ + test/core/iomgr/grpc_ipv6_loopback_available_test.cc \ + +GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test: $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/iomgr/grpc_ipv6_loopback_available_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_grpc_ipv6_loopback_available_test: $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS:.o=.dep) +endif +endif + + GRPC_JSON_TOKEN_TEST_SRC = \ test/core/security/json_token_test.cc \ diff --git a/build.yaml b/build.yaml index 30389ec114..70af96046c 100644 --- a/build.yaml +++ b/build.yaml @@ -740,8 +740,11 @@ filegroups: - src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc + - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc plugin: grpc_resolver_dns_ares uses: - grpc_base @@ -2730,6 +2733,18 @@ targets: - grpc - gpr_test_util - gpr +- name: grpc_ipv6_loopback_available_test + build: test + language: c + src: + - test/core/iomgr/grpc_ipv6_loopback_available_test.cc + deps: + - grpc_test_util + - grpc + - gpr_test_util + - gpr + exclude_iomgrs: + - uv - name: grpc_json_token_test build: test language: c diff --git a/config.m4 b/config.m4 index 
c277ccafc8..aa40a698a6 100644 --- a/config.m4 +++ b/config.m4 @@ -380,8 +380,11 @@ if test "$PHP_GRPC" != "no"; then src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/cpp/ext/filters/census/grpc_context.cc \ diff --git a/config.w32 b/config.w32 index 2857781dd5..5afa4466ac 100644 --- a/config.w32 +++ b/config.w32 @@ -355,8 +355,11 @@ if (PHP_GRPC != "no") { "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " + + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_windows.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_fallback.cc " + + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_posix.cc " + + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_windows.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\native\\dns_resolver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\sockaddr\\sockaddr_resolver.cc " + "src\\cpp\\ext\\filters\\census\\grpc_context.cc " + diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 23edaec656..5c3649afbd 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -802,8 +802,11 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', diff --git a/grpc.gemspec b/grpc.gemspec index b69d5a7c6f..c250316b99 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -742,8 +742,11 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc ) s.files += %w( 
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc ) + s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc ) + s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc ) + s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc ) s.files += %w( src/cpp/ext/filters/census/grpc_context.cc ) diff --git a/grpc.gyp b/grpc.gyp index e1485efa05..25082fe540 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -572,8 +572,11 @@ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', @@ -1287,8 +1290,11 @@ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 01ce5f03e9..2b61a8816d 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -420,12 +420,8 @@ typedef unsigned __int64 uint64_t; #define GPR_MAX_ALIGNMENT 16 #ifndef GRPC_ARES -#ifdef GPR_WINDOWS -#define GRPC_ARES 0 -#else #define GRPC_ARES 1 #endif -#endif #ifndef GRPC_MUST_USE_RESULT #if defined(__GNUC__) && !defined(__MINGW32__) diff --git a/package.xml b/package.xml index 7f71536b1d..acdc6ffdb3 100644 --- a/package.xml +++ b/package.xml @@ -747,8 +747,11 @@ + + + diff --git 
a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index f4f6444c5f..7050e82121 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -142,8 +141,8 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args) channel_args_ = grpc_channel_args_copy(args.args); const grpc_arg* arg = grpc_channel_args_find( channel_args_, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION); - request_service_config_ = !grpc_channel_arg_get_integer( - arg, (grpc_integer_options){false, false, true}); + grpc_integer_options integer_options = {false, false, true}; + request_service_config_ = !grpc_channel_arg_get_integer(arg, integer_options); arg = grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); min_time_between_resolutions_ = diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc index c886795608..0068d0d5f4 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc @@ -18,11 +18,10 @@ #include #include "src/core/lib/iomgr/port.h" -#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) +#if GRPC_ARES == 1 && !defined(GRPC_UV) #include #include -#include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" @@ -32,7 +31,6 @@ #include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/lib/gpr/string.h" -#include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/sockaddr_utils.h" @@ -314,4 +312,4 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) { } } -#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */ +#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc new file mode 100644 index 0000000000..5d65ae3ab3 --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc @@ -0,0 +1,59 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include + +#include "src/core/lib/iomgr/port.h" +#if GRPC_ARES == 1 && defined(GPR_WINDOWS) + +#include +#include +#include "src/core/lib/gprpp/memory.h" + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" + +namespace grpc_core { + +/* TODO: fill in the body of GrpcPolledFdWindows to enable c-ares on Windows. 
+ This dummy implementation only allows grpc to compile on windows with + GRPC_ARES=1. */ +class GrpcPolledFdWindows : public GrpcPolledFd { + public: + GrpcPolledFdWindows() { abort(); } + ~GrpcPolledFdWindows() { abort(); } + void RegisterForOnReadableLocked(grpc_closure* read_closure) override { + abort(); + } + void RegisterForOnWriteableLocked(grpc_closure* write_closure) override { + abort(); + } + bool IsFdStillReadableLocked() override { abort(); } + void ShutdownLocked(grpc_error* error) override { abort(); } + ares_socket_t GetWrappedAresSocketLocked() override { abort(); } + const char* GetName() override { abort(); } +}; + +GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, + grpc_pollset_set* driver_pollset_set) { + return nullptr; +} + +void ConfigureAresChannelLocked(ares_channel* channel) { abort(); } + +} // namespace grpc_core + +#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index 497ad998af..b3d6437e9a 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -22,7 +22,6 @@ #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/socket_utils_posix.h" #include #include @@ -215,7 +214,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in6_addr)); - addr.sin6_family = static_cast(hostent->h_addrtype); + addr.sin6_family = static_cast(hostent->h_addrtype); addr.sin6_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, @@ -236,7 +235,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in_addr)); - addr.sin_family = static_cast(hostent->h_addrtype); + addr.sin_family = static_cast(hostent->h_addrtype); addr.sin_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, @@ -281,7 +280,7 @@ static void on_srv_query_done_locked(void* arg, int status, int timeouts, grpc_ares_ev_driver_get_channel_locked(r->ev_driver); for (struct ares_srv_reply* srv_it = reply; srv_it != nullptr; srv_it = srv_it->next) { - if (grpc_ipv6_loopback_available()) { + if (grpc_ares_query_ipv6()) { grpc_ares_hostbyname_request* hr = create_hostbyname_request_locked( r, srv_it->host, htons(srv_it->port), true /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, @@ -452,7 +451,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( } } r->pending_queries = 1; - if (grpc_ipv6_loopback_available()) { + if (grpc_ares_query_ipv6()) { hr = create_hostbyname_request_locked(r, host, strhtons(port), false /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_locked, diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h index ce26f5d524..17eaa7ccf0 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h @@ -70,6 +70,10 @@ void 
grpc_ares_cleanup(void); * and destroys the grpc_ares_request */ void grpc_ares_complete_request_locked(grpc_ares_request* request); +/* Indicates whether or not AAAA queries should be attempted. */ +/* E.g., return false if ipv6 is known to not be available. */ +bool grpc_ares_query_ipv6(); + /* Exposed only for testing */ void grpc_cares_wrapper_test_only_address_sorting_sort( grpc_lb_addresses* lb_addrs); diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc new file mode 100644 index 0000000000..23c0fec74f --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc @@ -0,0 +1,29 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" +#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/lib/iomgr/socket_utils_posix.h" + +bool grpc_ares_query_ipv6() { return grpc_ipv6_loopback_available(); } + +#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc new file mode 100644 index 0000000000..ee827e284e --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc @@ -0,0 +1,29 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" +#if GRPC_ARES == 1 && defined(GPR_WINDOWS) + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/lib/iomgr/socket_windows.h" + +bool grpc_ares_query_ipv6() { return grpc_ipv6_loopback_available(); } + +#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */ diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc index 2e23409582..4ad31cb35d 100644 --- a/src/core/lib/iomgr/socket_windows.cc +++ b/src/core/lib/iomgr/socket_windows.cc @@ -36,6 +36,7 @@ #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_windows.h" +#include "src/core/lib/iomgr/sockaddr_windows.h" #include "src/core/lib/iomgr/socket_windows.h" grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) { @@ -148,4 +149,32 @@ void grpc_socket_become_ready(grpc_winsocket* socket, if (should_destroy) destroy(socket); } +static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT; +static bool g_ipv6_loopback_available = false; + +static void probe_ipv6_once(void) { + SOCKET s = socket(AF_INET6, SOCK_STREAM, 0); + g_ipv6_loopback_available = 0; + if (s == INVALID_SOCKET) { + gpr_log(GPR_INFO, "Disabling AF_INET6 sockets because socket() failed."); + } else { + grpc_sockaddr_in6 addr; + memset(&addr, 0, sizeof(addr)); + addr.sin6_family = AF_INET6; + addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */ + if (bind(s, reinterpret_cast(&addr), sizeof(addr)) == 0) { + g_ipv6_loopback_available = 1; + } else { + gpr_log(GPR_INFO, + "Disabling AF_INET6 sockets because ::1 is not available."); + } + closesocket(s); + } +} + +int grpc_ipv6_loopback_available(void) { + gpr_once_init(&g_probe_ipv6_once, probe_ipv6_once); + return g_ipv6_loopback_available; +} + #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h index 7bd01eded5..b09b9da562 100644 --- a/src/core/lib/iomgr/socket_windows.h +++ b/src/core/lib/iomgr/socket_windows.h @@ -108,6 +108,10 @@ void grpc_socket_notify_on_read(grpc_winsocket* winsocket, void grpc_socket_become_ready(grpc_winsocket* winsocket, grpc_winsocket_callback_info* ci); +/* Returns true if this system can create AF_INET6 sockets bound to ::1. + The value is probed once, and cached for the life of the process. 
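The probe-once-and-cache pattern described here is not Windows-specific. The following is only a sketch of the same idea written with POSIX sockets and std::call_once; the in-tree POSIX implementation lives elsewhere in iomgr and may differ in detail.

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #include <mutex>

    static std::once_flag g_probe_once;
    static bool g_ipv6_loopback_available = false;

    static void ProbeIpv6LoopbackOnce() {
      int fd = socket(AF_INET6, SOCK_STREAM, 0);
      if (fd < 0) return;  // no AF_INET6 support at all
      sockaddr_in6 addr;
      memset(&addr, 0, sizeof(addr));
      addr.sin6_family = AF_INET6;
      addr.sin6_addr.s6_addr[15] = 1;  // [::1]:0
      if (bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) == 0) {
        g_ipv6_loopback_available = true;
      }
      close(fd);
    }

    bool Ipv6LoopbackAvailableSketch() {
      std::call_once(g_probe_once, ProbeIpv6LoopbackOnce);
      return g_ipv6_loopback_available;
    }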
*/ +int grpc_ipv6_loopback_available(void); + #endif #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */ diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index 49185cc648..d6efb49750 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -354,8 +354,11 @@ CORE_SOURCE_FILES = [ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', diff --git a/test/core/iomgr/BUILD b/test/core/iomgr/BUILD index fb0490a95f..002671a5fa 100644 --- a/test/core/iomgr/BUILD +++ b/test/core/iomgr/BUILD @@ -124,6 +124,19 @@ grpc_cc_test( ], ) +grpc_cc_test( + name = "grpc_ipv6_loopback_available_test", + srcs = ["grpc_ipv6_loopback_available_test.cc"], + language = "C++", + deps = [ + "//:gpr", + "//:grpc", + "//test/core/util:gpr_test_util", + "//test/core/util:grpc_test_util", + ], +) + + grpc_cc_test( name = "load_file_test", srcs = ["load_file_test.cc"], diff --git a/test/core/iomgr/grpc_ipv6_loopback_available_test.cc b/test/core/iomgr/grpc_ipv6_loopback_available_test.cc new file mode 100644 index 0000000000..329aa9a851 --- /dev/null +++ b/test/core/iomgr/grpc_ipv6_loopback_available_test.cc @@ -0,0 +1,48 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "src/core/lib/iomgr/port.h" + +// grpc_ipv6_loopback_available isn't currently available on UV. +#ifndef GRPC_UV + +#include +#include +#include "test/core/util/test_config.h" + +#ifdef GPR_WINDOWS +#include "src/core/lib/iomgr/socket_windows.h" +#else +#include "src/core/lib/iomgr/socket_utils_posix.h" +#endif + +int main(int argc, char** argv) { + grpc_test_init(argc, argv); + grpc_init(); + // This test assumes that the ipv6 loopback is available + // in all environments in which grpc tests run in. 
+ GPR_ASSERT(grpc_ipv6_loopback_available()); + grpc_shutdown(); + return 0; +} + +#else + +int main(int argc, char** argv) { return 0; } + +#endif /* GRPC_UV */ diff --git a/test/cpp/naming/address_sorting_test.cc b/test/cpp/naming/address_sorting_test.cc index a92e9e3b3e..04c300876c 100644 --- a/test/cpp/naming/address_sorting_test.cc +++ b/test/cpp/naming/address_sorting_test.cc @@ -24,10 +24,8 @@ #include #include -#include #include #include -#include #include #include @@ -51,6 +49,11 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" +#ifndef GPR_WINDOWS +#include +#include +#endif + namespace { struct TestAddress { @@ -190,10 +193,18 @@ void VerifyLbAddrOutputs(grpc_lb_addresses* lb_addrs, grpc_lb_addresses_destroy(lb_addrs); } -} // namespace +/* We need to run each test case inside of its own + * isolated grpc_init/grpc_shutdown pair, so that + * the "address sorting source addr factory" can be + * restored to its default for each test case. */ +class AddressSortingTest : public ::testing::Test { + protected: + void SetUp() override { grpc_init(); } + void TearDown() override { grpc_shutdown(); } +}; /* Tests for rule 1 */ -TEST(AddressSortingTest, TestDepriotizesUnreachableAddresses) { +TEST_F(AddressSortingTest, TestDepriotizesUnreachableAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -212,7 +223,7 @@ TEST(AddressSortingTest, TestDepriotizesUnreachableAddresses) { }); } -TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { +TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { bool ipv4_supported = true; bool ipv6_supported = false; OverrideAddressSortingSourceAddrFactory( @@ -231,7 +242,7 @@ TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { }); } -TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { +TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { bool ipv4_supported = false; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -253,7 +264,7 @@ TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { /* Tests for rule 2 */ -TEST(AddressSortingTest, TestDepriotizesNonMatchingScope) { +TEST_F(AddressSortingTest, TestDepriotizesNonMatchingScope) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -277,7 +288,7 @@ TEST(AddressSortingTest, TestDepriotizesNonMatchingScope) { /* Tests for rule 5 */ -TEST(AddressSortingTest, TestUsesLabelFromDefaultTable) { +TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTable) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -300,7 +311,7 @@ TEST(AddressSortingTest, TestUsesLabelFromDefaultTable) { /* Flip the input on the test above to reorder the sort function's * comparator's inputs. 
*/ -TEST(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { +TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -323,8 +334,8 @@ TEST(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { /* Tests for rule 6 */ -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithAnIpv4Address) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithAnIpv4Address) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -348,8 +359,8 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithV4CompatAndLocalhostAddress) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithV4CompatAndLocalhostAddress) { bool ipv4_supported = true; bool ipv6_supported = true; // Handle unique observed behavior of inet_ntop(v4-compatible-address) on OS X. @@ -377,8 +388,8 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithCatchAllAndLocalhostAddress) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithCatchAllAndLocalhostAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -403,8 +414,8 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddress) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -426,7 +437,7 @@ TEST(AddressSortingTest, }); } -TEST( +TEST_F( AddressSortingTest, TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddressEnsurePrefixMatchHasNoEffect) { bool ipv4_supported = true; @@ -448,8 +459,8 @@ TEST( }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithLinkAndSiteLocalAddresses) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithLinkAndSiteLocalAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -469,19 +480,22 @@ TEST(AddressSortingTest, }); } -TEST( +TEST_F( AddressSortingTest, TestUsesDestinationWithHigherPrecedenceWithCatchAllAndAndV4MappedAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; + // Use embedded ipv4 addresses with leading 1's instead of zero's to be + // compatible with inet_ntop implementations that can display such + // addresses with leading zero's as e.g.: "::ffff:0:2", as on windows. OverrideAddressSortingSourceAddrFactory( ipv4_supported, ipv6_supported, { - {"[::ffff:0.0.0.2]:443", {"[::ffff:0.0.0.3]:0", AF_INET6}}, + {"[::ffff:1.1.1.2]:443", {"[::ffff:1.1.1.3]:0", AF_INET6}}, {"[1234::2]:443", {"[1234::3]:0", AF_INET6}}, }); grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ - {"[::ffff:0.0.0.2]:443", AF_INET6}, + {"[::ffff:1.1.1.2]:443", AF_INET6}, {"[1234::2]:443", AF_INET6}, }); grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); @@ -489,13 +503,13 @@ TEST( // ::ffff:0:2 should match the v4-mapped // precedence entry and be deprioritized. 
"[1234::2]:443", - "[::ffff:0.0.0.2]:443", + "[::ffff:1.1.1.2]:443", }); } /* Tests for rule 8 */ -TEST(AddressSortingTest, TestPrefersSmallerScope) { +TEST_F(AddressSortingTest, TestPrefersSmallerScope) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -520,7 +534,7 @@ TEST(AddressSortingTest, TestPrefersSmallerScope) { /* Tests for rule 9 */ -TEST(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { +TEST_F(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -543,8 +557,8 @@ TEST(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { }); } -TEST(AddressSortingTest, - TestPrefersLongestMatchingSrcDstPrefixMatchesWholeAddress) { +TEST_F(AddressSortingTest, + TestPrefersLongestMatchingSrcDstPrefixMatchesWholeAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -564,7 +578,7 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { +TEST_F(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -584,7 +598,7 @@ TEST(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { }); } -TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { +TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -604,7 +618,7 @@ TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { }); } -TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { +TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -628,7 +642,7 @@ TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { /* Tests for rule 10 */ -TEST(AddressSortingTest, TestStableSort) { +TEST_F(AddressSortingTest, TestStableSort) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -648,7 +662,7 @@ TEST(AddressSortingTest, TestStableSort) { }); } -TEST(AddressSortingTest, TestStableSortFiveElements) { +TEST_F(AddressSortingTest, TestStableSortFiveElements) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -677,7 +691,7 @@ TEST(AddressSortingTest, TestStableSortFiveElements) { }); } -TEST(AddressSortingTest, TestStableSortNoSrcAddrsExist) { +TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExist) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory(ipv4_supported, ipv6_supported, {}); @@ -698,7 +712,7 @@ TEST(AddressSortingTest, TestStableSortNoSrcAddrsExist) { }); } -TEST(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { +TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory(ipv4_supported, ipv6_supported, {}); @@ -713,7 +727,7 @@ TEST(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { }); } -TEST(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { +TEST_F(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { bool ipv4_supported = true; bool 
ipv6_supported = true; // Handle unique observed behavior of inet_ntop(v4-compatible-address) on OS X. @@ -744,6 +758,78 @@ TEST(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { }); } +/* TestPrefersIpv6Loopback tests the actual "address probing" code + * for the current platform, without any mocks. + * This test relies on the assumption that the ipv6 loopback address is + * available in the hosts/containers that grpc C/C++ tests run on + * (whether ipv4 loopback is available or not, an available ipv6 + * loopback should be preferred). */ +TEST_F(AddressSortingTest, TestPrefersIpv6Loopback) { + grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ + {"[::1]:443", AF_INET6}, + {"127.0.0.1:443", AF_INET}, + }); + grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); + VerifyLbAddrOutputs(lb_addrs, { + "[::1]:443", + "127.0.0.1:443", + }); +} + +/* Flip the order of the inputs above and expect the same output order + * (try to rule out influence of arbitrary qsort ordering) */ +TEST_F(AddressSortingTest, TestPrefersIpv6LoopbackInputsFlipped) { + grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ + {"127.0.0.1:443", AF_INET}, + {"[::1]:443", AF_INET6}, + }); + grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); + VerifyLbAddrOutputs(lb_addrs, { + "[::1]:443", + "127.0.0.1:443", + }); +} + +/* Try to rule out false positives in the above two tests in which + * the sorter might think that neither ipv6 or ipv4 loopback is + * available, but ipv6 loopback is still preferred only due + * to precedance table lookups. */ +TEST_F(AddressSortingTest, TestSorterKnowsIpv6LoopbackIsAvailable) { + sockaddr_in6 ipv6_loopback; + memset(&ipv6_loopback, 0, sizeof(ipv6_loopback)); + ipv6_loopback.sin6_family = AF_INET6; + ((char*)&ipv6_loopback.sin6_addr)[15] = 1; + ipv6_loopback.sin6_port = htons(443); + // Set up the source and destination parameters of + // address_sorting_get_source_addr + address_sorting_address sort_input_dest; + memcpy(&sort_input_dest.addr, &ipv6_loopback, sizeof(ipv6_loopback)); + sort_input_dest.len = sizeof(ipv6_loopback); + address_sorting_address source_for_sort_input_dest; + memset(&source_for_sort_input_dest, 0, sizeof(source_for_sort_input_dest)); + // address_sorting_get_source_addr returns true if a source address was found + // for the destination address, otherwise false. + EXPECT_TRUE(address_sorting_get_source_addr_for_testing( + &sort_input_dest, &source_for_sort_input_dest)); + // Now also check that the source address was filled in correctly. + EXPECT_GT(source_for_sort_input_dest.len, 0u); + sockaddr_in6* source_addr_output = + (sockaddr_in6*)source_for_sort_input_dest.addr; + EXPECT_EQ(source_addr_output->sin6_family, AF_INET6); + char* buf = static_cast(gpr_zalloc(100)); + EXPECT_NE(inet_ntop(AF_INET6, &source_addr_output->sin6_addr, buf, 100), + nullptr) + << "inet_ntop failed. Errno: " + std::to_string(errno); + std::string source_addr_str(buf); + gpr_free(buf); + // This test + // assumes that the source address for any loopback destination is also the + // loopback address. + EXPECT_EQ(source_addr_str, "::1"); +} + +} // namespace + int main(int argc, char** argv) { char* resolver = gpr_getenv("GRPC_DNS_RESOLVER"); if (resolver == nullptr || strlen(resolver) == 0) { @@ -754,9 +840,7 @@ int main(int argc, char** argv) { gpr_free(resolver); grpc_test_init(argc, argv); ::testing::InitGoogleTest(&argc, argv); - grpc_init(); auto result = RUN_ALL_TESTS(); - grpc_shutdown(); // Test sequential and nested inits and shutdowns. 
grpc_init(); grpc_init(); diff --git a/test/cpp/naming/gen_build_yaml.py b/test/cpp/naming/gen_build_yaml.py index baa6512f62..5dad2ea7af 100755 --- a/test/cpp/naming/gen_build_yaml.py +++ b/test/cpp/naming/gen_build_yaml.py @@ -110,7 +110,7 @@ def main(): 'gtest': True, 'run': True, 'src': ['test/cpp/naming/address_sorting_test.cc'], - 'platforms': ['linux', 'posix', 'mac'], + 'platforms': ['linux', 'posix', 'mac', 'windows'], 'deps': [ 'grpc++_test_util' + unsecure_build_config_suffix, 'grpc_test_util' + unsecure_build_config_suffix, diff --git a/third_party/address_sorting/address_sorting.c b/third_party/address_sorting/address_sorting.c index e4f3b53799..9aee0a5419 100644 --- a/third_party/address_sorting/address_sorting.c +++ b/third_party/address_sorting/address_sorting.c @@ -55,12 +55,17 @@ static const int kIPv6AddrScopeGlobal = 3; static address_sorting_source_addr_factory* g_current_source_addr_factory = NULL; -static int address_sorting_get_source_addr(const address_sorting_address* dest, - address_sorting_address* source) { +static bool address_sorting_get_source_addr(const address_sorting_address* dest, + address_sorting_address* source) { return g_current_source_addr_factory->vtable->get_source_addr( g_current_source_addr_factory, dest, source); } +bool address_sorting_get_source_addr_for_testing( + const address_sorting_address* dest, address_sorting_address* source) { + return address_sorting_get_source_addr(dest, source); +} + static int ipv6_prefix_match_length(const struct sockaddr_in6* sa, const struct sockaddr_in6* sb) { unsigned char* a = (unsigned char*)&sa->sin6_addr; diff --git a/third_party/address_sorting/address_sorting_windows.c b/third_party/address_sorting/address_sorting_windows.c index b2f5708649..662a88248e 100644 --- a/third_party/address_sorting/address_sorting_windows.c +++ b/third_party/address_sorting/address_sorting_windows.c @@ -42,14 +42,54 @@ #if defined(ADDRESS_SORTING_WINDOWS) +#include +#include +#include #include +#include +#include -/* TODO : Add address sorting functionality to work on windows. 
*/ +static bool windows_source_addr_factory_get_source_addr( + address_sorting_source_addr_factory* factory, + const address_sorting_address* dest_addr, + address_sorting_address* source_addr) { + bool source_addr_exists = false; + SOCKET s = socket(((struct sockaddr_in6*)dest_addr)->sin6_family, SOCK_DGRAM, + IPPROTO_UDP); + if (s != INVALID_SOCKET) { + if (connect(s, (struct sockaddr*)dest_addr, (int)dest_addr->len) == 0) { + address_sorting_address found_source_addr; + memset(&found_source_addr, 0, sizeof(found_source_addr)); + found_source_addr.len = sizeof(found_source_addr.addr); + if (getsockname(s, (struct sockaddr*)&found_source_addr.addr, + (socklen_t*)&found_source_addr.len) == 0) { + source_addr_exists = true; + *source_addr = found_source_addr; + } + } + closesocket(s); + } + return source_addr_exists; +} + +static void windows_source_addr_factory_destroy( + address_sorting_source_addr_factory* self) { + free(self); +} + +static const address_sorting_source_addr_factory_vtable + windows_source_addr_factory_vtable = { + windows_source_addr_factory_get_source_addr, + windows_source_addr_factory_destroy, +}; address_sorting_source_addr_factory* address_sorting_create_source_addr_factory_for_current_platform() { - abort(); - return NULL; + address_sorting_source_addr_factory* factory = + malloc(sizeof(address_sorting_source_addr_factory)); + memset(factory, 0, sizeof(address_sorting_source_addr_factory)); + factory->vtable = &windows_source_addr_factory_vtable; + return factory; } #endif // defined(ADDRESS_SORTING_WINDOWS) diff --git a/third_party/address_sorting/include/address_sorting/address_sorting.h b/third_party/address_sorting/include/address_sorting/address_sorting.h index f11cd424b5..c58fafe3f7 100644 --- a/third_party/address_sorting/include/address_sorting/address_sorting.h +++ b/third_party/address_sorting/include/address_sorting/address_sorting.h @@ -103,6 +103,9 @@ address_sorting_family address_sorting_abstract_get_family( void address_sorting_override_source_addr_factory_for_testing( address_sorting_source_addr_factory* factory); +bool address_sorting_get_source_addr_for_testing( + const address_sorting_address* dest, address_sorting_address* source); + #ifdef __cplusplus } #endif diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 576950934e..18f56984fe 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -924,9 +924,12 @@ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ +src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ +src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ +src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/README.md \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ diff --git 
a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 072402b2cf..a686dae8b4 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -1032,6 +1032,23 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "grpc_ipv6_loopback_available_test", + "src": [ + "test/core/iomgr/grpc_ipv6_loopback_available_test.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -10254,9 +10271,12 @@ "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc" + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc" ], "third_party": false, "type": "filegroup" diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index a5439a5db1..5815f82fef 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -1313,6 +1313,32 @@ ], "uses_polling": true }, + { + "args": [], + "benchmark": false, + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "gtest": false, + "language": "c", + "name": "grpc_ipv6_loopback_available_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "uses_polling": true + }, { "args": [], "benchmark": false, @@ -5710,7 +5736,8 @@ "ci_platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5722,7 +5749,8 @@ "platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "uses_polling": true }, @@ -5732,7 +5760,8 @@ "ci_platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5744,7 +5773,8 @@ "platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "uses_polling": true }, -- cgit v1.2.3 From 89a2ddb870a70c5130b59c016399caa6fd9571bb Mon Sep 17 00:00:00 2001 From: Hope Casey-Allen Date: Wed, 25 Jul 2018 14:07:59 -0700 Subject: Fix typo in ev_epollex_linux --- src/core/lib/iomgr/ev_epollex_linux.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc index 7b368410cf..e1f3e43af7 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.cc +++ b/src/core/lib/iomgr/ev_epollex_linux.cc @@ -135,7 +135,7 @@ struct pollable { // underlying epoll set (i.e whenever fd_orphan() is called). // // Implementing (2) above (i.e removing fds from cache on fd_orphan) adds a - // lot of complexity since an fd can be present in multiple pollalbles. 
So our + // lot of complexity since an fd can be present in multiple pollables. So our // implementation ONLY DOES (1) and NOT (2). // // The cache_fd.salt variable helps here to maintain correctness (it serves as -- cgit v1.2.3 From f34c65393622878e9610939193fd9c754b7c0a74 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Thu, 26 Jul 2018 14:32:07 -0700 Subject: Revert "Merge pull request #15797 from apolcyn/windows_compile_and_sort" This reverts commit ae8d3efc3a360b289f0b33e1c53c8c73960cb31f, reversing changes made to e41215e181564a61320b9b69ae5feb7f7c3625fe. --- BUILD | 3 - CMakeLists.txt | 44 ++---- Makefile | 42 ------ build.yaml | 15 -- config.m4 | 3 - config.w32 | 3 - gRPC-Core.podspec | 3 - grpc.gemspec | 3 - grpc.gyp | 6 - include/grpc/impl/codegen/port_platform.h | 4 + package.xml | 3 - .../resolver/dns/c_ares/dns_resolver_ares.cc | 5 +- .../resolver/dns/c_ares/grpc_ares_ev_driver.cc | 6 +- .../dns/c_ares/grpc_ares_ev_driver_windows.cc | 59 -------- .../resolver/dns/c_ares/grpc_ares_wrapper.cc | 9 +- .../resolver/dns/c_ares/grpc_ares_wrapper.h | 4 - .../resolver/dns/c_ares/grpc_ares_wrapper_posix.cc | 29 ---- .../dns/c_ares/grpc_ares_wrapper_windows.cc | 29 ---- src/core/lib/iomgr/socket_windows.cc | 29 ---- src/core/lib/iomgr/socket_windows.h | 4 - src/python/grpcio/grpc_core_dependencies.py | 3 - test/core/iomgr/BUILD | 13 -- .../iomgr/grpc_ipv6_loopback_available_test.cc | 48 ------- test/cpp/naming/address_sorting_test.cc | 160 +++++---------------- test/cpp/naming/gen_build_yaml.py | 2 +- third_party/address_sorting/address_sorting.c | 9 +- .../address_sorting/address_sorting_windows.c | 46 +----- .../include/address_sorting/address_sorting.h | 3 - tools/doxygen/Doxyfile.core.internal | 3 - tools/run_tests/generated/sources_and_headers.json | 22 +-- tools/run_tests/generated/tests.json | 38 +---- 31 files changed, 73 insertions(+), 577 deletions(-) delete mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc delete mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc delete mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc delete mode 100644 test/core/iomgr/grpc_ipv6_loopback_available_test.cc (limited to 'src/core/lib/iomgr') diff --git a/BUILD b/BUILD index 81390dd1aa..ee4b5dfaec 100644 --- a/BUILD +++ b/BUILD @@ -1433,10 +1433,7 @@ grpc_cc_library( "src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc", ], hdrs = [ diff --git a/CMakeLists.txt b/CMakeLists.txt index e8e65d4b71..84e9c08cb5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -298,7 +298,6 @@ add_dependencies(buildtests_c grpc_completion_queue_test) add_dependencies(buildtests_c grpc_completion_queue_threading_test) add_dependencies(buildtests_c grpc_credentials_test) add_dependencies(buildtests_c grpc_fetch_oauth2) -add_dependencies(buildtests_c 
grpc_ipv6_loopback_available_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c grpc_json_token_test) endif() @@ -672,8 +671,12 @@ endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker) endif() +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx address_sorting_test_unsecure) +endif() +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx address_sorting_test) +endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx cancel_ares_query_test) endif() @@ -1233,11 +1236,8 @@ add_library(grpc src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc src/cpp/ext/filters/census/grpc_context.cc @@ -2538,11 +2538,8 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc @@ -7326,35 +7323,6 @@ target_link_libraries(grpc_fetch_oauth2 gpr ) -endif (gRPC_BUILD_TESTS) -if (gRPC_BUILD_TESTS) - -add_executable(grpc_ipv6_loopback_available_test - test/core/iomgr/grpc_ipv6_loopback_available_test.cc -) - - -target_include_directories(grpc_ipv6_loopback_available_test - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include - PRIVATE ${_gRPC_SSL_INCLUDE_DIR} - PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR} - PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR} - PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR} - PRIVATE ${_gRPC_CARES_INCLUDE_DIR} - PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR} - PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} - PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR} -) - -target_link_libraries(grpc_ipv6_loopback_available_test - ${_gRPC_ALLTARGETS_LIBRARIES} - grpc_test_util - grpc - gpr_test_util - gpr -) - endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) @@ -16383,6 
+16351,7 @@ target_link_libraries(resolver_component_tests_runner_invoker endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(address_sorting_test_unsecure test/cpp/naming/address_sorting_test.cc @@ -16422,8 +16391,10 @@ target_link_libraries(address_sorting_test_unsecure ${_gRPC_GFLAGS_LIBRARIES} ) +endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(address_sorting_test test/cpp/naming/address_sorting_test.cc @@ -16463,6 +16434,7 @@ target_link_libraries(address_sorting_test ${_gRPC_GFLAGS_LIBRARIES} ) +endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) diff --git a/Makefile b/Makefile index 5174ab6719..bad41975a0 100644 --- a/Makefile +++ b/Makefile @@ -1022,7 +1022,6 @@ grpc_completion_queue_threading_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_ grpc_create_jwt: $(BINDIR)/$(CONFIG)/grpc_create_jwt grpc_credentials_test: $(BINDIR)/$(CONFIG)/grpc_credentials_test grpc_fetch_oauth2: $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 -grpc_ipv6_loopback_available_test: $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test grpc_json_token_test: $(BINDIR)/$(CONFIG)/grpc_json_token_test grpc_jwt_verifier_test: $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test grpc_print_google_default_creds_token: $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token @@ -1473,7 +1472,6 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test \ $(BINDIR)/$(CONFIG)/grpc_credentials_test \ $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 \ - $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test \ $(BINDIR)/$(CONFIG)/grpc_json_token_test \ $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test \ $(BINDIR)/$(CONFIG)/grpc_security_connector_test \ @@ -2030,8 +2028,6 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test || ( echo test grpc_completion_queue_threading_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_credentials_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_credentials_test || ( echo test grpc_credentials_test failed ; exit 1 ) - $(E) "[RUN] Testing grpc_ipv6_loopback_available_test" - $(Q) $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test || ( echo test grpc_ipv6_loopback_available_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_json_token_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_json_token_test || ( echo test grpc_json_token_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_jwt_verifier_test" @@ -3708,11 +3704,8 @@ LIBGRPC_SRC = \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/cpp/ext/filters/census/grpc_context.cc \ @@ -4979,11 +4972,8 @@ LIBGRPC_UNSECURE_SRC 
= \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ @@ -12375,38 +12365,6 @@ endif endif -GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_SRC = \ - test/core/iomgr/grpc_ipv6_loopback_available_test.cc \ - -GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_SRC)))) -ifeq ($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. - -$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test: openssl_dep_error - -else - - - -$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test: $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LD) $(LDFLAGS) $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test - -endif - -$(OBJDIR)/$(CONFIG)/test/core/iomgr/grpc_ipv6_loopback_available_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - -deps_grpc_ipv6_loopback_available_test: $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS:.o=.dep) -endif -endif - - GRPC_JSON_TOKEN_TEST_SRC = \ test/core/security/json_token_test.cc \ diff --git a/build.yaml b/build.yaml index 70af96046c..30389ec114 100644 --- a/build.yaml +++ b/build.yaml @@ -740,11 +740,8 @@ filegroups: - src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc - - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc - - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc - - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc plugin: grpc_resolver_dns_ares uses: - grpc_base @@ -2733,18 +2730,6 @@ targets: - grpc - gpr_test_util - gpr -- name: grpc_ipv6_loopback_available_test - build: test - language: c - src: - - test/core/iomgr/grpc_ipv6_loopback_available_test.cc - deps: - - 
grpc_test_util - - grpc - - gpr_test_util - - gpr - exclude_iomgrs: - - uv - name: grpc_json_token_test build: test language: c diff --git a/config.m4 b/config.m4 index aa40a698a6..c277ccafc8 100644 --- a/config.m4 +++ b/config.m4 @@ -380,11 +380,8 @@ if test "$PHP_GRPC" != "no"; then src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/cpp/ext/filters/census/grpc_context.cc \ diff --git a/config.w32 b/config.w32 index 5afa4466ac..2857781dd5 100644 --- a/config.w32 +++ b/config.w32 @@ -355,11 +355,8 @@ if (PHP_GRPC != "no") { "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " + - "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_windows.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_fallback.cc " + - "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_posix.cc " + - "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_windows.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\native\\dns_resolver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\sockaddr\\sockaddr_resolver.cc " + "src\\cpp\\ext\\filters\\census\\grpc_context.cc " + diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 5c3649afbd..23edaec656 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -802,11 +802,8 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', diff --git a/grpc.gemspec b/grpc.gemspec index c250316b99..b69d5a7c6f 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -742,11 +742,8 @@ Gem::Specification.new do |s| s.files += %w( 
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc ) - s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc ) - s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc ) - s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc ) s.files += %w( src/cpp/ext/filters/census/grpc_context.cc ) diff --git a/grpc.gyp b/grpc.gyp index 25082fe540..e1485efa05 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -572,11 +572,8 @@ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', @@ -1290,11 +1287,8 @@ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 2b61a8816d..01ce5f03e9 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -420,8 +420,12 @@ typedef unsigned __int64 uint64_t; #define GPR_MAX_ALIGNMENT 16 #ifndef GRPC_ARES +#ifdef GPR_WINDOWS +#define GRPC_ARES 0 +#else #define GRPC_ARES 1 #endif +#endif #ifndef GRPC_MUST_USE_RESULT #if defined(__GNUC__) && !defined(__MINGW32__) diff --git a/package.xml b/package.xml index acdc6ffdb3..7f71536b1d 100644 --- 
a/package.xml +++ b/package.xml @@ -747,11 +747,8 @@ - - - diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index 7050e82121..f4f6444c5f 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -141,8 +142,8 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args) channel_args_ = grpc_channel_args_copy(args.args); const grpc_arg* arg = grpc_channel_args_find( channel_args_, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION); - grpc_integer_options integer_options = {false, false, true}; - request_service_config_ = !grpc_channel_arg_get_integer(arg, integer_options); + request_service_config_ = !grpc_channel_arg_get_integer( + arg, (grpc_integer_options){false, false, true}); arg = grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); min_time_between_resolutions_ = diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc index 0068d0d5f4..c886795608 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc @@ -18,10 +18,11 @@ #include #include "src/core/lib/iomgr/port.h" -#if GRPC_ARES == 1 && !defined(GRPC_UV) +#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) #include #include +#include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" @@ -31,6 +32,7 @@ #include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/sockaddr_utils.h" @@ -312,4 +314,4 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) { } } -#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */ +#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc deleted file mode 100644 index 5d65ae3ab3..0000000000 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -#include - -#include "src/core/lib/iomgr/port.h" -#if GRPC_ARES == 1 && defined(GPR_WINDOWS) - -#include -#include -#include "src/core/lib/gprpp/memory.h" - -#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" - -namespace grpc_core { - -/* TODO: fill in the body of GrpcPolledFdWindows to enable c-ares on Windows. - This dummy implementation only allows grpc to compile on windows with - GRPC_ARES=1. */ -class GrpcPolledFdWindows : public GrpcPolledFd { - public: - GrpcPolledFdWindows() { abort(); } - ~GrpcPolledFdWindows() { abort(); } - void RegisterForOnReadableLocked(grpc_closure* read_closure) override { - abort(); - } - void RegisterForOnWriteableLocked(grpc_closure* write_closure) override { - abort(); - } - bool IsFdStillReadableLocked() override { abort(); } - void ShutdownLocked(grpc_error* error) override { abort(); } - ares_socket_t GetWrappedAresSocketLocked() override { abort(); } - const char* GetName() override { abort(); } -}; - -GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, - grpc_pollset_set* driver_pollset_set) { - return nullptr; -} - -void ConfigureAresChannelLocked(ares_channel* channel) { abort(); } - -} // namespace grpc_core - -#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index b3d6437e9a..497ad998af 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -22,6 +22,7 @@ #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/socket_utils_posix.h" #include #include @@ -214,7 +215,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in6_addr)); - addr.sin6_family = static_cast(hostent->h_addrtype); + addr.sin6_family = static_cast(hostent->h_addrtype); addr.sin6_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, @@ -235,7 +236,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in_addr)); - addr.sin_family = static_cast(hostent->h_addrtype); + addr.sin_family = static_cast(hostent->h_addrtype); addr.sin_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, @@ -280,7 +281,7 @@ static void on_srv_query_done_locked(void* arg, int status, int timeouts, grpc_ares_ev_driver_get_channel_locked(r->ev_driver); for (struct ares_srv_reply* srv_it = reply; srv_it != nullptr; srv_it = srv_it->next) { - if (grpc_ares_query_ipv6()) { + if (grpc_ipv6_loopback_available()) { grpc_ares_hostbyname_request* hr = create_hostbyname_request_locked( r, srv_it->host, htons(srv_it->port), true /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, @@ -451,7 +452,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( } } r->pending_queries = 1; - if (grpc_ares_query_ipv6()) { + if (grpc_ipv6_loopback_available()) { hr = create_hostbyname_request_locked(r, host, strhtons(port), false /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_locked, diff --git 
a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h index 17eaa7ccf0..ce26f5d524 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h @@ -70,10 +70,6 @@ void grpc_ares_cleanup(void); * and destroys the grpc_ares_request */ void grpc_ares_complete_request_locked(grpc_ares_request* request); -/* Indicates whether or not AAAA queries should be attempted. */ -/* E.g., return false if ipv6 is known to not be available. */ -bool grpc_ares_query_ipv6(); - /* Exposed only for testing */ void grpc_cares_wrapper_test_only_address_sorting_sort( grpc_lb_addresses* lb_addrs); diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc deleted file mode 100644 index 23c0fec74f..0000000000 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include "src/core/lib/iomgr/port.h" -#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) - -#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" -#include "src/core/lib/iomgr/socket_utils_posix.h" - -bool grpc_ares_query_ipv6() { return grpc_ipv6_loopback_available(); } - -#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc deleted file mode 100644 index ee827e284e..0000000000 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include - -#include "src/core/lib/iomgr/port.h" -#if GRPC_ARES == 1 && defined(GPR_WINDOWS) - -#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" -#include "src/core/lib/iomgr/socket_windows.h" - -bool grpc_ares_query_ipv6() { return grpc_ipv6_loopback_available(); } - -#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */ diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc index 4ad31cb35d..2e23409582 100644 --- a/src/core/lib/iomgr/socket_windows.cc +++ b/src/core/lib/iomgr/socket_windows.cc @@ -36,7 +36,6 @@ #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_windows.h" -#include "src/core/lib/iomgr/sockaddr_windows.h" #include "src/core/lib/iomgr/socket_windows.h" grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) { @@ -149,32 +148,4 @@ void grpc_socket_become_ready(grpc_winsocket* socket, if (should_destroy) destroy(socket); } -static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT; -static bool g_ipv6_loopback_available = false; - -static void probe_ipv6_once(void) { - SOCKET s = socket(AF_INET6, SOCK_STREAM, 0); - g_ipv6_loopback_available = 0; - if (s == INVALID_SOCKET) { - gpr_log(GPR_INFO, "Disabling AF_INET6 sockets because socket() failed."); - } else { - grpc_sockaddr_in6 addr; - memset(&addr, 0, sizeof(addr)); - addr.sin6_family = AF_INET6; - addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */ - if (bind(s, reinterpret_cast(&addr), sizeof(addr)) == 0) { - g_ipv6_loopback_available = 1; - } else { - gpr_log(GPR_INFO, - "Disabling AF_INET6 sockets because ::1 is not available."); - } - closesocket(s); - } -} - -int grpc_ipv6_loopback_available(void) { - gpr_once_init(&g_probe_ipv6_once, probe_ipv6_once); - return g_ipv6_loopback_available; -} - #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h index b09b9da562..7bd01eded5 100644 --- a/src/core/lib/iomgr/socket_windows.h +++ b/src/core/lib/iomgr/socket_windows.h @@ -108,10 +108,6 @@ void grpc_socket_notify_on_read(grpc_winsocket* winsocket, void grpc_socket_become_ready(grpc_winsocket* winsocket, grpc_winsocket_callback_info* ci); -/* Returns true if this system can create AF_INET6 sockets bound to ::1. - The value is probed once, and cached for the life of the process. 
*/ -int grpc_ipv6_loopback_available(void); - #endif #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */ diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index d6efb49750..49185cc648 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -354,11 +354,8 @@ CORE_SOURCE_FILES = [ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', - 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', diff --git a/test/core/iomgr/BUILD b/test/core/iomgr/BUILD index 002671a5fa..fb0490a95f 100644 --- a/test/core/iomgr/BUILD +++ b/test/core/iomgr/BUILD @@ -124,19 +124,6 @@ grpc_cc_test( ], ) -grpc_cc_test( - name = "grpc_ipv6_loopback_available_test", - srcs = ["grpc_ipv6_loopback_available_test.cc"], - language = "C++", - deps = [ - "//:gpr", - "//:grpc", - "//test/core/util:gpr_test_util", - "//test/core/util:grpc_test_util", - ], -) - - grpc_cc_test( name = "load_file_test", srcs = ["load_file_test.cc"], diff --git a/test/core/iomgr/grpc_ipv6_loopback_available_test.cc b/test/core/iomgr/grpc_ipv6_loopback_available_test.cc deleted file mode 100644 index 329aa9a851..0000000000 --- a/test/core/iomgr/grpc_ipv6_loopback_available_test.cc +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -// grpc_ipv6_loopback_available isn't currently available on UV. -#ifndef GRPC_UV - -#include -#include -#include "test/core/util/test_config.h" - -#ifdef GPR_WINDOWS -#include "src/core/lib/iomgr/socket_windows.h" -#else -#include "src/core/lib/iomgr/socket_utils_posix.h" -#endif - -int main(int argc, char** argv) { - grpc_test_init(argc, argv); - grpc_init(); - // This test assumes that the ipv6 loopback is available - // in all environments in which grpc tests run in. 
- GPR_ASSERT(grpc_ipv6_loopback_available()); - grpc_shutdown(); - return 0; -} - -#else - -int main(int argc, char** argv) { return 0; } - -#endif /* GRPC_UV */ diff --git a/test/cpp/naming/address_sorting_test.cc b/test/cpp/naming/address_sorting_test.cc index 04c300876c..a92e9e3b3e 100644 --- a/test/cpp/naming/address_sorting_test.cc +++ b/test/cpp/naming/address_sorting_test.cc @@ -24,8 +24,10 @@ #include #include +#include #include #include +#include #include #include @@ -49,11 +51,6 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" -#ifndef GPR_WINDOWS -#include -#include -#endif - namespace { struct TestAddress { @@ -193,18 +190,10 @@ void VerifyLbAddrOutputs(grpc_lb_addresses* lb_addrs, grpc_lb_addresses_destroy(lb_addrs); } -/* We need to run each test case inside of its own - * isolated grpc_init/grpc_shutdown pair, so that - * the "address sorting source addr factory" can be - * restored to its default for each test case. */ -class AddressSortingTest : public ::testing::Test { - protected: - void SetUp() override { grpc_init(); } - void TearDown() override { grpc_shutdown(); } -}; +} // namespace /* Tests for rule 1 */ -TEST_F(AddressSortingTest, TestDepriotizesUnreachableAddresses) { +TEST(AddressSortingTest, TestDepriotizesUnreachableAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -223,7 +212,7 @@ TEST_F(AddressSortingTest, TestDepriotizesUnreachableAddresses) { }); } -TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { +TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { bool ipv4_supported = true; bool ipv6_supported = false; OverrideAddressSortingSourceAddrFactory( @@ -242,7 +231,7 @@ TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { }); } -TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { +TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { bool ipv4_supported = false; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -264,7 +253,7 @@ TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { /* Tests for rule 2 */ -TEST_F(AddressSortingTest, TestDepriotizesNonMatchingScope) { +TEST(AddressSortingTest, TestDepriotizesNonMatchingScope) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -288,7 +277,7 @@ TEST_F(AddressSortingTest, TestDepriotizesNonMatchingScope) { /* Tests for rule 5 */ -TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTable) { +TEST(AddressSortingTest, TestUsesLabelFromDefaultTable) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -311,7 +300,7 @@ TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTable) { /* Flip the input on the test above to reorder the sort function's * comparator's inputs. 
*/ -TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { +TEST(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -334,8 +323,8 @@ TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { /* Tests for rule 6 */ -TEST_F(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithAnIpv4Address) { +TEST(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithAnIpv4Address) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -359,8 +348,8 @@ TEST_F(AddressSortingTest, }); } -TEST_F(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithV4CompatAndLocalhostAddress) { +TEST(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithV4CompatAndLocalhostAddress) { bool ipv4_supported = true; bool ipv6_supported = true; // Handle unique observed behavior of inet_ntop(v4-compatible-address) on OS X. @@ -388,8 +377,8 @@ TEST_F(AddressSortingTest, }); } -TEST_F(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithCatchAllAndLocalhostAddress) { +TEST(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithCatchAllAndLocalhostAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -414,8 +403,8 @@ TEST_F(AddressSortingTest, }); } -TEST_F(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddress) { +TEST(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -437,7 +426,7 @@ TEST_F(AddressSortingTest, }); } -TEST_F( +TEST( AddressSortingTest, TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddressEnsurePrefixMatchHasNoEffect) { bool ipv4_supported = true; @@ -459,8 +448,8 @@ TEST_F( }); } -TEST_F(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithLinkAndSiteLocalAddresses) { +TEST(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithLinkAndSiteLocalAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -480,22 +469,19 @@ TEST_F(AddressSortingTest, }); } -TEST_F( +TEST( AddressSortingTest, TestUsesDestinationWithHigherPrecedenceWithCatchAllAndAndV4MappedAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; - // Use embedded ipv4 addresses with leading 1's instead of zero's to be - // compatible with inet_ntop implementations that can display such - // addresses with leading zero's as e.g.: "::ffff:0:2", as on windows. OverrideAddressSortingSourceAddrFactory( ipv4_supported, ipv6_supported, { - {"[::ffff:1.1.1.2]:443", {"[::ffff:1.1.1.3]:0", AF_INET6}}, + {"[::ffff:0.0.0.2]:443", {"[::ffff:0.0.0.3]:0", AF_INET6}}, {"[1234::2]:443", {"[1234::3]:0", AF_INET6}}, }); grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ - {"[::ffff:1.1.1.2]:443", AF_INET6}, + {"[::ffff:0.0.0.2]:443", AF_INET6}, {"[1234::2]:443", AF_INET6}, }); grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); @@ -503,13 +489,13 @@ TEST_F( // ::ffff:0:2 should match the v4-mapped // precedence entry and be deprioritized. 
"[1234::2]:443", - "[::ffff:1.1.1.2]:443", + "[::ffff:0.0.0.2]:443", }); } /* Tests for rule 8 */ -TEST_F(AddressSortingTest, TestPrefersSmallerScope) { +TEST(AddressSortingTest, TestPrefersSmallerScope) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -534,7 +520,7 @@ TEST_F(AddressSortingTest, TestPrefersSmallerScope) { /* Tests for rule 9 */ -TEST_F(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { +TEST(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -557,8 +543,8 @@ TEST_F(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { }); } -TEST_F(AddressSortingTest, - TestPrefersLongestMatchingSrcDstPrefixMatchesWholeAddress) { +TEST(AddressSortingTest, + TestPrefersLongestMatchingSrcDstPrefixMatchesWholeAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -578,7 +564,7 @@ TEST_F(AddressSortingTest, }); } -TEST_F(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { +TEST(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -598,7 +584,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { }); } -TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { +TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -618,7 +604,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { }); } -TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { +TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -642,7 +628,7 @@ TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { /* Tests for rule 10 */ -TEST_F(AddressSortingTest, TestStableSort) { +TEST(AddressSortingTest, TestStableSort) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -662,7 +648,7 @@ TEST_F(AddressSortingTest, TestStableSort) { }); } -TEST_F(AddressSortingTest, TestStableSortFiveElements) { +TEST(AddressSortingTest, TestStableSortFiveElements) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -691,7 +677,7 @@ TEST_F(AddressSortingTest, TestStableSortFiveElements) { }); } -TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExist) { +TEST(AddressSortingTest, TestStableSortNoSrcAddrsExist) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory(ipv4_supported, ipv6_supported, {}); @@ -712,7 +698,7 @@ TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExist) { }); } -TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { +TEST(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory(ipv4_supported, ipv6_supported, {}); @@ -727,7 +713,7 @@ TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { }); } -TEST_F(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { +TEST(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { bool ipv4_supported = 
true; bool ipv6_supported = true; // Handle unique observed behavior of inet_ntop(v4-compatible-address) on OS X. @@ -758,78 +744,6 @@ TEST_F(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { }); } -/* TestPrefersIpv6Loopback tests the actual "address probing" code - * for the current platform, without any mocks. - * This test relies on the assumption that the ipv6 loopback address is - * available in the hosts/containers that grpc C/C++ tests run on - * (whether ipv4 loopback is available or not, an available ipv6 - * loopback should be preferred). */ -TEST_F(AddressSortingTest, TestPrefersIpv6Loopback) { - grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ - {"[::1]:443", AF_INET6}, - {"127.0.0.1:443", AF_INET}, - }); - grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); - VerifyLbAddrOutputs(lb_addrs, { - "[::1]:443", - "127.0.0.1:443", - }); -} - -/* Flip the order of the inputs above and expect the same output order - * (try to rule out influence of arbitrary qsort ordering) */ -TEST_F(AddressSortingTest, TestPrefersIpv6LoopbackInputsFlipped) { - grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ - {"127.0.0.1:443", AF_INET}, - {"[::1]:443", AF_INET6}, - }); - grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); - VerifyLbAddrOutputs(lb_addrs, { - "[::1]:443", - "127.0.0.1:443", - }); -} - -/* Try to rule out false positives in the above two tests in which - * the sorter might think that neither ipv6 or ipv4 loopback is - * available, but ipv6 loopback is still preferred only due - * to precedance table lookups. */ -TEST_F(AddressSortingTest, TestSorterKnowsIpv6LoopbackIsAvailable) { - sockaddr_in6 ipv6_loopback; - memset(&ipv6_loopback, 0, sizeof(ipv6_loopback)); - ipv6_loopback.sin6_family = AF_INET6; - ((char*)&ipv6_loopback.sin6_addr)[15] = 1; - ipv6_loopback.sin6_port = htons(443); - // Set up the source and destination parameters of - // address_sorting_get_source_addr - address_sorting_address sort_input_dest; - memcpy(&sort_input_dest.addr, &ipv6_loopback, sizeof(ipv6_loopback)); - sort_input_dest.len = sizeof(ipv6_loopback); - address_sorting_address source_for_sort_input_dest; - memset(&source_for_sort_input_dest, 0, sizeof(source_for_sort_input_dest)); - // address_sorting_get_source_addr returns true if a source address was found - // for the destination address, otherwise false. - EXPECT_TRUE(address_sorting_get_source_addr_for_testing( - &sort_input_dest, &source_for_sort_input_dest)); - // Now also check that the source address was filled in correctly. - EXPECT_GT(source_for_sort_input_dest.len, 0u); - sockaddr_in6* source_addr_output = - (sockaddr_in6*)source_for_sort_input_dest.addr; - EXPECT_EQ(source_addr_output->sin6_family, AF_INET6); - char* buf = static_cast(gpr_zalloc(100)); - EXPECT_NE(inet_ntop(AF_INET6, &source_addr_output->sin6_addr, buf, 100), - nullptr) - << "inet_ntop failed. Errno: " + std::to_string(errno); - std::string source_addr_str(buf); - gpr_free(buf); - // This test - // assumes that the source address for any loopback destination is also the - // loopback address. - EXPECT_EQ(source_addr_str, "::1"); -} - -} // namespace - int main(int argc, char** argv) { char* resolver = gpr_getenv("GRPC_DNS_RESOLVER"); if (resolver == nullptr || strlen(resolver) == 0) { @@ -840,7 +754,9 @@ int main(int argc, char** argv) { gpr_free(resolver); grpc_test_init(argc, argv); ::testing::InitGoogleTest(&argc, argv); + grpc_init(); auto result = RUN_ALL_TESTS(); + grpc_shutdown(); // Test sequential and nested inits and shutdowns. 
grpc_init(); grpc_init(); diff --git a/test/cpp/naming/gen_build_yaml.py b/test/cpp/naming/gen_build_yaml.py index 5dad2ea7af..baa6512f62 100755 --- a/test/cpp/naming/gen_build_yaml.py +++ b/test/cpp/naming/gen_build_yaml.py @@ -110,7 +110,7 @@ def main(): 'gtest': True, 'run': True, 'src': ['test/cpp/naming/address_sorting_test.cc'], - 'platforms': ['linux', 'posix', 'mac', 'windows'], + 'platforms': ['linux', 'posix', 'mac'], 'deps': [ 'grpc++_test_util' + unsecure_build_config_suffix, 'grpc_test_util' + unsecure_build_config_suffix, diff --git a/third_party/address_sorting/address_sorting.c b/third_party/address_sorting/address_sorting.c index 9aee0a5419..e4f3b53799 100644 --- a/third_party/address_sorting/address_sorting.c +++ b/third_party/address_sorting/address_sorting.c @@ -55,17 +55,12 @@ static const int kIPv6AddrScopeGlobal = 3; static address_sorting_source_addr_factory* g_current_source_addr_factory = NULL; -static bool address_sorting_get_source_addr(const address_sorting_address* dest, - address_sorting_address* source) { +static int address_sorting_get_source_addr(const address_sorting_address* dest, + address_sorting_address* source) { return g_current_source_addr_factory->vtable->get_source_addr( g_current_source_addr_factory, dest, source); } -bool address_sorting_get_source_addr_for_testing( - const address_sorting_address* dest, address_sorting_address* source) { - return address_sorting_get_source_addr(dest, source); -} - static int ipv6_prefix_match_length(const struct sockaddr_in6* sa, const struct sockaddr_in6* sb) { unsigned char* a = (unsigned char*)&sa->sin6_addr; diff --git a/third_party/address_sorting/address_sorting_windows.c b/third_party/address_sorting/address_sorting_windows.c index 662a88248e..b2f5708649 100644 --- a/third_party/address_sorting/address_sorting_windows.c +++ b/third_party/address_sorting/address_sorting_windows.c @@ -42,54 +42,14 @@ #if defined(ADDRESS_SORTING_WINDOWS) -#include -#include -#include #include -#include -#include -static bool windows_source_addr_factory_get_source_addr( - address_sorting_source_addr_factory* factory, - const address_sorting_address* dest_addr, - address_sorting_address* source_addr) { - bool source_addr_exists = false; - SOCKET s = socket(((struct sockaddr_in6*)dest_addr)->sin6_family, SOCK_DGRAM, - IPPROTO_UDP); - if (s != INVALID_SOCKET) { - if (connect(s, (struct sockaddr*)dest_addr, (int)dest_addr->len) == 0) { - address_sorting_address found_source_addr; - memset(&found_source_addr, 0, sizeof(found_source_addr)); - found_source_addr.len = sizeof(found_source_addr.addr); - if (getsockname(s, (struct sockaddr*)&found_source_addr.addr, - (socklen_t*)&found_source_addr.len) == 0) { - source_addr_exists = true; - *source_addr = found_source_addr; - } - } - closesocket(s); - } - return source_addr_exists; -} - -static void windows_source_addr_factory_destroy( - address_sorting_source_addr_factory* self) { - free(self); -} - -static const address_sorting_source_addr_factory_vtable - windows_source_addr_factory_vtable = { - windows_source_addr_factory_get_source_addr, - windows_source_addr_factory_destroy, -}; +/* TODO : Add address sorting functionality to work on windows. 
*/ address_sorting_source_addr_factory* address_sorting_create_source_addr_factory_for_current_platform() { - address_sorting_source_addr_factory* factory = - malloc(sizeof(address_sorting_source_addr_factory)); - memset(factory, 0, sizeof(address_sorting_source_addr_factory)); - factory->vtable = &windows_source_addr_factory_vtable; - return factory; + abort(); + return NULL; } #endif // defined(ADDRESS_SORTING_WINDOWS) diff --git a/third_party/address_sorting/include/address_sorting/address_sorting.h b/third_party/address_sorting/include/address_sorting/address_sorting.h index c58fafe3f7..f11cd424b5 100644 --- a/third_party/address_sorting/include/address_sorting/address_sorting.h +++ b/third_party/address_sorting/include/address_sorting/address_sorting.h @@ -103,9 +103,6 @@ address_sorting_family address_sorting_abstract_get_family( void address_sorting_override_source_addr_factory_for_testing( address_sorting_source_addr_factory* factory); -bool address_sorting_get_source_addr_for_testing( - const address_sorting_address* dest, address_sorting_address* source); - #ifdef __cplusplus } #endif diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 18f56984fe..576950934e 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -924,12 +924,9 @@ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ -src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ -src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ -src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/README.md \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index a686dae8b4..072402b2cf 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -1032,23 +1032,6 @@ "third_party": false, "type": "target" }, - { - "deps": [ - "gpr", - "gpr_test_util", - "grpc", - "grpc_test_util" - ], - "headers": [], - "is_filegroup": false, - "language": "c", - "name": "grpc_ipv6_loopback_available_test", - "src": [ - "test/core/iomgr/grpc_ipv6_loopback_available_test.cc" - ], - "third_party": false, - "type": "target" - }, { "deps": [ "gpr", @@ -10271,12 +10254,9 @@ "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc", 
"src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc" + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc" ], "third_party": false, "type": "filegroup" diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 5815f82fef..a5439a5db1 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -1313,32 +1313,6 @@ ], "uses_polling": true }, - { - "args": [], - "benchmark": false, - "ci_platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "gtest": false, - "language": "c", - "name": "grpc_ipv6_loopback_available_test", - "platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "uses_polling": true - }, { "args": [], "benchmark": false, @@ -5736,8 +5710,7 @@ "ci_platforms": [ "linux", "mac", - "posix", - "windows" + "posix" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5749,8 +5722,7 @@ "platforms": [ "linux", "mac", - "posix", - "windows" + "posix" ], "uses_polling": true }, @@ -5760,8 +5732,7 @@ "ci_platforms": [ "linux", "mac", - "posix", - "windows" + "posix" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5773,8 +5744,7 @@ "platforms": [ "linux", "mac", - "posix", - "windows" + "posix" ], "uses_polling": true }, -- cgit v1.2.3 From 35925d5863eb820df2ac4e87a607a810e6bd83ab Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Thu, 26 Jul 2018 14:57:22 -0700 Subject: Add API to grpc event engines to forcibly set underlying fd to be readable/writable/errored --- src/core/lib/iomgr/ev_epoll1_linux.cc | 9 +++++++++ src/core/lib/iomgr/ev_epollex_linux.cc | 9 +++++++++ src/core/lib/iomgr/ev_epollsig_linux.cc | 9 +++++++++ src/core/lib/iomgr/ev_poll_posix.cc | 20 ++++++++++++++++++++ src/core/lib/iomgr/ev_posix.cc | 6 ++++++ src/core/lib/iomgr/ev_posix.h | 18 ++++++++++++++++++ 6 files changed, 71 insertions(+) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc index 86a0243d2e..d1ea67c3dd 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.cc +++ b/src/core/lib/iomgr/ev_epoll1_linux.cc @@ -397,6 +397,12 @@ static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) { fd->error_closure->NotifyOn(closure); } +static void fd_set_readable(grpc_fd* fd) { fd->read_closure->SetReady(); } + +static void fd_set_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } + +static void fd_set_error(grpc_fd* fd) { fd->error_closure->SetReady(); } + static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { fd->read_closure->SetReady(); /* Use release store to match with acquire load in fd_get_read_notifier */ @@ -1217,6 +1223,9 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_read, fd_notify_on_write, fd_notify_on_error, + fd_set_readable, + fd_set_writable, + fd_set_error, fd_is_shutdown, fd_get_read_notifier_pollset, diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc index 7b368410cf..c7a1a8768a 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.cc +++ b/src/core/lib/iomgr/ev_epollex_linux.cc @@ -550,6 +550,12 @@ static void fd_notify_on_error(grpc_fd* fd, 
grpc_closure* closure) { fd->error_closure->NotifyOn(closure); } +static void fd_set_readable(grpc_fd* fd) { fd->read_closure->SetReady(); } + +static void fd_set_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } + +static void fd_set_error(grpc_fd* fd) { fd->error_closure->SetReady(); } + /******************************************************************************* * Pollable Definitions */ @@ -1636,6 +1642,9 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_read, fd_notify_on_write, fd_notify_on_error, + fd_set_readable, + fd_set_writable, + fd_set_error, fd_is_shutdown, fd_get_read_notifier_pollset, diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc index 2189801c18..be7e7ba8f3 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.cc +++ b/src/core/lib/iomgr/ev_epollsig_linux.cc @@ -958,6 +958,12 @@ static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) { fd->error_closure->NotifyOn(closure); } +static void fd_set_readable(grpc_fd* fd) { fd->read_closure->SetReady(); } + +static void fd_set_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } + +static void fd_set_error(grpc_fd* fd) { fd->error_closure->SetReady(); } + /******************************************************************************* * Pollset Definitions */ @@ -1667,6 +1673,9 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_read, fd_notify_on_write, fd_notify_on_error, + fd_set_readable, + fd_set_writable, + fd_set_error, fd_is_shutdown, fd_get_read_notifier_pollset, diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc index c9c09881a2..7801c02355 100644 --- a/src/core/lib/iomgr/ev_poll_posix.cc +++ b/src/core/lib/iomgr/ev_poll_posix.cc @@ -557,6 +557,23 @@ static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) { abort(); } +static void fd_set_readable(grpc_fd* fd) { + gpr_mu_lock(&fd->mu); + set_ready_locked(fd, &fd->read_closure); + gpr_mu_unlock(&fd->mu); +} + +static void fd_set_writable(grpc_fd* fd) { + gpr_mu_lock(&fd->mu); + set_ready_locked(fd, &fd->write_closure); + gpr_mu_unlock(&fd->mu); +} + +static void fd_set_error(grpc_fd* fd) { + gpr_log(GPR_ERROR, "Polling engine does not support tracking errors."); + abort(); +} + static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, grpc_pollset_worker* worker, uint32_t read_mask, uint32_t write_mask, grpc_fd_watcher* watcher) { @@ -1723,6 +1740,9 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_read, fd_notify_on_write, fd_notify_on_error, + fd_set_readable, + fd_set_writable, + fd_set_error, fd_is_shutdown, fd_get_read_notifier_pollset, diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc index 1139b3273a..0e45fc42ca 100644 --- a/src/core/lib/iomgr/ev_posix.cc +++ b/src/core/lib/iomgr/ev_posix.cc @@ -239,6 +239,12 @@ void grpc_fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) { g_event_engine->fd_notify_on_error(fd, closure); } +void grpc_fd_set_readable(grpc_fd* fd) { g_event_engine->fd_set_readable(fd); } + +void grpc_fd_set_writable(grpc_fd* fd) { g_event_engine->fd_set_writable(fd); } + +void grpc_fd_set_error(grpc_fd* fd) { g_event_engine->fd_set_error(fd); } + static size_t pollset_size(void) { return g_event_engine->pollset_size; } static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index b4c17fc80d..f232844f62 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ 
b/src/core/lib/iomgr/ev_posix.h @@ -51,6 +51,9 @@ typedef struct grpc_event_engine_vtable { void (*fd_notify_on_read)(grpc_fd* fd, grpc_closure* closure); void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure); void (*fd_notify_on_error)(grpc_fd* fd, grpc_closure* closure); + void (*fd_set_readable)(grpc_fd* fd); + void (*fd_set_writable)(grpc_fd* fd); + void (*fd_set_error)(grpc_fd* fd); bool (*fd_is_shutdown)(grpc_fd* fd); grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_fd* fd); @@ -142,6 +145,21 @@ void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure); * needs to have been set on grpc_fd_create */ void grpc_fd_notify_on_error(grpc_fd* fd, grpc_closure* closure); +/* Forcibly set the fd to be readable, resulting in the closure registered with + * grpc_fd_notify_on_read being invoked. + */ +void grpc_fd_set_readable(grpc_fd* fd); + +/* Forcibly set the fd to be writable, resulting in the closure registered with + * grpc_fd_notify_on_write being invoked. + */ +void grpc_fd_set_writable(grpc_fd* fd); + +/* Forcibly set the fd to have errored, resulting in the closure registered with + * grpc_fd_notify_on_error being invoked. + */ +void grpc_fd_set_error(grpc_fd* fd); + /* Return the read notifier pollset from the fd */ grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_fd* fd); -- cgit v1.2.3 From d2d8f4776ccc5cf3079833f70c9f9139d38eb1c9 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Thu, 26 Jul 2018 15:09:50 -0700 Subject: Remove the notifier pollset from grpc event engine since it's not used anywhere --- src/core/lib/iomgr/ev_epoll1_linux.cc | 19 ++-------------- src/core/lib/iomgr/ev_epollex_linux.cc | 26 ++------------------- src/core/lib/iomgr/ev_epollsig_linux.cc | 25 ++------------------- src/core/lib/iomgr/ev_poll_posix.cc | 40 ++++++--------------------------- src/core/lib/iomgr/ev_posix.h | 4 ---- 5 files changed, 13 insertions(+), 101 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc index 86a0243d2e..ecb7eadf85 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.cc +++ b/src/core/lib/iomgr/ev_epoll1_linux.cc @@ -140,10 +140,6 @@ struct grpc_fd { struct grpc_fd* freelist_next; - /* The pollset that last noticed that the fd is readable. 
The actual type - * stored in this is (grpc_pollset *) */ - gpr_atm read_notifier_pollset; - grpc_iomgr_object iomgr_object; }; @@ -293,7 +289,6 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) { new_fd->read_closure->InitEvent(); new_fd->write_closure->InitEvent(); new_fd->error_closure->InitEvent(); - gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); new_fd->freelist_next = nullptr; @@ -376,11 +371,6 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, gpr_mu_unlock(&fd_freelist_mu); } -static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { - gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset*)notifier; -} - static bool fd_is_shutdown(grpc_fd* fd) { return fd->read_closure->IsShutdown(); } @@ -397,11 +387,7 @@ static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) { fd->error_closure->NotifyOn(closure); } -static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { - fd->read_closure->SetReady(); - /* Use release store to match with acquire load in fd_get_read_notifier */ - gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); -} +static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); } static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } @@ -642,7 +628,7 @@ static grpc_error* process_epoll_events(grpc_pollset* pollset) { } if (read_ev || cancel || err_fallback) { - fd_become_readable(fd, pollset); + fd_become_readable(fd); } if (write_ev || cancel || err_fallback) { @@ -1218,7 +1204,6 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_write, fd_notify_on_error, fd_is_shutdown, - fd_get_read_notifier_pollset, pollset_init, pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc index 7b368410cf..ffe70ca626 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.cc +++ b/src/core/lib/iomgr/ev_epollex_linux.cc @@ -220,10 +220,6 @@ struct grpc_fd { struct grpc_fd* freelist_next; grpc_closure* on_done_closure; - // The pollset that last noticed that the fd is readable. The actual type - // stored in this is (grpc_pollset *) - gpr_atm read_notifier_pollset; - grpc_iomgr_object iomgr_object; // Do we need to track EPOLLERR events separately? 
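For context on the hunks below: the read_notifier_pollset field removed above was a
release/acquire-paired atomic. A minimal sketch of the pattern being deleted, mirroring
the removed lines rather than introducing any new behavior:

    /* publisher side, in fd_become_readable(): */
    gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
    /* reader side, in fd_get_read_notifier_pollset(): */
    grpc_pollset* notifier = (grpc_pollset*)gpr_atm_acq_load(&fd->read_notifier_pollset);

Since grpc_fd_get_read_notifier_pollset() no longer has any callers, both halves of this
pairing, along with the extra pollset parameter on fd_become_readable(), are dropped from
each polling engine.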
@@ -353,7 +349,6 @@ static void invalidate_fd(grpc_fd* fd) { memset(&fd->pollable_mu, -1, sizeof(fd->pollable_mu)); fd->pollable_obj = nullptr; fd->on_done_closure = nullptr; - gpr_atm_no_barrier_store(&fd->read_notifier_pollset, 0); memset(&fd->iomgr_object, -1, sizeof(fd->iomgr_object)); fd->track_err = false; } @@ -445,7 +440,6 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) { new_fd->error_closure->InitEvent(); new_fd->freelist_next = nullptr; new_fd->on_done_closure = nullptr; - gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); char* fd_name; gpr_asprintf(&fd_name, "%s fd=%d", name, fd); @@ -514,11 +508,6 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, UNREF_BY(fd, 2, reason); /* Drop the reference */ } -static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { - gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset*)notifier; -} - static bool fd_is_shutdown(grpc_fd* fd) { return fd->read_closure->IsShutdown(); } @@ -875,17 +864,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) { return static_cast(delta); } -static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { - fd->read_closure->SetReady(); - - /* Note, it is possible that fd_become_readable might be called twice with - different 'notifier's when an fd becomes readable and it is in two epoll - sets (This can happen briefly during polling island merges). In such cases - it does not really matter which notifer is set as the read_notifier_pollset - (They would both point to the same polling island anyway) */ - /* Use release store to match with acquire load in fd_get_read_notifier */ - gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); -} +static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); } static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } @@ -983,7 +962,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset, fd_has_errors(fd); } if (read_ev || cancel || err_fallback) { - fd_become_readable(fd, pollset); + fd_become_readable(fd); } if (write_ev || cancel || err_fallback) { fd_become_writable(fd); @@ -1637,7 +1616,6 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_write, fd_notify_on_error, fd_is_shutdown, - fd_get_read_notifier_pollset, pollset_init, pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc index 2189801c18..28656b0666 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.cc +++ b/src/core/lib/iomgr/ev_epollsig_linux.cc @@ -137,10 +137,6 @@ struct grpc_fd { struct grpc_fd* freelist_next; grpc_closure* on_done_closure; - /* The pollset that last noticed that the fd is readable. The actual type - * stored in this is (grpc_pollset *) */ - gpr_atm read_notifier_pollset; - grpc_iomgr_object iomgr_object; /* Do we need to track EPOLLERR events separately? 
*/ @@ -845,7 +841,6 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) { new_fd->write_closure->InitEvent(); new_fd->error_closure->InitEvent(); new_fd->track_err = track_err; - gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); new_fd->freelist_next = nullptr; new_fd->on_done_closure = nullptr; @@ -927,11 +922,6 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, GRPC_ERROR_UNREF(error); } -static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { - gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset*)notifier; -} - static bool fd_is_shutdown(grpc_fd* fd) { return fd->read_closure->IsShutdown(); } @@ -1115,17 +1105,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) { return static_cast(delta); } -static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { - fd->read_closure->SetReady(); - - /* Note, it is possible that fd_become_readable might be called twice with - different 'notifier's when an fd becomes readable and it is in two epoll - sets (This can happen briefly during polling island merges). In such cases - it does not really matter which notifer is set as the read_notifier_pollset - (They would both point to the same polling island anyway) */ - /* Use release store to match with acquire load in fd_get_read_notifier */ - gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); -} +static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); } static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } @@ -1283,7 +1263,7 @@ static void pollset_work_and_unlock(grpc_pollset* pollset, fd_has_errors(fd); } if (read_ev || cancel || err_fallback) { - fd_become_readable(fd, pollset); + fd_become_readable(fd); } if (write_ev || cancel || err_fallback) { fd_become_writable(fd); @@ -1668,7 +1648,6 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_write, fd_notify_on_error, fd_is_shutdown, - fd_get_read_notifier_pollset, pollset_init, pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc index c9c09881a2..ff4888eeb8 100644 --- a/src/core/lib/iomgr/ev_poll_posix.cc +++ b/src/core/lib/iomgr/ev_poll_posix.cc @@ -108,9 +108,6 @@ struct grpc_fd { grpc_closure* on_done_closure; grpc_iomgr_object iomgr_object; - - /* The pollset that last noticed and notified that the fd is readable */ - grpc_pollset* read_notifier_pollset; }; /* Begin polling on an fd. @@ -131,8 +128,7 @@ static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, MUST NOT be called with a pollset lock taken if got_read or got_write are 1, also does the become_{readable,writable} as appropriate. 
*/ -static void fd_end_poll(grpc_fd_watcher* rec, int got_read, int got_write, - grpc_pollset* read_notifier_pollset); +static void fd_end_poll(grpc_fd_watcher* rec, int got_read, int got_write); /* Return 1 if this fd is orphaned, 0 otherwise */ static bool fd_is_orphaned(grpc_fd* fd); @@ -346,7 +342,6 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) { r->closed = 0; r->released = 0; gpr_atm_no_barrier_store(&r->pollhup, 0); - r->read_notifier_pollset = nullptr; char* name2; gpr_asprintf(&name2, "%s fd=%d", name, fd); @@ -359,17 +354,6 @@ static bool fd_is_orphaned(grpc_fd* fd) { return (gpr_atm_acq_load(&fd->refst) & 1) == 0; } -/* Return the read-notifier pollset */ -static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { - grpc_pollset* notifier = nullptr; - - gpr_mu_lock(&fd->mu); - notifier = fd->read_notifier_pollset; - gpr_mu_unlock(&fd->mu); - - return notifier; -} - static grpc_error* pollset_kick_locked(grpc_fd_watcher* watcher) { gpr_mu_lock(&watcher->pollset->mu); GPR_ASSERT(watcher->worker); @@ -512,11 +496,6 @@ static int set_ready_locked(grpc_fd* fd, grpc_closure** st) { } } -static void set_read_notifier_pollset_locked( - grpc_fd* fd, grpc_pollset* read_notifier_pollset) { - fd->read_notifier_pollset = read_notifier_pollset; -} - static void fd_shutdown(grpc_fd* fd, grpc_error* why) { gpr_mu_lock(&fd->mu); /* only shutdown once */ @@ -608,8 +587,7 @@ static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, return mask; } -static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write, - grpc_pollset* read_notifier_pollset) { +static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write) { int was_polling = 0; int kick = 0; grpc_fd* fd = watcher->fd; @@ -645,9 +623,6 @@ static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write, if (set_ready_locked(fd, &fd->read_closure)) { kick = 1; } - if (read_notifier_pollset != nullptr) { - set_read_notifier_pollset_locked(fd, read_notifier_pollset); - } } if (got_write) { if (set_ready_locked(fd, &fd->write_closure)) { @@ -997,16 +972,16 @@ static grpc_error* pollset_work(grpc_pollset* pollset, for (i = 1; i < pfd_count; i++) { if (watchers[i].fd == nullptr) { - fd_end_poll(&watchers[i], 0, 0, nullptr); + fd_end_poll(&watchers[i], 0, 0); } else { // Wake up all the file descriptors, if we have an invalid one // we can identify it on the next pollset_work() - fd_end_poll(&watchers[i], 1, 1, pollset); + fd_end_poll(&watchers[i], 1, 1); } } } else if (r == 0) { for (i = 1; i < pfd_count; i++) { - fd_end_poll(&watchers[i], 0, 0, nullptr); + fd_end_poll(&watchers[i], 0, 0); } } else { if (pfds[0].revents & POLLIN_CHECK) { @@ -1018,7 +993,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset, } for (i = 1; i < pfd_count; i++) { if (watchers[i].fd == nullptr) { - fd_end_poll(&watchers[i], 0, 0, nullptr); + fd_end_poll(&watchers[i], 0, 0); } else { if (grpc_polling_trace.enabled()) { gpr_log(GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset, @@ -1032,7 +1007,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset, gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1); } fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK, - pfds[i].revents & POLLOUT_CHECK, pollset); + pfds[i].revents & POLLOUT_CHECK); } } } @@ -1724,7 +1699,6 @@ static const grpc_event_engine_vtable vtable = { fd_notify_on_write, fd_notify_on_error, fd_is_shutdown, - fd_get_read_notifier_pollset, pollset_init, pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_posix.h 
b/src/core/lib/iomgr/ev_posix.h index b4c17fc80d..393c3dd05e 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -52,7 +52,6 @@ typedef struct grpc_event_engine_vtable { void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure); void (*fd_notify_on_error)(grpc_fd* fd, grpc_closure* closure); bool (*fd_is_shutdown)(grpc_fd* fd); - grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_fd* fd); void (*pollset_init)(grpc_pollset* pollset, gpr_mu** mu); void (*pollset_shutdown)(grpc_pollset* pollset, grpc_closure* closure); @@ -142,9 +141,6 @@ void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure); * needs to have been set on grpc_fd_create */ void grpc_fd_notify_on_error(grpc_fd* fd, grpc_closure* closure); -/* Return the read notifier pollset from the fd */ -grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_fd* fd); - /* pollset_posix functions */ /* Add an fd to a pollset */ -- cgit v1.2.3 From 9896c641860a70e41b006a982e44f40bb3f41053 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Fri, 27 Jul 2018 01:38:26 -0700 Subject: Revert "Merge pull request #16158 from apolcyn/revert_windows_breakage" This reverts commit d9e8f86675cf923714b3ead4b06087e14a88c08c, reversing changes made to 04847aeb1e62bc528d88fa3c47daa24a4cf629b3. --- BUILD | 3 + CMakeLists.txt | 44 ++++-- Makefile | 42 ++++++ build.yaml | 15 ++ config.m4 | 3 + config.w32 | 3 + gRPC-Core.podspec | 3 + grpc.gemspec | 3 + grpc.gyp | 6 + include/grpc/impl/codegen/port_platform.h | 4 - package.xml | 3 + .../resolver/dns/c_ares/dns_resolver_ares.cc | 5 +- .../resolver/dns/c_ares/grpc_ares_ev_driver.cc | 6 +- .../dns/c_ares/grpc_ares_ev_driver_windows.cc | 59 ++++++++ .../resolver/dns/c_ares/grpc_ares_wrapper.cc | 9 +- .../resolver/dns/c_ares/grpc_ares_wrapper.h | 4 + .../resolver/dns/c_ares/grpc_ares_wrapper_posix.cc | 29 ++++ .../dns/c_ares/grpc_ares_wrapper_windows.cc | 29 ++++ src/core/lib/iomgr/socket_windows.cc | 29 ++++ src/core/lib/iomgr/socket_windows.h | 4 + src/python/grpcio/grpc_core_dependencies.py | 3 + test/core/iomgr/BUILD | 13 ++ .../iomgr/grpc_ipv6_loopback_available_test.cc | 48 +++++++ test/cpp/naming/address_sorting_test.cc | 160 ++++++++++++++++----- test/cpp/naming/gen_build_yaml.py | 2 +- third_party/address_sorting/address_sorting.c | 9 +- .../address_sorting/address_sorting_windows.c | 46 +++++- .../include/address_sorting/address_sorting.h | 3 + tools/doxygen/Doxyfile.core.internal | 3 + tools/run_tests/generated/sources_and_headers.json | 22 ++- tools/run_tests/generated/tests.json | 38 ++++- 31 files changed, 577 insertions(+), 73 deletions(-) create mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc create mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc create mode 100644 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc create mode 100644 test/core/iomgr/grpc_ipv6_loopback_available_test.cc (limited to 'src/core/lib/iomgr') diff --git a/BUILD b/BUILD index ee4b5dfaec..81390dd1aa 100644 --- a/BUILD +++ b/BUILD @@ -1433,7 +1433,10 @@ grpc_cc_library( "src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc", 
"src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc", ], hdrs = [ diff --git a/CMakeLists.txt b/CMakeLists.txt index 84e9c08cb5..e8e65d4b71 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -298,6 +298,7 @@ add_dependencies(buildtests_c grpc_completion_queue_test) add_dependencies(buildtests_c grpc_completion_queue_threading_test) add_dependencies(buildtests_c grpc_credentials_test) add_dependencies(buildtests_c grpc_fetch_oauth2) +add_dependencies(buildtests_c grpc_ipv6_loopback_available_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c grpc_json_token_test) endif() @@ -671,12 +672,8 @@ endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker) endif() -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx address_sorting_test_unsecure) -endif() -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx address_sorting_test) -endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx cancel_ares_query_test) endif() @@ -1236,8 +1233,11 @@ add_library(grpc src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc src/cpp/ext/filters/census/grpc_context.cc @@ -2538,8 +2538,11 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc @@ -7323,6 +7326,35 @@ target_link_libraries(grpc_fetch_oauth2 gpr ) +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + +add_executable(grpc_ipv6_loopback_available_test + 
test/core/iomgr/grpc_ipv6_loopback_available_test.cc +) + + +target_include_directories(grpc_ipv6_loopback_available_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${_gRPC_SSL_INCLUDE_DIR} + PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR} + PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR} + PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR} + PRIVATE ${_gRPC_CARES_INCLUDE_DIR} + PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR} + PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR} +) + +target_link_libraries(grpc_ipv6_loopback_available_test + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_test_util + grpc + gpr_test_util + gpr +) + endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) @@ -16351,7 +16383,6 @@ target_link_libraries(resolver_component_tests_runner_invoker endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(address_sorting_test_unsecure test/cpp/naming/address_sorting_test.cc @@ -16391,10 +16422,8 @@ target_link_libraries(address_sorting_test_unsecure ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(address_sorting_test test/cpp/naming/address_sorting_test.cc @@ -16434,7 +16463,6 @@ target_link_libraries(address_sorting_test ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) diff --git a/Makefile b/Makefile index bad41975a0..5174ab6719 100644 --- a/Makefile +++ b/Makefile @@ -1022,6 +1022,7 @@ grpc_completion_queue_threading_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_ grpc_create_jwt: $(BINDIR)/$(CONFIG)/grpc_create_jwt grpc_credentials_test: $(BINDIR)/$(CONFIG)/grpc_credentials_test grpc_fetch_oauth2: $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 +grpc_ipv6_loopback_available_test: $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test grpc_json_token_test: $(BINDIR)/$(CONFIG)/grpc_json_token_test grpc_jwt_verifier_test: $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test grpc_print_google_default_creds_token: $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token @@ -1472,6 +1473,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test \ $(BINDIR)/$(CONFIG)/grpc_credentials_test \ $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 \ + $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test \ $(BINDIR)/$(CONFIG)/grpc_json_token_test \ $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test \ $(BINDIR)/$(CONFIG)/grpc_security_connector_test \ @@ -2028,6 +2030,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test || ( echo test grpc_completion_queue_threading_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_credentials_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_credentials_test || ( echo test grpc_credentials_test failed ; exit 1 ) + $(E) "[RUN] Testing grpc_ipv6_loopback_available_test" + $(Q) $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test || ( echo test grpc_ipv6_loopback_available_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_json_token_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_json_token_test || ( echo test grpc_json_token_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_jwt_verifier_test" @@ -3704,8 +3708,11 @@ LIBGRPC_SRC = \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ 
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/cpp/ext/filters/census/grpc_context.cc \ @@ -4972,8 +4979,11 @@ LIBGRPC_UNSECURE_SRC = \ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ @@ -12365,6 +12375,38 @@ endif endif +GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_SRC = \ + test/core/iomgr/grpc_ipv6_loopback_available_test.cc \ + +GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. 
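# When NO_SECURE=true, the generated Makefile points the test binary at the
# openssl_dep_error target instead of emitting the real link rule below, so a
# missing OpenSSL surfaces as a clear dependency error rather than a link failure.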
+ +$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test: $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/iomgr/grpc_ipv6_loopback_available_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_grpc_ipv6_loopback_available_test: $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(GRPC_IPV6_LOOPBACK_AVAILABLE_TEST_OBJS:.o=.dep) +endif +endif + + GRPC_JSON_TOKEN_TEST_SRC = \ test/core/security/json_token_test.cc \ diff --git a/build.yaml b/build.yaml index 30389ec114..70af96046c 100644 --- a/build.yaml +++ b/build.yaml @@ -740,8 +740,11 @@ filegroups: - src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc + - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + - src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc plugin: grpc_resolver_dns_ares uses: - grpc_base @@ -2730,6 +2733,18 @@ targets: - grpc - gpr_test_util - gpr +- name: grpc_ipv6_loopback_available_test + build: test + language: c + src: + - test/core/iomgr/grpc_ipv6_loopback_available_test.cc + deps: + - grpc_test_util + - grpc + - gpr_test_util + - gpr + exclude_iomgrs: + - uv - name: grpc_json_token_test build: test language: c diff --git a/config.m4 b/config.m4 index c277ccafc8..aa40a698a6 100644 --- a/config.m4 +++ b/config.m4 @@ -380,8 +380,11 @@ if test "$PHP_GRPC" != "no"; then src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ + src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc \ src/cpp/ext/filters/census/grpc_context.cc \ diff --git a/config.w32 b/config.w32 index 2857781dd5..5afa4466ac 100644 --- a/config.w32 +++ b/config.w32 @@ -355,8 +355,11 @@ 
if (PHP_GRPC != "no") { "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\dns_resolver_ares.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_posix.cc " + + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_ev_driver_windows.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_fallback.cc " + + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_posix.cc " + + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\c_ares\\grpc_ares_wrapper_windows.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\dns\\native\\dns_resolver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver\\sockaddr\\sockaddr_resolver.cc " + "src\\cpp\\ext\\filters\\census\\grpc_context.cc " + diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 23edaec656..5c3649afbd 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -802,8 +802,11 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', diff --git a/grpc.gemspec b/grpc.gemspec index b69d5a7c6f..c250316b99 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -742,8 +742,11 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc ) + s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc ) + s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc ) + s.files += %w( src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc ) s.files += %w( src/cpp/ext/filters/census/grpc_context.cc ) diff --git a/grpc.gyp b/grpc.gyp index e1485efa05..25082fe540 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -572,8 +572,11 @@ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', @@ -1287,8 +1290,11 @@ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc', diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 01ce5f03e9..2b61a8816d 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -420,12 +420,8 @@ typedef unsigned __int64 uint64_t; #define GPR_MAX_ALIGNMENT 16 #ifndef GRPC_ARES -#ifdef GPR_WINDOWS -#define GRPC_ARES 0 -#else #define GRPC_ARES 1 #endif -#endif #ifndef GRPC_MUST_USE_RESULT #if defined(__GNUC__) && !defined(__MINGW32__) diff --git a/package.xml b/package.xml index 7f71536b1d..acdc6ffdb3 100644 --- a/package.xml +++ b/package.xml @@ -747,8 +747,11 @@ + + + diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index f4f6444c5f..7050e82121 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -142,8 +141,8 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args) channel_args_ = grpc_channel_args_copy(args.args); const grpc_arg* arg = grpc_channel_args_find( channel_args_, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION); - request_service_config_ = !grpc_channel_arg_get_integer( - arg, (grpc_integer_options){false, false, true}); + grpc_integer_options integer_options = {false, false, true}; + request_service_config_ = !grpc_channel_arg_get_integer(arg, integer_options); arg = grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); min_time_between_resolutions_ = diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc 
b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc index c886795608..0068d0d5f4 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc @@ -18,11 +18,10 @@ #include #include "src/core/lib/iomgr/port.h" -#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) +#if GRPC_ARES == 1 && !defined(GRPC_UV) #include #include -#include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" @@ -32,7 +31,6 @@ #include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/lib/gpr/string.h" -#include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/sockaddr_utils.h" @@ -314,4 +312,4 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) { } } -#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */ +#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc new file mode 100644 index 0000000000..5d65ae3ab3 --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc @@ -0,0 +1,59 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include + +#include "src/core/lib/iomgr/port.h" +#if GRPC_ARES == 1 && defined(GPR_WINDOWS) + +#include +#include +#include "src/core/lib/gprpp/memory.h" + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" + +namespace grpc_core { + +/* TODO: fill in the body of GrpcPolledFdWindows to enable c-ares on Windows. + This dummy implementation only allows grpc to compile on windows with + GRPC_ARES=1. 
*/ +class GrpcPolledFdWindows : public GrpcPolledFd { + public: + GrpcPolledFdWindows() { abort(); } + ~GrpcPolledFdWindows() { abort(); } + void RegisterForOnReadableLocked(grpc_closure* read_closure) override { + abort(); + } + void RegisterForOnWriteableLocked(grpc_closure* write_closure) override { + abort(); + } + bool IsFdStillReadableLocked() override { abort(); } + void ShutdownLocked(grpc_error* error) override { abort(); } + ares_socket_t GetWrappedAresSocketLocked() override { abort(); } + const char* GetName() override { abort(); } +}; + +GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, + grpc_pollset_set* driver_pollset_set) { + return nullptr; +} + +void ConfigureAresChannelLocked(ares_channel* channel) { abort(); } + +} // namespace grpc_core + +#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index 497ad998af..b3d6437e9a 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -22,7 +22,6 @@ #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" #include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/socket_utils_posix.h" #include #include @@ -215,7 +214,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in6_addr)); - addr.sin6_family = static_cast(hostent->h_addrtype); + addr.sin6_family = static_cast(hostent->h_addrtype); addr.sin6_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, @@ -236,7 +235,7 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in_addr)); - addr.sin_family = static_cast(hostent->h_addrtype); + addr.sin_family = static_cast(hostent->h_addrtype); addr.sin_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, @@ -281,7 +280,7 @@ static void on_srv_query_done_locked(void* arg, int status, int timeouts, grpc_ares_ev_driver_get_channel_locked(r->ev_driver); for (struct ares_srv_reply* srv_it = reply; srv_it != nullptr; srv_it = srv_it->next) { - if (grpc_ipv6_loopback_available()) { + if (grpc_ares_query_ipv6()) { grpc_ares_hostbyname_request* hr = create_hostbyname_request_locked( r, srv_it->host, htons(srv_it->port), true /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, @@ -452,7 +451,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl( } } r->pending_queries = 1; - if (grpc_ipv6_loopback_available()) { + if (grpc_ares_query_ipv6()) { hr = create_hostbyname_request_locked(r, host, strhtons(port), false /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_locked, diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h index ce26f5d524..17eaa7ccf0 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h @@ -70,6 +70,10 @@ void grpc_ares_cleanup(void); * and destroys the grpc_ares_request */ void 
grpc_ares_complete_request_locked(grpc_ares_request* request); +/* Indicates whether or not AAAA queries should be attempted. */ +/* E.g., return false if ipv6 is known to not be available. */ +bool grpc_ares_query_ipv6(); + /* Exposed only for testing */ void grpc_cares_wrapper_test_only_address_sorting_sort( grpc_lb_addresses* lb_addrs); diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc new file mode 100644 index 0000000000..23c0fec74f --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc @@ -0,0 +1,29 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" +#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/lib/iomgr/socket_utils_posix.h" + +bool grpc_ares_query_ipv6() { return grpc_ipv6_loopback_available(); } + +#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER) */ diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc new file mode 100644 index 0000000000..ee827e284e --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc @@ -0,0 +1,29 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" +#if GRPC_ARES == 1 && defined(GPR_WINDOWS) + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/lib/iomgr/socket_windows.h" + +bool grpc_ares_query_ipv6() { return grpc_ipv6_loopback_available(); } + +#endif /* GRPC_ARES == 1 && defined(GPR_WINDOWS) */ diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc index 2e23409582..4ad31cb35d 100644 --- a/src/core/lib/iomgr/socket_windows.cc +++ b/src/core/lib/iomgr/socket_windows.cc @@ -36,6 +36,7 @@ #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_windows.h" +#include "src/core/lib/iomgr/sockaddr_windows.h" #include "src/core/lib/iomgr/socket_windows.h" grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) { @@ -148,4 +149,32 @@ void grpc_socket_become_ready(grpc_winsocket* socket, if (should_destroy) destroy(socket); } +static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT; +static bool g_ipv6_loopback_available = false; + +static void probe_ipv6_once(void) { + SOCKET s = socket(AF_INET6, SOCK_STREAM, 0); + g_ipv6_loopback_available = 0; + if (s == INVALID_SOCKET) { + gpr_log(GPR_INFO, "Disabling AF_INET6 sockets because socket() failed."); + } else { + grpc_sockaddr_in6 addr; + memset(&addr, 0, sizeof(addr)); + addr.sin6_family = AF_INET6; + addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */ + if (bind(s, reinterpret_cast(&addr), sizeof(addr)) == 0) { + g_ipv6_loopback_available = 1; + } else { + gpr_log(GPR_INFO, + "Disabling AF_INET6 sockets because ::1 is not available."); + } + closesocket(s); + } +} + +int grpc_ipv6_loopback_available(void) { + gpr_once_init(&g_probe_ipv6_once, probe_ipv6_once); + return g_ipv6_loopback_available; +} + #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h index 7bd01eded5..b09b9da562 100644 --- a/src/core/lib/iomgr/socket_windows.h +++ b/src/core/lib/iomgr/socket_windows.h @@ -108,6 +108,10 @@ void grpc_socket_notify_on_read(grpc_winsocket* winsocket, void grpc_socket_become_ready(grpc_winsocket* winsocket, grpc_winsocket_callback_info* ci); +/* Returns true if this system can create AF_INET6 sockets bound to ::1. + The value is probed once, and cached for the life of the process. 
*/ +int grpc_ipv6_loopback_available(void); + #endif #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */ diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index 49185cc648..d6efb49750 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -354,8 +354,11 @@ CORE_SOURCE_FILES = [ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc', 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc', + 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc', 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc', 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc', 'src/cpp/ext/filters/census/grpc_context.cc', diff --git a/test/core/iomgr/BUILD b/test/core/iomgr/BUILD index fb0490a95f..002671a5fa 100644 --- a/test/core/iomgr/BUILD +++ b/test/core/iomgr/BUILD @@ -124,6 +124,19 @@ grpc_cc_test( ], ) +grpc_cc_test( + name = "grpc_ipv6_loopback_available_test", + srcs = ["grpc_ipv6_loopback_available_test.cc"], + language = "C++", + deps = [ + "//:gpr", + "//:grpc", + "//test/core/util:gpr_test_util", + "//test/core/util:grpc_test_util", + ], +) + + grpc_cc_test( name = "load_file_test", srcs = ["load_file_test.cc"], diff --git a/test/core/iomgr/grpc_ipv6_loopback_available_test.cc b/test/core/iomgr/grpc_ipv6_loopback_available_test.cc new file mode 100644 index 0000000000..329aa9a851 --- /dev/null +++ b/test/core/iomgr/grpc_ipv6_loopback_available_test.cc @@ -0,0 +1,48 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "src/core/lib/iomgr/port.h" + +// grpc_ipv6_loopback_available isn't currently available on UV. +#ifndef GRPC_UV + +#include +#include +#include "test/core/util/test_config.h" + +#ifdef GPR_WINDOWS +#include "src/core/lib/iomgr/socket_windows.h" +#else +#include "src/core/lib/iomgr/socket_utils_posix.h" +#endif + +int main(int argc, char** argv) { + grpc_test_init(argc, argv); + grpc_init(); + // This test assumes that the ipv6 loopback is available + // in all environments in which grpc tests run in. 
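// Aside (hedged, not part of the patch): on non-Windows builds this test picks
// up grpc_ipv6_loopback_available() from socket_utils_posix.h. The POSIX
// implementation is assumed here to mirror the Windows probe added above in
// socket_windows.cc -- roughly:
//   int fd = socket(AF_INET6, SOCK_STREAM, 0);
//   // try to bind to [::1]:0 and cache the boolean result under a gpr_once
//   bool ok = (fd >= 0) &&
//             (bind(fd, (struct sockaddr*)&loopback6, sizeof(loopback6)) == 0);
//   if (fd >= 0) close(fd);
// Treat this sketch as an assumption; only the Windows probe is shown in this
// series.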
+ GPR_ASSERT(grpc_ipv6_loopback_available()); + grpc_shutdown(); + return 0; +} + +#else + +int main(int argc, char** argv) { return 0; } + +#endif /* GRPC_UV */ diff --git a/test/cpp/naming/address_sorting_test.cc b/test/cpp/naming/address_sorting_test.cc index a92e9e3b3e..04c300876c 100644 --- a/test/cpp/naming/address_sorting_test.cc +++ b/test/cpp/naming/address_sorting_test.cc @@ -24,10 +24,8 @@ #include #include -#include #include #include -#include #include #include @@ -51,6 +49,11 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" +#ifndef GPR_WINDOWS +#include +#include +#endif + namespace { struct TestAddress { @@ -190,10 +193,18 @@ void VerifyLbAddrOutputs(grpc_lb_addresses* lb_addrs, grpc_lb_addresses_destroy(lb_addrs); } -} // namespace +/* We need to run each test case inside of its own + * isolated grpc_init/grpc_shutdown pair, so that + * the "address sorting source addr factory" can be + * restored to its default for each test case. */ +class AddressSortingTest : public ::testing::Test { + protected: + void SetUp() override { grpc_init(); } + void TearDown() override { grpc_shutdown(); } +}; /* Tests for rule 1 */ -TEST(AddressSortingTest, TestDepriotizesUnreachableAddresses) { +TEST_F(AddressSortingTest, TestDepriotizesUnreachableAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -212,7 +223,7 @@ TEST(AddressSortingTest, TestDepriotizesUnreachableAddresses) { }); } -TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { +TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { bool ipv4_supported = true; bool ipv6_supported = false; OverrideAddressSortingSourceAddrFactory( @@ -231,7 +242,7 @@ TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv6) { }); } -TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { +TEST_F(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { bool ipv4_supported = false; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -253,7 +264,7 @@ TEST(AddressSortingTest, TestDepriotizesUnsupportedDomainIpv4) { /* Tests for rule 2 */ -TEST(AddressSortingTest, TestDepriotizesNonMatchingScope) { +TEST_F(AddressSortingTest, TestDepriotizesNonMatchingScope) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -277,7 +288,7 @@ TEST(AddressSortingTest, TestDepriotizesNonMatchingScope) { /* Tests for rule 5 */ -TEST(AddressSortingTest, TestUsesLabelFromDefaultTable) { +TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTable) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -300,7 +311,7 @@ TEST(AddressSortingTest, TestUsesLabelFromDefaultTable) { /* Flip the input on the test above to reorder the sort function's * comparator's inputs. 
*/ -TEST(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { +TEST_F(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -323,8 +334,8 @@ TEST(AddressSortingTest, TestUsesLabelFromDefaultTableInputFlipped) { /* Tests for rule 6 */ -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithAnIpv4Address) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithAnIpv4Address) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -348,8 +359,8 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithV4CompatAndLocalhostAddress) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithV4CompatAndLocalhostAddress) { bool ipv4_supported = true; bool ipv6_supported = true; // Handle unique observed behavior of inet_ntop(v4-compatible-address) on OS X. @@ -377,8 +388,8 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithCatchAllAndLocalhostAddress) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithCatchAllAndLocalhostAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -403,8 +414,8 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddress) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -426,7 +437,7 @@ TEST(AddressSortingTest, }); } -TEST( +TEST_F( AddressSortingTest, TestUsesDestinationWithHigherPrecedenceWith2000PrefixedAddressEnsurePrefixMatchHasNoEffect) { bool ipv4_supported = true; @@ -448,8 +459,8 @@ TEST( }); } -TEST(AddressSortingTest, - TestUsesDestinationWithHigherPrecedenceWithLinkAndSiteLocalAddresses) { +TEST_F(AddressSortingTest, + TestUsesDestinationWithHigherPrecedenceWithLinkAndSiteLocalAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -469,19 +480,22 @@ TEST(AddressSortingTest, }); } -TEST( +TEST_F( AddressSortingTest, TestUsesDestinationWithHigherPrecedenceWithCatchAllAndAndV4MappedAddresses) { bool ipv4_supported = true; bool ipv6_supported = true; + // Use embedded ipv4 addresses with leading 1's instead of zero's to be + // compatible with inet_ntop implementations that can display such + // addresses with leading zero's as e.g.: "::ffff:0:2", as on windows. OverrideAddressSortingSourceAddrFactory( ipv4_supported, ipv6_supported, { - {"[::ffff:0.0.0.2]:443", {"[::ffff:0.0.0.3]:0", AF_INET6}}, + {"[::ffff:1.1.1.2]:443", {"[::ffff:1.1.1.3]:0", AF_INET6}}, {"[1234::2]:443", {"[1234::3]:0", AF_INET6}}, }); grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ - {"[::ffff:0.0.0.2]:443", AF_INET6}, + {"[::ffff:1.1.1.2]:443", AF_INET6}, {"[1234::2]:443", AF_INET6}, }); grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); @@ -489,13 +503,13 @@ TEST( // ::ffff:0:2 should match the v4-mapped // precedence entry and be deprioritized. 
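// (Background, added for clarity: under RFC 6724's default policy table the
// v4-mapped prefix ::ffff:0:0/96 carries a lower precedence than the
// catch-all ::/0 entry, so a v4-mapped destination such as ::ffff:1.1.1.2 is
// expected to sort after a plain IPv6 destination like [1234::2], independent
// of the source addresses configured above.)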
"[1234::2]:443", - "[::ffff:0.0.0.2]:443", + "[::ffff:1.1.1.2]:443", }); } /* Tests for rule 8 */ -TEST(AddressSortingTest, TestPrefersSmallerScope) { +TEST_F(AddressSortingTest, TestPrefersSmallerScope) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -520,7 +534,7 @@ TEST(AddressSortingTest, TestPrefersSmallerScope) { /* Tests for rule 9 */ -TEST(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { +TEST_F(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -543,8 +557,8 @@ TEST(AddressSortingTest, TestPrefersLongestMatchingSrcDstPrefix) { }); } -TEST(AddressSortingTest, - TestPrefersLongestMatchingSrcDstPrefixMatchesWholeAddress) { +TEST_F(AddressSortingTest, + TestPrefersLongestMatchingSrcDstPrefixMatchesWholeAddress) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -564,7 +578,7 @@ TEST(AddressSortingTest, }); } -TEST(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { +TEST_F(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -584,7 +598,7 @@ TEST(AddressSortingTest, TestPrefersLongestPrefixStressInnerBytePrefix) { }); } -TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { +TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -604,7 +618,7 @@ TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersOnHighestBitOfByte) { }); } -TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { +TEST_F(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -628,7 +642,7 @@ TEST(AddressSortingTest, TestPrefersLongestPrefixDiffersByLastBit) { /* Tests for rule 10 */ -TEST(AddressSortingTest, TestStableSort) { +TEST_F(AddressSortingTest, TestStableSort) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -648,7 +662,7 @@ TEST(AddressSortingTest, TestStableSort) { }); } -TEST(AddressSortingTest, TestStableSortFiveElements) { +TEST_F(AddressSortingTest, TestStableSortFiveElements) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory( @@ -677,7 +691,7 @@ TEST(AddressSortingTest, TestStableSortFiveElements) { }); } -TEST(AddressSortingTest, TestStableSortNoSrcAddrsExist) { +TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExist) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory(ipv4_supported, ipv6_supported, {}); @@ -698,7 +712,7 @@ TEST(AddressSortingTest, TestStableSortNoSrcAddrsExist) { }); } -TEST(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { +TEST_F(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { bool ipv4_supported = true; bool ipv6_supported = true; OverrideAddressSortingSourceAddrFactory(ipv4_supported, ipv6_supported, {}); @@ -713,7 +727,7 @@ TEST(AddressSortingTest, TestStableSortNoSrcAddrsExistWithIpv4) { }); } -TEST(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { +TEST_F(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { bool ipv4_supported = true; bool 
ipv6_supported = true; // Handle unique observed behavior of inet_ntop(v4-compatible-address) on OS X. @@ -744,6 +758,78 @@ TEST(AddressSortingTest, TestStableSortV4CompatAndSiteLocalAddresses) { }); } +/* TestPrefersIpv6Loopback tests the actual "address probing" code + * for the current platform, without any mocks. + * This test relies on the assumption that the ipv6 loopback address is + * available in the hosts/containers that grpc C/C++ tests run on + * (whether ipv4 loopback is available or not, an available ipv6 + * loopback should be preferred). */ +TEST_F(AddressSortingTest, TestPrefersIpv6Loopback) { + grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ + {"[::1]:443", AF_INET6}, + {"127.0.0.1:443", AF_INET}, + }); + grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); + VerifyLbAddrOutputs(lb_addrs, { + "[::1]:443", + "127.0.0.1:443", + }); +} + +/* Flip the order of the inputs above and expect the same output order + * (try to rule out influence of arbitrary qsort ordering) */ +TEST_F(AddressSortingTest, TestPrefersIpv6LoopbackInputsFlipped) { + grpc_lb_addresses* lb_addrs = BuildLbAddrInputs({ + {"127.0.0.1:443", AF_INET}, + {"[::1]:443", AF_INET6}, + }); + grpc_cares_wrapper_test_only_address_sorting_sort(lb_addrs); + VerifyLbAddrOutputs(lb_addrs, { + "[::1]:443", + "127.0.0.1:443", + }); +} + +/* Try to rule out false positives in the above two tests in which + * the sorter might think that neither ipv6 or ipv4 loopback is + * available, but ipv6 loopback is still preferred only due + * to precedance table lookups. */ +TEST_F(AddressSortingTest, TestSorterKnowsIpv6LoopbackIsAvailable) { + sockaddr_in6 ipv6_loopback; + memset(&ipv6_loopback, 0, sizeof(ipv6_loopback)); + ipv6_loopback.sin6_family = AF_INET6; + ((char*)&ipv6_loopback.sin6_addr)[15] = 1; + ipv6_loopback.sin6_port = htons(443); + // Set up the source and destination parameters of + // address_sorting_get_source_addr + address_sorting_address sort_input_dest; + memcpy(&sort_input_dest.addr, &ipv6_loopback, sizeof(ipv6_loopback)); + sort_input_dest.len = sizeof(ipv6_loopback); + address_sorting_address source_for_sort_input_dest; + memset(&source_for_sort_input_dest, 0, sizeof(source_for_sort_input_dest)); + // address_sorting_get_source_addr returns true if a source address was found + // for the destination address, otherwise false. + EXPECT_TRUE(address_sorting_get_source_addr_for_testing( + &sort_input_dest, &source_for_sort_input_dest)); + // Now also check that the source address was filled in correctly. + EXPECT_GT(source_for_sort_input_dest.len, 0u); + sockaddr_in6* source_addr_output = + (sockaddr_in6*)source_for_sort_input_dest.addr; + EXPECT_EQ(source_addr_output->sin6_family, AF_INET6); + char* buf = static_cast(gpr_zalloc(100)); + EXPECT_NE(inet_ntop(AF_INET6, &source_addr_output->sin6_addr, buf, 100), + nullptr) + << "inet_ntop failed. Errno: " + std::to_string(errno); + std::string source_addr_str(buf); + gpr_free(buf); + // This test + // assumes that the source address for any loopback destination is also the + // loopback address. + EXPECT_EQ(source_addr_str, "::1"); +} + +} // namespace + int main(int argc, char** argv) { char* resolver = gpr_getenv("GRPC_DNS_RESOLVER"); if (resolver == nullptr || strlen(resolver) == 0) { @@ -754,9 +840,7 @@ int main(int argc, char** argv) { gpr_free(resolver); grpc_test_init(argc, argv); ::testing::InitGoogleTest(&argc, argv); - grpc_init(); auto result = RUN_ALL_TESTS(); - grpc_shutdown(); // Test sequential and nested inits and shutdowns. 
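// (Clarifying note: each TEST_F case above already runs inside its own
// grpc_init()/grpc_shutdown() pair via the AddressSortingTest fixture, which
// is why the init/shutdown that used to wrap RUN_ALL_TESTS() was removed.
// The calls below only exercise repeated and nested init/shutdown after the
// tests have finished.)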
grpc_init(); grpc_init(); diff --git a/test/cpp/naming/gen_build_yaml.py b/test/cpp/naming/gen_build_yaml.py index baa6512f62..5dad2ea7af 100755 --- a/test/cpp/naming/gen_build_yaml.py +++ b/test/cpp/naming/gen_build_yaml.py @@ -110,7 +110,7 @@ def main(): 'gtest': True, 'run': True, 'src': ['test/cpp/naming/address_sorting_test.cc'], - 'platforms': ['linux', 'posix', 'mac'], + 'platforms': ['linux', 'posix', 'mac', 'windows'], 'deps': [ 'grpc++_test_util' + unsecure_build_config_suffix, 'grpc_test_util' + unsecure_build_config_suffix, diff --git a/third_party/address_sorting/address_sorting.c b/third_party/address_sorting/address_sorting.c index e4f3b53799..9aee0a5419 100644 --- a/third_party/address_sorting/address_sorting.c +++ b/third_party/address_sorting/address_sorting.c @@ -55,12 +55,17 @@ static const int kIPv6AddrScopeGlobal = 3; static address_sorting_source_addr_factory* g_current_source_addr_factory = NULL; -static int address_sorting_get_source_addr(const address_sorting_address* dest, - address_sorting_address* source) { +static bool address_sorting_get_source_addr(const address_sorting_address* dest, + address_sorting_address* source) { return g_current_source_addr_factory->vtable->get_source_addr( g_current_source_addr_factory, dest, source); } +bool address_sorting_get_source_addr_for_testing( + const address_sorting_address* dest, address_sorting_address* source) { + return address_sorting_get_source_addr(dest, source); +} + static int ipv6_prefix_match_length(const struct sockaddr_in6* sa, const struct sockaddr_in6* sb) { unsigned char* a = (unsigned char*)&sa->sin6_addr; diff --git a/third_party/address_sorting/address_sorting_windows.c b/third_party/address_sorting/address_sorting_windows.c index b2f5708649..662a88248e 100644 --- a/third_party/address_sorting/address_sorting_windows.c +++ b/third_party/address_sorting/address_sorting_windows.c @@ -42,14 +42,54 @@ #if defined(ADDRESS_SORTING_WINDOWS) +#include +#include +#include #include +#include +#include -/* TODO : Add address sorting functionality to work on windows. 
*/ +static bool windows_source_addr_factory_get_source_addr( + address_sorting_source_addr_factory* factory, + const address_sorting_address* dest_addr, + address_sorting_address* source_addr) { + bool source_addr_exists = false; + SOCKET s = socket(((struct sockaddr_in6*)dest_addr)->sin6_family, SOCK_DGRAM, + IPPROTO_UDP); + if (s != INVALID_SOCKET) { + if (connect(s, (struct sockaddr*)dest_addr, (int)dest_addr->len) == 0) { + address_sorting_address found_source_addr; + memset(&found_source_addr, 0, sizeof(found_source_addr)); + found_source_addr.len = sizeof(found_source_addr.addr); + if (getsockname(s, (struct sockaddr*)&found_source_addr.addr, + (socklen_t*)&found_source_addr.len) == 0) { + source_addr_exists = true; + *source_addr = found_source_addr; + } + } + closesocket(s); + } + return source_addr_exists; +} + +static void windows_source_addr_factory_destroy( + address_sorting_source_addr_factory* self) { + free(self); +} + +static const address_sorting_source_addr_factory_vtable + windows_source_addr_factory_vtable = { + windows_source_addr_factory_get_source_addr, + windows_source_addr_factory_destroy, +}; address_sorting_source_addr_factory* address_sorting_create_source_addr_factory_for_current_platform() { - abort(); - return NULL; + address_sorting_source_addr_factory* factory = + malloc(sizeof(address_sorting_source_addr_factory)); + memset(factory, 0, sizeof(address_sorting_source_addr_factory)); + factory->vtable = &windows_source_addr_factory_vtable; + return factory; } #endif // defined(ADDRESS_SORTING_WINDOWS) diff --git a/third_party/address_sorting/include/address_sorting/address_sorting.h b/third_party/address_sorting/include/address_sorting/address_sorting.h index f11cd424b5..c58fafe3f7 100644 --- a/third_party/address_sorting/include/address_sorting/address_sorting.h +++ b/third_party/address_sorting/include/address_sorting/address_sorting.h @@ -103,6 +103,9 @@ address_sorting_family address_sorting_abstract_get_family( void address_sorting_override_source_addr_factory_for_testing( address_sorting_source_addr_factory* factory); +bool address_sorting_get_source_addr_for_testing( + const address_sorting_address* dest, address_sorting_address* source); + #ifdef __cplusplus } #endif diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 576950934e..18f56984fe 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -924,9 +924,12 @@ src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc \ +src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h \ src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc \ +src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc \ +src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc \ src/core/ext/filters/client_channel/resolver/dns/native/README.md \ src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc \ src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \ diff --git 
a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 072402b2cf..a686dae8b4 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -1032,6 +1032,23 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "grpc_ipv6_loopback_available_test", + "src": [ + "test/core/iomgr/grpc_ipv6_loopback_available_test.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -10254,9 +10271,12 @@ "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc", "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h", - "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc" + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc", + "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc" ], "third_party": false, "type": "filegroup" diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index a5439a5db1..5815f82fef 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -1313,6 +1313,32 @@ ], "uses_polling": true }, + { + "args": [], + "benchmark": false, + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "gtest": false, + "language": "c", + "name": "grpc_ipv6_loopback_available_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "uses_polling": true + }, { "args": [], "benchmark": false, @@ -5710,7 +5736,8 @@ "ci_platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5722,7 +5749,8 @@ "platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "uses_polling": true }, @@ -5732,7 +5760,8 @@ "ci_platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5744,7 +5773,8 @@ "platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "uses_polling": true }, -- cgit v1.2.3 From c137d233420092f6d7c0da88b315d6c336767ded Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Fri, 27 Jul 2018 11:29:24 -0700 Subject: Don't abort on notify_on_error for poll. Instead simply schedule closure with cancel.. 
Soft error --- src/core/lib/iomgr/ev_poll_posix.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc index ff4888eeb8..a4a83c4ad7 100644 --- a/src/core/lib/iomgr/ev_poll_posix.cc +++ b/src/core/lib/iomgr/ev_poll_posix.cc @@ -532,8 +532,10 @@ static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) { } static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) { - gpr_log(GPR_ERROR, "Polling engine does not support tracking errors."); - abort(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_ERROR, "Polling engine does not support tracking errors."); + } + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CANCELLED); } static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, -- cgit v1.2.3 From 7a0e389a732790ebf6b77da0ac6120634c32487a Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Fri, 27 Jul 2018 11:31:35 -0700 Subject: Do not abort. Just fail softly --- src/core/lib/iomgr/ev_poll_posix.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc index 7801c02355..2a1fef2933 100644 --- a/src/core/lib/iomgr/ev_poll_posix.cc +++ b/src/core/lib/iomgr/ev_poll_posix.cc @@ -570,8 +570,9 @@ static void fd_set_writable(grpc_fd* fd) { } static void fd_set_error(grpc_fd* fd) { - gpr_log(GPR_ERROR, "Polling engine does not support tracking errors."); - abort(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_ERROR, "Polling engine does not support tracking errors."); + } } static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, -- cgit v1.2.3 From c2a22a1ab8e8221c95f8874668eb6260c1e171b4 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Fri, 27 Jul 2018 16:19:03 -0700 Subject: Address core review comments --- src/core/lib/iomgr/resource_quota.cc | 80 ++++++++++++++------------ src/core/lib/iomgr/resource_quota.h | 8 +-- src/cpp/thread_manager/thread_manager.cc | 6 +- src/cpp/thread_manager/thread_manager.h | 7 ++- test/core/iomgr/resource_quota_test.cc | 45 ++++++++------- test/cpp/thread_manager/thread_manager_test.cc | 30 +++++----- 6 files changed, 94 insertions(+), 82 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index 47b7856e95..b6fc7579f7 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -97,7 +97,7 @@ struct grpc_resource_user { bool added_to_free_pool; /* The number of threads currently allocated to this resource user */ - gpr_atm num_threads; + gpr_atm num_threads_allocated; /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer */ @@ -138,22 +138,23 @@ struct grpc_resource_quota { gpr_atm last_size; - /* Mutex to protect max_threads and num_threads */ - /* Note: We could have used gpr_atm for max_threads and num_threads and avoid - * having this mutex; but in that case, each invocation of the function - * grpc_resource_user_alloc_threads() would have had to do at least two atomic - * loads (for max_threads and num_threads) followed by a CAS (on num_threads). - * Moreover, we expect grpc_resource_user_alloc_threads() to be often called - * concurrently thereby increasing the chances of failing the CAS operation. 
- * This additional complexity is not worth the tiny perf gain we may (or may - * not) have by using atomics */ - gpr_mu thd_mu; + /* Mutex to protect max_threads and num_threads_allocated */ + /* Note: We could have used gpr_atm for max_threads and num_threads_allocated + * and avoid having this mutex; but in that case, each invocation of the + * function grpc_resource_user_allocate_threads() would have had to do at + * least two atomic loads (for max_threads and num_threads_allocated) followed + * by a CAS (on num_threads_allocated). + * Moreover, we expect grpc_resource_user_allocate_threads() to be often + * called concurrently thereby increasing the chances of failing the CAS + * operation. This additional complexity is not worth the tiny perf gain we + * may (or may not) have by using atomics */ + gpr_mu thread_count_mu; /* Max number of threads allowed */ int max_threads; /* Number of threads currently allocated via this resource_quota object */ - int num_threads; + int num_threads_allocated; /* Has rq_step been scheduled to occur? */ bool step_scheduled; @@ -548,9 +549,9 @@ static void ru_destroy(void* ru, grpc_error* error) { grpc_resource_user* resource_user = static_cast(ru); GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0); // Free all the remaining thread quota - grpc_resource_user_free_threads( - resource_user, - static_cast(gpr_atm_no_barrier_load(&resource_user->num_threads))); + grpc_resource_user_free_threads(resource_user, + static_cast(gpr_atm_no_barrier_load( + &resource_user->num_threads_allocated))); for (int i = 0; i < GRPC_RULIST_COUNT; i++) { rulist_remove(resource_user, static_cast(i)); @@ -622,9 +623,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { resource_quota->free_pool = INT64_MAX; resource_quota->size = INT64_MAX; gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX); - gpr_mu_init(&resource_quota->thd_mu); + gpr_mu_init(&resource_quota->thread_count_mu); resource_quota->max_threads = INT_MAX; - resource_quota->num_threads = 0; + resource_quota->num_threads_allocated = 0; resource_quota->step_scheduled = false; resource_quota->reclaiming = false; gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0); @@ -647,7 +648,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) { if (gpr_unref(&resource_quota->refs)) { - GPR_ASSERT(resource_quota->num_threads == 0); // No outstanding thd quota + // No outstanding thread quota + GPR_ASSERT(resource_quota->num_threads_allocated == 0); GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota"); gpr_free(resource_quota->name); gpr_free(resource_quota); @@ -681,9 +683,10 @@ double grpc_resource_quota_get_memory_pressure( /* Public API */ void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota, int new_max_threads) { - gpr_mu_lock(&resource_quota->thd_mu); + GPR_ASSERT(new_max_threads >= 0); + gpr_mu_lock(&resource_quota->thread_count_mu); resource_quota->max_threads = new_max_threads; - gpr_mu_unlock(&resource_quota->thd_mu); + gpr_mu_unlock(&resource_quota->thread_count_mu); } /* Public API */ @@ -771,7 +774,7 @@ grpc_resource_user* grpc_resource_user_create( grpc_closure_list_init(&resource_user->on_allocated); resource_user->allocating = false; resource_user->added_to_free_pool = false; - gpr_atm_no_barrier_store(&resource_user->num_threads, 0); + gpr_atm_no_barrier_store(&resource_user->num_threads_allocated, 0); 
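// Usage sketch (hypothetical caller, not introduced by this patch): the
// thread-quota counter initialized above is intended to be driven as
//   if (grpc_resource_user_allocate_threads(ru, n)) {
//     // ... spawn up to n threads ...
//     grpc_resource_user_free_threads(ru, n);  // as the threads exit
//   }
// Per the header comment elsewhere in this patch, allocate/free calls need
// not pair one-to-one; only the totals must eventually balance.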
resource_user->reclaimers[0] = nullptr; resource_user->reclaimers[1] = nullptr; resource_user->new_reclaimers[0] = nullptr; @@ -826,35 +829,38 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) { } } -bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user, - int thd_count) { +bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user, + int thread_count) { + GPR_ASSERT(thread_count >= 0); bool is_success = false; - gpr_mu_lock(&resource_user->resource_quota->thd_mu); + gpr_mu_lock(&resource_user->resource_quota->thread_count_mu); grpc_resource_quota* rq = resource_user->resource_quota; - if (rq->num_threads + thd_count <= rq->max_threads) { - rq->num_threads += thd_count; - gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, thd_count); + if (rq->num_threads_allocated + thread_count <= rq->max_threads) { + rq->num_threads_allocated += thread_count; + gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated, + thread_count); is_success = true; } - gpr_mu_unlock(&resource_user->resource_quota->thd_mu); + gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu); return is_success; } void grpc_resource_user_free_threads(grpc_resource_user* resource_user, - int thd_count) { - gpr_mu_lock(&resource_user->resource_quota->thd_mu); + int thread_count) { + GPR_ASSERT(thread_count >= 0); + gpr_mu_lock(&resource_user->resource_quota->thread_count_mu); grpc_resource_quota* rq = resource_user->resource_quota; - rq->num_threads -= thd_count; - int old_cnt = static_cast( - gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, -thd_count)); - if (old_cnt < thd_count || rq->num_threads < 0) { + rq->num_threads_allocated -= thread_count; + int old_count = static_cast(gpr_atm_no_barrier_fetch_add( + &resource_user->num_threads_allocated, -thread_count)); + if (old_count < thread_count || rq->num_threads_allocated < 0) { gpr_log(GPR_ERROR, - "Releasing more threads (%d) that currently allocated (rq threads: " + "Releasing more threads (%d) than currently allocated (rq threads: " "%d, ru threads: %d)", - thd_count, old_cnt, rq->num_threads + thd_count); + thread_count, rq->num_threads_allocated + thread_count, old_count); abort(); } - gpr_mu_unlock(&resource_user->resource_quota->thd_mu); + gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu); } void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size, diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index 7342ef84c8..1d5e95e04a 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -96,14 +96,14 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user); /* Attempts to get quota (from the resource_user) to create 'thd_count' number * of threads. Returns true if successful (i.e the caller is now free to create * 'thd_count' number of threads) or false if quota is not available */ -bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user, - int thd_count); +bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user, + int thd_count); /* Releases 'thd_count' worth of quota back to the resource user. The quota * should have been previously obtained successfully by calling - * grpc_resource_user_alloc_threads(). + * grpc_resource_user_allocate_threads(). 
* * Note: There need not be an exact one-to-one correspondence between - * grpc_resource_user_alloc_threads() and grpc_resource_user_free_threads() + * grpc_resource_user_allocate_threads() and grpc_resource_user_free_threads() * calls. The only requirement is that the number of threads allocated should * all be eventually released */ void grpc_resource_user_free_threads(grpc_resource_user* resource_user, diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index 5d367511e2..57067d4696 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -123,7 +123,7 @@ void ThreadManager::CleanupCompletedThreads() { } void ThreadManager::Initialize() { - if (!grpc_resource_user_alloc_threads(resource_user_, min_pollers_)) { + if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) { gpr_log(GPR_ERROR, "No thread quota available to even create the minimum required " "polling threads (i.e %d). Unable to start the thread manager", @@ -165,9 +165,9 @@ void ThreadManager::MainWorkLoop() { break; case WORK_FOUND: // If we got work and there are now insufficient pollers and there is - // quota available to create a new thread,start a new poller thread + // quota available to create a new thread, start a new poller thread if (!shutdown_ && num_pollers_ < min_pollers_ && - grpc_resource_user_alloc_threads(resource_user_, 1)) { + grpc_resource_user_allocate_threads(resource_user_, 1)) { num_pollers_++; num_threads_++; max_active_threads_sofar_ = diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h index 8332befed0..01043edb31 100644 --- a/src/cpp/thread_manager/thread_manager.h +++ b/src/cpp/thread_manager/thread_manager.h @@ -100,7 +100,7 @@ class ThreadManager { // ThreadManager::MarkAsCompleted() // // WHY IS THIS NEEDED?: - // When a thread terminates, some other tread *must* call Join() on that + // When a thread terminates, some other thread *must* call Join() on that // thread so that the resources are released. Having a WorkerThread wrapper // will make this easier. 
Once Run() completes, each thread calls the // following two functions: @@ -113,8 +113,9 @@ class ThreadManager { // in the completed_threads_ list (since a thread cannot call Join() on // itself, it calls CleanupCompletedThreads() *before* calling // MarkAsCompleted()) - // TODO: sreek - consider creating the threads 'detached' so that Join() need - // not be called + // + // TODO(sreek): Consider creating the threads 'detached' so that Join() need + // not be called (and the need for this WorkerThread class is eliminated) class WorkerThread { public: WorkerThread(ThreadManager* thd_mgr); diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc index 573e4010fa..f3b35fed32 100644 --- a/test/core/iomgr/resource_quota_test.cc +++ b/test/core/iomgr/resource_quota_test.cc @@ -810,30 +810,31 @@ static void test_thread_limit() { grpc_resource_quota_set_max_threads(rq, 100); // Request quota for 100 threads (50 for ru1, 50 for ru2) - GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 10)); - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 10)); - GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 40)); - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 40)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 10)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 40)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 40)); - // Threads exhaused. Next request must fail - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 20)); + // Threads exhausted. Next request must fail + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20)); // Free 20 threads from two different users grpc_resource_user_free_threads(ru1, 10); grpc_resource_user_free_threads(ru2, 10); // Next request to 20 threads must succeed - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 20)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); // No more thread quota again - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 20)); + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 20)); // Free 10 more grpc_resource_user_free_threads(ru1, 10); - GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 5)); - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 10)); // Only 5 available - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 5)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 5)); + GPR_ASSERT( + !grpc_resource_user_allocate_threads(ru2, 10)); // Only 5 available + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 5)); // Teardown (ru1 and ru2 release all the quota back to rq) grpc_resource_user_unref(ru1); @@ -841,7 +842,7 @@ static void test_thread_limit() { grpc_resource_quota_unref(rq); } -// Change max quota in either directions dynamically +// Change max quota in either direction dynamically static void test_thread_maxquota_change() { grpc_core::ExecCtx exec_ctx; @@ -854,34 +855,34 @@ static void test_thread_maxquota_change() { grpc_resource_quota_set_max_threads(rq, 100); // Request quota for 100 threads (50 for ru1, 50 for ru2) - GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 50)); - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 50)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 50)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 50)); - // Threads exhaused. Next request must fail - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 20)); + // Threads exhausted. 
Next request must fail + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20)); // Increase maxquota and retry // Max threads = 150; grpc_resource_quota_set_max_threads(rq, 150); - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 20)); // ru2 = 70, ru1 = 50 + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); // ru2=70, ru1=50 // Decrease maxquota (Note: Quota already given to ru1 and ru2 is unaffected) // Max threads = 10; grpc_resource_quota_set_max_threads(rq, 10); // New requests will fail until quota is available - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); // Make quota available - grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0 - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); // Still not enough + grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0 + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); // not enough grpc_resource_user_free_threads(ru2, 70); // ru2 now has 0 // Now we can get quota up-to 10, the current max - GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 10)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10)); // No more thread quota again - GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); // Teardown (ru1 and ru2 release all the quota back to rq) grpc_resource_user_unref(ru1); diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index a7ed2dd380..838f5f72ad 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -124,16 +124,18 @@ static void TestPollAndWork() { 2 /* min_pollers */, 10 /* max_pollers */, 10 /* poll_duration_ms */, 1 /* work_duration_ms */, 50 /* max_poll_calls */}; - grpc::ThreadManagerTest test_thd_mgr("TestThreadManager", rq, settings); + grpc::ThreadManagerTest test_thread_mgr("TestThreadManager", rq, settings); grpc_resource_quota_unref(rq); - test_thd_mgr.Initialize(); // Start the thread manager - test_thd_mgr.Wait(); // Wait for all threads to finish + test_thread_mgr.Initialize(); // Start the thread manager + test_thread_mgr.Wait(); // Wait for all threads to finish // Verify that The number of times DoWork() was called is equal to the number // of times WORK_FOUND was returned - gpr_log(GPR_DEBUG, "DoWork() called %d times", test_thd_mgr.GetNumDoWork()); - GPR_ASSERT(test_thd_mgr.GetNumDoWork() == test_thd_mgr.GetNumWorkFound()); + gpr_log(GPR_DEBUG, "DoWork() called %d times", + test_thread_mgr.GetNumDoWork()); + GPR_ASSERT(test_thread_mgr.GetNumDoWork() == + test_thread_mgr.GetNumWorkFound()); } static void TestThreadQuota() { @@ -151,18 +153,20 @@ static void TestThreadQuota() { // Create two thread managers (but with same resource quota). This means // that the max number of active threads across BOTH the thread managers // cannot be greater than kMaxNumthreads - grpc::ThreadManagerTest test_thd_mgr_1("TestThreadManager-1", rq, settings); - grpc::ThreadManagerTest test_thd_mgr_2("TestThreadManager-2", rq, settings); + grpc::ThreadManagerTest test_thread_mgr_1("TestThreadManager-1", rq, + settings); + grpc::ThreadManagerTest test_thread_mgr_2("TestThreadManager-2", rq, + settings); // It is ok to unref resource quota before starting thread managers. 
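// (Hedged note: unref-ing here is presumably safe because each ThreadManager
// creates its own grpc_resource_user from the quota at construction time, and
// a resource user keeps the quota alive for as long as the user itself lives;
// the test relies only on the documented claim above, not on this internal
// detail.)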
grpc_resource_quota_unref(rq); // Start both thread managers - test_thd_mgr_1.Initialize(); - test_thd_mgr_2.Initialize(); + test_thread_mgr_1.Initialize(); + test_thread_mgr_2.Initialize(); // Wait for both to finish - test_thd_mgr_1.Wait(); - test_thd_mgr_2.Wait(); + test_thread_mgr_1.Wait(); + test_thread_mgr_2.Wait(); // Now verify that the total number of active threads in either thread manager // never exceeds kMaxNumThreads @@ -173,8 +177,8 @@ static void TestThreadQuota() { // Its okay to not test this case here. The resource quota c-core tests // provide enough coverage to resource quota object with multiple resource // users - int max1 = test_thd_mgr_1.GetMaxActiveThreadsSoFar(); - int max2 = test_thd_mgr_2.GetMaxActiveThreadsSoFar(); + int max1 = test_thread_mgr_1.GetMaxActiveThreadsSoFar(); + int max2 = test_thread_mgr_2.GetMaxActiveThreadsSoFar(); gpr_log( GPR_DEBUG, "MaxActiveThreads in TestThreadManager_1: %d, TestThreadManager_2: %d", -- cgit v1.2.3 From bea98c3c1b32f5959e57f00bb8fd4e129531b671 Mon Sep 17 00:00:00 2001 From: Nicolas Noble Date: Tue, 31 Jul 2018 21:12:45 -0700 Subject: Revert "Restrict the number of threads in C++ sync server" --- grpc.def | 1 - include/grpc/grpc.h | 4 - include/grpcpp/resource_quota.h | 16 +-- include/grpcpp/server.h | 3 +- src/core/lib/iomgr/resource_quota.cc | 78 ------------- src/core/lib/iomgr/resource_quota.h | 16 --- src/cpp/common/resource_quota_cc.cc | 4 - src/cpp/server/server_builder.cc | 2 +- src/cpp/server/server_cc.cc | 31 +---- src/cpp/thread_manager/thread_manager.cc | 53 ++------- src/cpp/thread_manager/thread_manager.h | 48 +------- src/ruby/ext/grpc/rb_grpc_imports.generated.c | 2 - src/ruby/ext/grpc/rb_grpc_imports.generated.h | 3 - test/core/iomgr/resource_quota_test.cc | 97 ---------------- test/core/surface/public_headers_must_be_c89.c | 1 - test/cpp/thread_manager/thread_manager_test.cc | 149 ++++++------------------- 16 files changed, 63 insertions(+), 445 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/grpc.def b/grpc.def index 312e916682..5b98792662 100644 --- a/grpc.def +++ b/grpc.def @@ -68,7 +68,6 @@ EXPORTS grpc_resource_quota_ref grpc_resource_quota_unref grpc_resource_quota_resize - grpc_resource_quota_set_max_threads grpc_resource_quota_arg_vtable grpc_channelz_get_top_channels grpc_channelz_get_channel diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index eb0251443c..f0eb2c0121 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -450,10 +450,6 @@ GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota* resource_quota); GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, size_t new_size); -/** Update the size of the maximum number of threads allowed */ -GRPCAPI void grpc_resource_quota_set_max_threads( - grpc_resource_quota* resource_quota, int new_max_threads); - /** Fetch a vtable for a grpc_channel_arg that points to a grpc_resource_quota */ GRPCAPI const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void); diff --git a/include/grpcpp/resource_quota.h b/include/grpcpp/resource_quota.h index 50bd1cb849..554437a40d 100644 --- a/include/grpcpp/resource_quota.h +++ b/include/grpcpp/resource_quota.h @@ -26,10 +26,10 @@ struct grpc_resource_quota; namespace grpc { -/// ResourceQuota represents a bound on memory and thread usage by the gRPC -/// library. A ResourceQuota can be attached to a server (via \a ServerBuilder), +/// ResourceQuota represents a bound on memory usage by the gRPC library. 
+/// A ResourceQuota can be attached to a server (via \a ServerBuilder), /// or a client channel (via \a ChannelArguments). -/// gRPC will attempt to keep memory and threads used by all attached entities +/// gRPC will attempt to keep memory used by all attached entities /// below the ResourceQuota bound. class ResourceQuota final : private GrpcLibraryCodegen { public: @@ -44,16 +44,6 @@ class ResourceQuota final : private GrpcLibraryCodegen { /// No time bound is given for this to occur however. ResourceQuota& Resize(size_t new_size); - /// Set the max number of threads that can be allocated from this - /// ResourceQuota object. - /// - /// If the new_max_threads value is smaller than the current value, no new - /// threads are allocated until the number of active threads fall below - /// new_max_threads. There is no time bound on when this may happen i.e none - /// of the current threads are forcefully destroyed and all threads run their - /// normal course. - ResourceQuota& SetMaxThreads(int new_max_threads); - grpc_resource_quota* c_resource_quota() const { return impl_; } private: diff --git a/include/grpcpp/server.h b/include/grpcpp/server.h index 189cf8accf..81c3907f86 100644 --- a/include/grpcpp/server.h +++ b/include/grpcpp/server.h @@ -144,8 +144,7 @@ class Server : public ServerInterface, private GrpcLibraryCodegen { Server(int max_message_size, ChannelArguments* args, std::shared_ptr>> sync_server_cqs, - grpc_resource_quota* server_rq, int min_pollers, int max_pollers, - int sync_cq_timeout_msec); + int min_pollers, int max_pollers, int sync_cq_timeout_msec); /// Start the server. /// diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index b6fc7579f7..539bc120ce 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -96,9 +96,6 @@ struct grpc_resource_user { list, false otherwise */ bool added_to_free_pool; - /* The number of threads currently allocated to this resource user */ - gpr_atm num_threads_allocated; - /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer */ grpc_closure* reclaimers[2]; @@ -138,33 +135,12 @@ struct grpc_resource_quota { gpr_atm last_size; - /* Mutex to protect max_threads and num_threads_allocated */ - /* Note: We could have used gpr_atm for max_threads and num_threads_allocated - * and avoid having this mutex; but in that case, each invocation of the - * function grpc_resource_user_allocate_threads() would have had to do at - * least two atomic loads (for max_threads and num_threads_allocated) followed - * by a CAS (on num_threads_allocated). - * Moreover, we expect grpc_resource_user_allocate_threads() to be often - * called concurrently thereby increasing the chances of failing the CAS - * operation. This additional complexity is not worth the tiny perf gain we - * may (or may not) have by using atomics */ - gpr_mu thread_count_mu; - - /* Max number of threads allowed */ - int max_threads; - - /* Number of threads currently allocated via this resource_quota object */ - int num_threads_allocated; - /* Has rq_step been scheduled to occur? 
*/ bool step_scheduled; - /* Are we currently reclaiming memory */ bool reclaiming; - /* Closure around rq_step */ grpc_closure rq_step_closure; - /* Closure around rq_reclamation_done */ grpc_closure rq_reclamation_done_closure; @@ -548,11 +524,6 @@ static void ru_shutdown(void* ru, grpc_error* error) { static void ru_destroy(void* ru, grpc_error* error) { grpc_resource_user* resource_user = static_cast(ru); GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0); - // Free all the remaining thread quota - grpc_resource_user_free_threads(resource_user, - static_cast(gpr_atm_no_barrier_load( - &resource_user->num_threads_allocated))); - for (int i = 0; i < GRPC_RULIST_COUNT; i++) { rulist_remove(resource_user, static_cast(i)); } @@ -623,9 +594,6 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { resource_quota->free_pool = INT64_MAX; resource_quota->size = INT64_MAX; gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX); - gpr_mu_init(&resource_quota->thread_count_mu); - resource_quota->max_threads = INT_MAX; - resource_quota->num_threads_allocated = 0; resource_quota->step_scheduled = false; resource_quota->reclaiming = false; gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0); @@ -648,8 +616,6 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) { void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) { if (gpr_unref(&resource_quota->refs)) { - // No outstanding thread quota - GPR_ASSERT(resource_quota->num_threads_allocated == 0); GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota"); gpr_free(resource_quota->name); gpr_free(resource_quota); @@ -680,15 +646,6 @@ double grpc_resource_quota_get_memory_pressure( (static_cast(MEMORY_USAGE_ESTIMATION_MAX)); } -/* Public API */ -void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota, - int new_max_threads) { - GPR_ASSERT(new_max_threads >= 0); - gpr_mu_lock(&resource_quota->thread_count_mu); - resource_quota->max_threads = new_max_threads; - gpr_mu_unlock(&resource_quota->thread_count_mu); -} - /* Public API */ void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, size_t size) { @@ -774,7 +731,6 @@ grpc_resource_user* grpc_resource_user_create( grpc_closure_list_init(&resource_user->on_allocated); resource_user->allocating = false; resource_user->added_to_free_pool = false; - gpr_atm_no_barrier_store(&resource_user->num_threads_allocated, 0); resource_user->reclaimers[0] = nullptr; resource_user->reclaimers[1] = nullptr; resource_user->new_reclaimers[0] = nullptr; @@ -829,40 +785,6 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) { } } -bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user, - int thread_count) { - GPR_ASSERT(thread_count >= 0); - bool is_success = false; - gpr_mu_lock(&resource_user->resource_quota->thread_count_mu); - grpc_resource_quota* rq = resource_user->resource_quota; - if (rq->num_threads_allocated + thread_count <= rq->max_threads) { - rq->num_threads_allocated += thread_count; - gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated, - thread_count); - is_success = true; - } - gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu); - return is_success; -} - -void grpc_resource_user_free_threads(grpc_resource_user* resource_user, - int thread_count) { - GPR_ASSERT(thread_count >= 0); - gpr_mu_lock(&resource_user->resource_quota->thread_count_mu); - grpc_resource_quota* rq = resource_user->resource_quota; - 
rq->num_threads_allocated -= thread_count; - int old_count = static_cast(gpr_atm_no_barrier_fetch_add( - &resource_user->num_threads_allocated, -thread_count)); - if (old_count < thread_count || rq->num_threads_allocated < 0) { - gpr_log(GPR_ERROR, - "Releasing more threads (%d) than currently allocated (rq threads: " - "%d, ru threads: %d)", - thread_count, rq->num_threads_allocated + thread_count, old_count); - abort(); - } - gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu); -} - void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size, grpc_closure* optional_on_done) { gpr_mu_lock(&resource_user->mu); diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index 1d5e95e04a..937daf8728 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -93,22 +93,6 @@ void grpc_resource_user_ref(grpc_resource_user* resource_user); void grpc_resource_user_unref(grpc_resource_user* resource_user); void grpc_resource_user_shutdown(grpc_resource_user* resource_user); -/* Attempts to get quota (from the resource_user) to create 'thd_count' number - * of threads. Returns true if successful (i.e the caller is now free to create - * 'thd_count' number of threads) or false if quota is not available */ -bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user, - int thd_count); -/* Releases 'thd_count' worth of quota back to the resource user. The quota - * should have been previously obtained successfully by calling - * grpc_resource_user_allocate_threads(). - * - * Note: There need not be an exact one-to-one correspondence between - * grpc_resource_user_allocate_threads() and grpc_resource_user_free_threads() - * calls. The only requirement is that the number of threads allocated should - * all be eventually released */ -void grpc_resource_user_free_threads(grpc_resource_user* resource_user, - int thd_count); - /* Allocate from the resource user (and its quota). If optional_on_done is NULL, then allocate immediately. This may push the quota over-limit, at which point reclamation will kick in. diff --git a/src/cpp/common/resource_quota_cc.cc b/src/cpp/common/resource_quota_cc.cc index 276e5f7954..daeb0ba171 100644 --- a/src/cpp/common/resource_quota_cc.cc +++ b/src/cpp/common/resource_quota_cc.cc @@ -33,8 +33,4 @@ ResourceQuota& ResourceQuota::Resize(size_t new_size) { return *this; } -ResourceQuota& ResourceQuota::SetMaxThreads(int new_max_threads) { - grpc_resource_quota_set_max_threads(impl_, new_max_threads); - return *this; -} } // namespace grpc diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 0ab3cd0e32..e0b9b7a62b 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -261,7 +261,7 @@ std::unique_ptr ServerBuilder::BuildAndStart() { } std::unique_ptr server(new Server( - max_receive_message_size_, &args, sync_server_cqs, resource_quota_, + max_receive_message_size_, &args, sync_server_cqs, sync_server_settings_.min_pollers, sync_server_settings_.max_pollers, sync_server_settings_.cq_timeout_msec)); diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 472c5035fc..0d77510e29 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -47,12 +47,6 @@ namespace grpc { namespace { -// The default value for maximum number of threads that can be created in the -// sync server. This value of 500 is empirically chosen. 
To increase the max -// number of threads in a sync server, pass a custom ResourceQuota object (with -// the desired number of max-threads set) to the server builder -#define DEFAULT_MAX_SYNC_SERVER_THREADS 500 - class DefaultGlobalCallbacks final : public Server::GlobalCallbacks { public: ~DefaultGlobalCallbacks() override {} @@ -272,9 +266,9 @@ class Server::SyncRequestThreadManager : public ThreadManager { public: SyncRequestThreadManager(Server* server, CompletionQueue* server_cq, std::shared_ptr global_callbacks, - grpc_resource_quota* rq, int min_pollers, - int max_pollers, int cq_timeout_msec) - : ThreadManager("SyncServer", rq, min_pollers, max_pollers), + int min_pollers, int max_pollers, + int cq_timeout_msec) + : ThreadManager(min_pollers, max_pollers), server_(server), server_cq_(server_cq), cq_timeout_msec_(cq_timeout_msec), @@ -382,8 +376,7 @@ Server::Server( int max_receive_message_size, ChannelArguments* args, std::shared_ptr>> sync_server_cqs, - grpc_resource_quota* server_rq, int min_pollers, int max_pollers, - int sync_cq_timeout_msec) + int min_pollers, int max_pollers, int sync_cq_timeout_msec) : max_receive_message_size_(max_receive_message_size), sync_server_cqs_(std::move(sync_server_cqs)), started_(false), @@ -399,22 +392,10 @@ Server::Server( global_callbacks_->UpdateArguments(args); if (sync_server_cqs_ != nullptr) { - bool default_rq_created = false; - if (server_rq == nullptr) { - server_rq = grpc_resource_quota_create("SyncServer-default-rq"); - grpc_resource_quota_set_max_threads(server_rq, - DEFAULT_MAX_SYNC_SERVER_THREADS); - default_rq_created = true; - } - for (const auto& it : *sync_server_cqs_) { sync_req_mgrs_.emplace_back(new SyncRequestThreadManager( - this, it.get(), global_callbacks_, server_rq, min_pollers, - max_pollers, sync_cq_timeout_msec)); - } - - if (default_rq_created) { - grpc_resource_quota_unref(server_rq); + this, it.get(), global_callbacks_, min_pollers, max_pollers, + sync_cq_timeout_msec)); } } diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index fa9eec5f9b..02ac56a3fd 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -22,8 +22,8 @@ #include #include + #include "src/core/lib/gprpp/thd.h" -#include "src/core/lib/iomgr/exec_ctx.h" namespace grpc { @@ -48,17 +48,12 @@ ThreadManager::WorkerThread::~WorkerThread() { thd_.Join(); } -ThreadManager::ThreadManager(const char* name, - grpc_resource_quota* resource_quota, - int min_pollers, int max_pollers) +ThreadManager::ThreadManager(int min_pollers, int max_pollers) : shutdown_(false), num_pollers_(0), min_pollers_(min_pollers), max_pollers_(max_pollers == -1 ? 
INT_MAX : max_pollers), - num_threads_(0), - max_active_threads_sofar_(0) { - resource_user_ = grpc_resource_user_create(resource_quota, name); -} + num_threads_(0) {} ThreadManager::~ThreadManager() { { @@ -66,8 +61,6 @@ ThreadManager::~ThreadManager() { GPR_ASSERT(num_threads_ == 0); } - grpc_core::ExecCtx exec_ctx; // grpc_resource_user_unref needs an exec_ctx - grpc_resource_user_unref(resource_user_); CleanupCompletedThreads(); } @@ -88,27 +81,17 @@ bool ThreadManager::IsShutdown() { return shutdown_; } -int ThreadManager::GetMaxActiveThreadsSoFar() { - std::lock_guard list_lock(list_mu_); - return max_active_threads_sofar_; -} - void ThreadManager::MarkAsCompleted(WorkerThread* thd) { { std::lock_guard list_lock(list_mu_); completed_threads_.push_back(thd); } - { - std::lock_guard lock(mu_); - num_threads_--; - if (num_threads_ == 0) { - shutdown_cv_.notify_one(); - } + std::lock_guard lock(mu_); + num_threads_--; + if (num_threads_ == 0) { + shutdown_cv_.notify_one(); } - - // Give a thread back to the resource quota - grpc_resource_user_free_threads(resource_user_, 1); } void ThreadManager::CleanupCompletedThreads() { @@ -123,22 +106,14 @@ void ThreadManager::CleanupCompletedThreads() { } void ThreadManager::Initialize() { - if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) { - gpr_log(GPR_ERROR, - "No thread quota available to even create the minimum required " - "polling threads (i.e %d). Unable to start the thread manager", - min_pollers_); - abort(); - } - { std::unique_lock lock(mu_); num_pollers_ = min_pollers_; num_threads_ = min_pollers_; - max_active_threads_sofar_ = min_pollers_; } for (int i = 0; i < min_pollers_; i++) { + // Create a new thread (which ends up calling the MainWorkLoop() function new WorkerThread(this); } } @@ -164,15 +139,11 @@ void ThreadManager::MainWorkLoop() { done = true; break; case WORK_FOUND: - // If we got work and there are now insufficient pollers and there is - // quota available to create a new thread, start a new poller thread - if (!shutdown_ && num_pollers_ < min_pollers_ && - grpc_resource_user_allocate_threads(resource_user_, 1)) { + // If we got work and there are now insufficient pollers, start a new + // one + if (!shutdown_ && num_pollers_ < min_pollers_) { num_pollers_++; num_threads_++; - if (num_threads_ > max_active_threads_sofar_) { - max_active_threads_sofar_ = num_threads_; - } // Drop lock before spawning thread to avoid contention lock.unlock(); new WorkerThread(this); @@ -225,8 +196,6 @@ void ThreadManager::MainWorkLoop() { } }; - // This thread is exiting. 
Do some cleanup work i.e delete already completed - // worker threads CleanupCompletedThreads(); // If we are here, either ThreadManager is shutting down or it already has diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h index 01043edb31..5a40f2de47 100644 --- a/src/cpp/thread_manager/thread_manager.h +++ b/src/cpp/thread_manager/thread_manager.h @@ -27,14 +27,12 @@ #include #include "src/core/lib/gprpp/thd.h" -#include "src/core/lib/iomgr/resource_quota.h" namespace grpc { class ThreadManager { public: - explicit ThreadManager(const char* name, grpc_resource_quota* resource_quota, - int min_pollers, int max_pollers); + explicit ThreadManager(int min_pollers, int max_pollers); virtual ~ThreadManager(); // Initializes and Starts the Rpc Manager threads @@ -86,11 +84,6 @@ class ThreadManager { // all the threads have drained all the outstanding work virtual void Wait(); - // Max number of concurrent threads that were ever active in this thread - // manager so far. This is useful for debugging purposes (and in unit tests) - // to check if resource_quota is properly being enforced. - int GetMaxActiveThreadsSoFar(); - private: // Helper wrapper class around grpc_core::Thread. Takes a ThreadManager object // and starts a new grpc_core::Thread to calls the Run() function. @@ -98,24 +91,6 @@ class ThreadManager { // The Run() function calls ThreadManager::MainWorkLoop() function and once // that completes, it marks the WorkerThread completed by calling // ThreadManager::MarkAsCompleted() - // - // WHY IS THIS NEEDED?: - // When a thread terminates, some other thread *must* call Join() on that - // thread so that the resources are released. Having a WorkerThread wrapper - // will make this easier. Once Run() completes, each thread calls the - // following two functions: - // ThreadManager::CleanupCompletedThreads() - // ThreadManager::MarkAsCompleted() - // - // - MarkAsCompleted() puts the WorkerThread object in the ThreadManger's - // completed_threads_ list - // - CleanupCompletedThreads() calls "Join()" on the threads that are already - // in the completed_threads_ list (since a thread cannot call Join() on - // itself, it calls CleanupCompletedThreads() *before* calling - // MarkAsCompleted()) - // - // TODO(sreek): Consider creating the threads 'detached' so that Join() need - // not be called (and the need for this WorkerThread class is eliminated) class WorkerThread { public: WorkerThread(ThreadManager* thd_mgr); @@ -136,21 +111,13 @@ class ThreadManager { void MarkAsCompleted(WorkerThread* thd); void CleanupCompletedThreads(); - // Protects shutdown_, num_pollers_, num_threads_ and - // max_active_threads_sofar_ + // Protects shutdown_, num_pollers_ and num_threads_ + // TODO: sreek - Change num_pollers and num_threads_ to atomics std::mutex mu_; bool shutdown_; std::condition_variable shutdown_cv_; - // The resource user object to use when requesting quota to create threads - // - // Note: The user of this ThreadManager object must create grpc_resource_quota - // object (that contains the actual max thread quota) and a grpc_resource_user - // object through which quota is requested whenver new threads need to be - // created - grpc_resource_user* resource_user_; - // Number of threads doing polling int num_pollers_; @@ -158,15 +125,10 @@ class ThreadManager { int min_pollers_; int max_pollers_; - // The total number of threads currently active (includes threads includes the - // threads that are currently polling i.e num_pollers_) + // The 
total number of threads (includes threads includes the threads that are + // currently polling i.e num_pollers_) int num_threads_; - // See GetMaxActiveThreadsSoFar()'s description. - // To be more specific, this variable tracks the max value num_threads_ was - // ever set so far - int max_active_threads_sofar_; - std::mutex list_mu_; std::list completed_threads_; }; diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c index 78090afd6c..2443532bb8 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c @@ -91,7 +91,6 @@ grpc_resource_quota_create_type grpc_resource_quota_create_import; grpc_resource_quota_ref_type grpc_resource_quota_ref_import; grpc_resource_quota_unref_type grpc_resource_quota_unref_import; grpc_resource_quota_resize_type grpc_resource_quota_resize_import; -grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_import; grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import; grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import; grpc_channelz_get_channel_type grpc_channelz_get_channel_import; @@ -342,7 +341,6 @@ void grpc_rb_load_imports(HMODULE library) { grpc_resource_quota_ref_import = (grpc_resource_quota_ref_type) GetProcAddress(library, "grpc_resource_quota_ref"); grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref"); grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize"); - grpc_resource_quota_set_max_threads_import = (grpc_resource_quota_set_max_threads_type) GetProcAddress(library, "grpc_resource_quota_set_max_threads"); grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable"); grpc_channelz_get_top_channels_import = (grpc_channelz_get_top_channels_type) GetProcAddress(library, "grpc_channelz_get_top_channels"); grpc_channelz_get_channel_import = (grpc_channelz_get_channel_type) GetProcAddress(library, "grpc_channelz_get_channel"); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index 1807efa761..b08a1f94f7 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -248,9 +248,6 @@ extern grpc_resource_quota_unref_type grpc_resource_quota_unref_import; typedef void(*grpc_resource_quota_resize_type)(grpc_resource_quota* resource_quota, size_t new_size); extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import; #define grpc_resource_quota_resize grpc_resource_quota_resize_import -typedef void(*grpc_resource_quota_set_max_threads_type)(grpc_resource_quota* resource_quota, int new_max_threads); -extern grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_import; -#define grpc_resource_quota_set_max_threads grpc_resource_quota_set_max_threads_import typedef const grpc_arg_pointer_vtable*(*grpc_resource_quota_arg_vtable_type)(void); extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import; #define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc index f3b35fed32..059ff7b5f8 100644 --- a/test/core/iomgr/resource_quota_test.cc +++ b/test/core/iomgr/resource_quota_test.cc @@ -798,98 +798,6 @@ static void 
test_negative_rq_free_pool(void) { } } -// Simple test to check resource quota thread limits -static void test_thread_limit() { - grpc_core::ExecCtx exec_ctx; - - grpc_resource_quota* rq = grpc_resource_quota_create("test_thread_limit"); - grpc_resource_user* ru1 = grpc_resource_user_create(rq, "ru1"); - grpc_resource_user* ru2 = grpc_resource_user_create(rq, "ru2"); - - // Max threads = 100 - grpc_resource_quota_set_max_threads(rq, 100); - - // Request quota for 100 threads (50 for ru1, 50 for ru2) - GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 10)); - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10)); - GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 40)); - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 40)); - - // Threads exhausted. Next request must fail - GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20)); - - // Free 20 threads from two different users - grpc_resource_user_free_threads(ru1, 10); - grpc_resource_user_free_threads(ru2, 10); - - // Next request to 20 threads must succeed - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); - - // No more thread quota again - GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 20)); - - // Free 10 more - grpc_resource_user_free_threads(ru1, 10); - - GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 5)); - GPR_ASSERT( - !grpc_resource_user_allocate_threads(ru2, 10)); // Only 5 available - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 5)); - - // Teardown (ru1 and ru2 release all the quota back to rq) - grpc_resource_user_unref(ru1); - grpc_resource_user_unref(ru2); - grpc_resource_quota_unref(rq); -} - -// Change max quota in either direction dynamically -static void test_thread_maxquota_change() { - grpc_core::ExecCtx exec_ctx; - - grpc_resource_quota* rq = - grpc_resource_quota_create("test_thread_maxquota_change"); - grpc_resource_user* ru1 = grpc_resource_user_create(rq, "ru1"); - grpc_resource_user* ru2 = grpc_resource_user_create(rq, "ru2"); - - // Max threads = 100 - grpc_resource_quota_set_max_threads(rq, 100); - - // Request quota for 100 threads (50 for ru1, 50 for ru2) - GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 50)); - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 50)); - - // Threads exhausted. 
Next request must fail - GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20)); - - // Increase maxquota and retry - // Max threads = 150; - grpc_resource_quota_set_max_threads(rq, 150); - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); // ru2=70, ru1=50 - - // Decrease maxquota (Note: Quota already given to ru1 and ru2 is unaffected) - // Max threads = 10; - grpc_resource_quota_set_max_threads(rq, 10); - - // New requests will fail until quota is available - GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); - - // Make quota available - grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0 - GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); // not enough - - grpc_resource_user_free_threads(ru2, 70); // ru2 now has 0 - - // Now we can get quota up-to 10, the current max - GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10)); - // No more thread quota again - GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); - - // Teardown (ru1 and ru2 release all the quota back to rq) - grpc_resource_user_unref(ru1); - grpc_resource_user_unref(ru2); - grpc_resource_quota_unref(rq); -} - int main(int argc, char** argv) { grpc_test_init(argc, argv); grpc_init(); @@ -919,11 +827,6 @@ int main(int argc, char** argv) { test_negative_rq_free_pool(); gpr_mu_destroy(&g_mu); gpr_cv_destroy(&g_cv); - - // Resource quota thread related - test_thread_limit(); - test_thread_maxquota_change(); - grpc_shutdown(); return 0; } diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c index 497f7194d5..9f4ad2b4d7 100644 --- a/test/core/surface/public_headers_must_be_c89.c +++ b/test/core/surface/public_headers_must_be_c89.c @@ -130,7 +130,6 @@ int main(int argc, char **argv) { printf("%lx", (unsigned long) grpc_resource_quota_ref); printf("%lx", (unsigned long) grpc_resource_quota_unref); printf("%lx", (unsigned long) grpc_resource_quota_resize); - printf("%lx", (unsigned long) grpc_resource_quota_set_max_threads); printf("%lx", (unsigned long) grpc_resource_quota_arg_vtable); printf("%lx", (unsigned long) grpc_channelz_get_top_channels); printf("%lx", (unsigned long) grpc_channelz_get_channel); diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 838f5f72ad..7a95a9f17d 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -30,44 +30,30 @@ #include "test/cpp/util/test_config.h" namespace grpc { - -struct ThreadManagerTestSettings { - // The min number of pollers that SHOULD be active in ThreadManager - int min_pollers; - // The max number of pollers that could be active in ThreadManager - int max_pollers; - // The sleep duration in PollForWork() function to simulate "polling" - int poll_duration_ms; - // The sleep duration in DoWork() function to simulate "work" - int work_duration_ms; - // Max number of times PollForWork() is called before shutting down - int max_poll_calls; -}; - class ThreadManagerTest final : public grpc::ThreadManager { public: - ThreadManagerTest(const char* name, grpc_resource_quota* rq, - const ThreadManagerTestSettings& settings) - : ThreadManager(name, rq, settings.min_pollers, settings.max_pollers), - settings_(settings), + ThreadManagerTest() + : ThreadManager(kMinPollers, kMaxPollers), num_do_work_(0), num_poll_for_work_(0), num_work_found_(0) {} grpc::ThreadManager::WorkStatus PollForWork(void** tag, bool* ok) override; void DoWork(void* tag, bool ok) override; - - // Get 
number of times PollForWork() returned WORK_FOUND - int GetNumWorkFound(); - // Get number of times DoWork() was called - int GetNumDoWork(); + void PerformTest(); private: void SleepForMs(int sleep_time_ms); - ThreadManagerTestSettings settings_; + static const int kMinPollers = 2; + static const int kMaxPollers = 10; + + static const int kPollingTimeoutMsec = 10; + static const int kDoWorkDurationMsec = 1; + + // PollForWork will return SHUTDOWN after these many number of invocations + static const int kMaxNumPollForWork = 50; - // Counters gpr_atm num_do_work_; // Number of calls to DoWork gpr_atm num_poll_for_work_; // Number of calls to PollForWork gpr_atm num_work_found_; // Number of times WORK_FOUND was returned @@ -83,117 +69,54 @@ void ThreadManagerTest::SleepForMs(int duration_ms) { grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void** tag, bool* ok) { int call_num = gpr_atm_no_barrier_fetch_add(&num_poll_for_work_, 1); - if (call_num >= settings_.max_poll_calls) { + + if (call_num >= kMaxNumPollForWork) { Shutdown(); return SHUTDOWN; } - SleepForMs(settings_.poll_duration_ms); // Simulate "polling" duration + // Simulate "polling for work" by sleeping for sometime + SleepForMs(kPollingTimeoutMsec); + *tag = nullptr; *ok = true; - // Return timeout roughly 1 out of every 3 calls just to make the test a bit - // more interesting + // Return timeout roughly 1 out of every 3 calls if (call_num % 3 == 0) { return TIMEOUT; + } else { + gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); + return WORK_FOUND; } - - gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); - return WORK_FOUND; } void ThreadManagerTest::DoWork(void* tag, bool ok) { gpr_atm_no_barrier_fetch_add(&num_do_work_, 1); - SleepForMs(settings_.work_duration_ms); // Simulate work by sleeping + SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping } -int ThreadManagerTest::GetNumWorkFound() { - return static_cast(gpr_atm_no_barrier_load(&num_work_found_)); -} - -int ThreadManagerTest::GetNumDoWork() { - return static_cast(gpr_atm_no_barrier_load(&num_do_work_)); -} -} // namespace grpc +void ThreadManagerTest::PerformTest() { + // Initialize() starts the ThreadManager + Initialize(); -// Test that the number of times DoWork() is called is equal to the number of -// times PollForWork() returned WORK_FOUND -static void TestPollAndWork() { - grpc_resource_quota* rq = grpc_resource_quota_create("Test-poll-and-work"); - grpc::ThreadManagerTestSettings settings = { - 2 /* min_pollers */, 10 /* max_pollers */, 10 /* poll_duration_ms */, - 1 /* work_duration_ms */, 50 /* max_poll_calls */}; - - grpc::ThreadManagerTest test_thread_mgr("TestThreadManager", rq, settings); - grpc_resource_quota_unref(rq); - - test_thread_mgr.Initialize(); // Start the thread manager - test_thread_mgr.Wait(); // Wait for all threads to finish - - // Verify that The number of times DoWork() was called is equal to the number - // of times WORK_FOUND was returned - gpr_log(GPR_DEBUG, "DoWork() called %d times", - test_thread_mgr.GetNumDoWork()); - GPR_ASSERT(test_thread_mgr.GetNumDoWork() == - test_thread_mgr.GetNumWorkFound()); -} + // Wait for all the threads to gracefully terminate + Wait(); -static void TestThreadQuota() { - const int kMaxNumThreads = 3; - grpc_resource_quota* rq = grpc_resource_quota_create("Test-thread-quota"); - grpc_resource_quota_set_max_threads(rq, kMaxNumThreads); - - // Set work_duration_ms to be much greater than poll_duration_ms. 
This way, - // the thread manager will be forced to create more 'polling' threads to - // honor the min_pollers guarantee - grpc::ThreadManagerTestSettings settings = { - 1 /* min_pollers */, 1 /* max_pollers */, 1 /* poll_duration_ms */, - 10 /* work_duration_ms */, 50 /* max_poll_calls */}; - - // Create two thread managers (but with same resource quota). This means - // that the max number of active threads across BOTH the thread managers - // cannot be greater than kMaxNumthreads - grpc::ThreadManagerTest test_thread_mgr_1("TestThreadManager-1", rq, - settings); - grpc::ThreadManagerTest test_thread_mgr_2("TestThreadManager-2", rq, - settings); - // It is ok to unref resource quota before starting thread managers. - grpc_resource_quota_unref(rq); - - // Start both thread managers - test_thread_mgr_1.Initialize(); - test_thread_mgr_2.Initialize(); - - // Wait for both to finish - test_thread_mgr_1.Wait(); - test_thread_mgr_2.Wait(); - - // Now verify that the total number of active threads in either thread manager - // never exceeds kMaxNumThreads - // - // NOTE: Actually the total active threads across *both* thread managers at - // any point of time never exceeds kMaxNumThreads but unfortunately there is - // no easy way to verify it (i.e we can't just do (max1 + max2 <= k)) - // Its okay to not test this case here. The resource quota c-core tests - // provide enough coverage to resource quota object with multiple resource - // users - int max1 = test_thread_mgr_1.GetMaxActiveThreadsSoFar(); - int max2 = test_thread_mgr_2.GetMaxActiveThreadsSoFar(); - gpr_log( - GPR_DEBUG, - "MaxActiveThreads in TestThreadManager_1: %d, TestThreadManager_2: %d", - max1, max2); - GPR_ASSERT(max1 <= kMaxNumThreads && max2 <= kMaxNumThreads); + // The number of times DoWork() was called is equal to the number of times + // WORK_FOUND was returned + gpr_log(GPR_DEBUG, "DoWork() called %" PRIdPTR " times", + gpr_atm_no_barrier_load(&num_do_work_)); + GPR_ASSERT(gpr_atm_no_barrier_load(&num_do_work_) == + gpr_atm_no_barrier_load(&num_work_found_)); } +} // namespace grpc int main(int argc, char** argv) { std::srand(std::time(nullptr)); - grpc::testing::InitTest(&argc, &argv, true); - grpc_init(); - TestPollAndWork(); - TestThreadQuota(); + grpc::testing::InitTest(&argc, &argv, true); + grpc::ThreadManagerTest test_rpc_manager; + test_rpc_manager.PerformTest(); - grpc_shutdown(); return 0; } -- cgit v1.2.3 From e8f0e54dce41e4575cc48c390f6d7696be27f22a Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Mon, 6 Aug 2018 18:55:45 -0700 Subject: Enable CFStream with environment variable --- BUILD | 1 + build.yaml | 1 + gRPC-Core.podspec | 1 + src/core/lib/iomgr/iomgr_posix_cfstream.cc | 76 ++++++++++++++++++++++ src/core/lib/iomgr/port.h | 5 +- src/core/lib/iomgr/tcp_client_cfstream.cc | 2 +- src/objective-c/GRPCClient/GRPCCall.m | 19 ++++-- .../GRPCClient/private/GRPCCompletionQueue.m | 5 -- src/objective-c/GRPCClient/private/GRPCHost.m | 19 ++++-- src/objective-c/tests/InteropTests.m | 5 ++ .../tests/Tests.xcodeproj/project.pbxproj | 30 +++++++++ tools/run_tests/generated/sources_and_headers.json | 1 + 12 files changed, 145 insertions(+), 20 deletions(-) create mode 100644 src/core/lib/iomgr/iomgr_posix_cfstream.cc (limited to 'src/core/lib/iomgr') diff --git a/BUILD b/BUILD index 81390dd1aa..433ae27621 100644 --- a/BUILD +++ b/BUILD @@ -1010,6 +1010,7 @@ grpc_cc_library( "src/core/lib/iomgr/cfstream_handle.cc", "src/core/lib/iomgr/endpoint_cfstream.cc", "src/core/lib/iomgr/error_cfstream.cc", + 
"src/core/lib/iomgr/iomgr_posix_cfstream.cc", "src/core/lib/iomgr/tcp_client_cfstream.cc", ], hdrs = [ diff --git a/build.yaml b/build.yaml index 70af96046c..3473fa09d6 100644 --- a/build.yaml +++ b/build.yaml @@ -548,6 +548,7 @@ filegroups: - src/core/lib/iomgr/cfstream_handle.cc - src/core/lib/iomgr/endpoint_cfstream.cc - src/core/lib/iomgr/error_cfstream.cc + - src/core/lib/iomgr/iomgr_posix_cfstream.cc - src/core/lib/iomgr/tcp_client_cfstream.cc uses: - grpc_base_headers diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 5c3649afbd..81323d2795 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -1112,6 +1112,7 @@ Pod::Spec.new do |s| ss.source_files = 'src/core/lib/iomgr/cfstream_handle.cc', 'src/core/lib/iomgr/endpoint_cfstream.cc', 'src/core/lib/iomgr/error_cfstream.cc', + 'src/core/lib/iomgr/iomgr_posix_cfstream.cc', 'src/core/lib/iomgr/tcp_client_cfstream.cc', 'src/core/lib/iomgr/cfstream_handle.h', 'src/core/lib/iomgr/endpoint_cfstream.h', diff --git a/src/core/lib/iomgr/iomgr_posix_cfstream.cc b/src/core/lib/iomgr/iomgr_posix_cfstream.cc new file mode 100644 index 0000000000..646dd9ee6d --- /dev/null +++ b/src/core/lib/iomgr/iomgr_posix_cfstream.cc @@ -0,0 +1,76 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_CFSTREAM_IOMGR + + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/iomgr_posix.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/iomgr/tcp_posix.h" +#include "src/core/lib/iomgr/tcp_server.h" +#include "src/core/lib/iomgr/timer.h" + +static const char *grpc_cfstream_env_var = "grpc_cfstream"; + +extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable; +extern grpc_tcp_client_vtable grpc_posix_tcp_client_vtable; +extern grpc_tcp_client_vtable grpc_cfstream_client_vtable; +extern grpc_timer_vtable grpc_generic_timer_vtable; +extern grpc_pollset_vtable grpc_posix_pollset_vtable; +extern grpc_pollset_set_vtable grpc_posix_pollset_set_vtable; +extern grpc_address_resolver_vtable grpc_posix_resolver_vtable; + +static void iomgr_platform_init(void) { + grpc_wakeup_fd_global_init(); + grpc_event_engine_init(); +} + +static void iomgr_platform_flush(void) {} + +static void iomgr_platform_shutdown(void) { + grpc_event_engine_shutdown(); + grpc_wakeup_fd_global_destroy(); +} + +static grpc_iomgr_platform_vtable vtable = { + iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown}; + +void grpc_set_default_iomgr_platform() { + char *enable_cfstream = getenv(grpc_cfstream_env_var); + grpc_tcp_client_vtable *client_vtable = &grpc_posix_tcp_client_vtable; + if (enable_cfstream != nullptr && enable_cfstream[0] == '1') { + client_vtable = &grpc_cfstream_client_vtable; + } + grpc_set_tcp_client_impl(client_vtable); + grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable); + grpc_set_timer_impl(&grpc_generic_timer_vtable); + grpc_set_pollset_vtable(&grpc_posix_pollset_vtable); + grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable); + grpc_set_resolver_impl(&grpc_posix_resolver_vtable); + grpc_set_iomgr_platform_vtable(&vtable); +} + +#endif /* GRPC_CFSTREAM_IOMGR */ diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h index 80d8e63cdd..1d0ecff802 100644 --- a/src/core/lib/iomgr/port.h +++ b/src/core/lib/iomgr/port.h @@ -98,9 +98,9 @@ #define GRPC_POSIX_FORK 1 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 #ifdef GRPC_CFSTREAM -#define GRPC_POSIX_SOCKET_IOMGR 1 -#define GRPC_CFSTREAM_ENDPOINT 1 +#define GRPC_CFSTREAM_IOMGR 1 #define GRPC_CFSTREAM_CLIENT 1 +#define GRPC_CFSTREAM_ENDPOINT 1 #define GRPC_POSIX_SOCKET_ARES_EV_DRIVER 1 #define GRPC_POSIX_SOCKET_EV 1 #define GRPC_POSIX_SOCKET_EV_EPOLL1 1 @@ -111,6 +111,7 @@ #define GRPC_POSIX_SOCKET_SOCKADDR 1 #define GRPC_POSIX_SOCKET_SOCKET_FACTORY 1 #define GRPC_POSIX_SOCKET_TCP 1 +#define GRPC_POSIX_SOCKET_TCP_CLIENT 1 #define GRPC_POSIX_SOCKET_TCP_SERVER 1 #define GRPC_POSIX_SOCKET_TCP_SERVER_UTILS_COMMON 1 #define GRPC_POSIX_SOCKET_UTILS_COMMON 1 diff --git a/src/core/lib/iomgr/tcp_client_cfstream.cc b/src/core/lib/iomgr/tcp_client_cfstream.cc index 5acea91792..4b21322d74 100644 --- a/src/core/lib/iomgr/tcp_client_cfstream.cc +++ b/src/core/lib/iomgr/tcp_client_cfstream.cc @@ -211,6 +211,6 @@ static void CFStreamClientConnect(grpc_closure* closure, grpc_endpoint** ep, gpr_mu_unlock(&connect->mu); } -grpc_tcp_client_vtable grpc_posix_tcp_client_vtable = {CFStreamClientConnect}; +grpc_tcp_client_vtable grpc_cfstream_client_vtable = {CFStreamClientConnect}; #endif /* GRPC_CFSTREAM_CLIENT */ diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m 
index 9783b06440..b8337ab0cd 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -45,6 +45,8 @@ static NSMutableDictionary *callFlags; static NSString *const kAuthorizationHeader = @"authorization"; static NSString *const kBearerPrefix = @"Bearer "; +const char *kCFStreamVarName = "grpc_cfstream"; + @interface GRPCCall () // Make them read-write. @property(atomic, strong) NSDictionary *responseHeaders; @@ -206,9 +208,12 @@ static NSString *const kBearerPrefix = @"Bearer "; } else { [_responseWriteable enqueueSuccessfulCompletion]; } -#ifndef GRPC_CFSTREAM - [GRPCConnectivityMonitor unregisterObserver:self]; -#endif + + // Connectivity monitor is not required for CFStream + char *enableCFStream = getenv(kCFStreamVarName); + if (enableCFStream == nil || enableCFStream[0] != '1') { + [GRPCConnectivityMonitor unregisterObserver:self]; + } // If the call isn't retained anywhere else, it can be deallocated now. _retainSelf = nil; @@ -463,9 +468,11 @@ static NSString *const kBearerPrefix = @"Bearer "; [self sendHeaders:_requestHeaders]; [self invokeCall]; -#ifndef GRPC_CFSTREAM - [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChanged:)]; -#endif + // Connectivity monitor is not required for CFStream + char *enableCFStream = getenv(kCFStreamVarName); + if (enableCFStream == nil || enableCFStream[0] != '1') { + [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChanged:)]; + } } - (void)startWithWriteable:(id)writeable { diff --git a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m index bda1c3360b..f454a6dc57 100644 --- a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m +++ b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m @@ -20,13 +20,8 @@ #import -#ifdef GRPC_CFSTREAM -const grpc_completion_queue_attributes kCompletionQueueAttr = {GRPC_CQ_CURRENT_VERSION, - GRPC_CQ_NEXT, GRPC_CQ_NON_POLLING}; -#else const grpc_completion_queue_attributes kCompletionQueueAttr = { GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING}; -#endif @implementation GRPCCompletionQueue diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m index 2e9f9f243b..862909f238 100644 --- a/src/objective-c/GRPCClient/private/GRPCHost.m +++ b/src/objective-c/GRPCClient/private/GRPCHost.m @@ -34,6 +34,8 @@ NS_ASSUME_NONNULL_BEGIN +extern const char *kCFStreamVarName; + static NSMutableDictionary *kHostCache; @implementation GRPCHost { @@ -49,9 +51,11 @@ static NSMutableDictionary *kHostCache; if (_channelCreds != nil) { grpc_channel_credentials_release(_channelCreds); } -#ifndef GRPC_CFSTREAM - [GRPCConnectivityMonitor unregisterObserver:self]; -#endif + // Connectivity monitor is not required for CFStream + char *enableCFStream = getenv(kCFStreamVarName); + if (enableCFStream == nil || enableCFStream[0] != '1') { + [GRPCConnectivityMonitor unregisterObserver:self]; + } } // Default initializer. 
@@ -87,9 +91,12 @@ static NSMutableDictionary *kHostCache; _compressAlgorithm = GRPC_COMPRESS_NONE; _retryEnabled = YES; } -#ifndef GRPC_CFSTREAM - [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChange:)]; -#endif + + // Connectivity monitor is not required for CFStream + char *enableCFStream = getenv(kCFStreamVarName); + if (enableCFStream == nil || enableCFStream[0] != '1') { + [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChange:)]; + } } return self; } diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m index 1e1da2dd66..5750dccd89 100644 --- a/src/objective-c/tests/InteropTests.m +++ b/src/objective-c/tests/InteropTests.m @@ -36,6 +36,8 @@ #define TEST_TIMEOUT 32 +extern const char *kCFStreamVarName; + // Convenience constructors for the generated proto messages: @interface RMTStreamingOutputCallRequest (Constructors) @@ -97,6 +99,9 @@ BOOL isRemoteInteropTest(NSString *host) { [Cronet start]; [GRPCCall useCronetWithEngine:[Cronet getGlobalEngine]]; #endif +#ifdef GRPC_CFSTREAM + setenv(kCFStreamVarName, "1", 1); +#endif } - (void)setUp { diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj index 8ff4633582..ea1066219d 100644 --- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj +++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj @@ -1982,6 +1982,16 @@ CODE_SIGN_IDENTITY = "iPhone Developer"; CODE_SIGN_STYLE = Automatic; GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "COCOAPODS=1", + "$(inherited)", + "GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1", + "$(inherited)", + "PB_FIELD_32BIT=1", + "PB_NO_PACKED_STRUCTS=1", + "GRPC_CFSTREAM=1", + ); INFOPLIST_FILE = Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 11.2; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; @@ -2100,6 +2110,16 @@ CODE_SIGN_IDENTITY = "iPhone Developer"; CODE_SIGN_STYLE = Automatic; GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "COCOAPODS=1", + "$(inherited)", + "GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1", + "$(inherited)", + "PB_FIELD_32BIT=1", + "PB_NO_PACKED_STRUCTS=1", + "GRPC_CFSTREAM=1", + ); INFOPLIST_FILE = Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 11.2; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; @@ -2218,6 +2238,16 @@ CODE_SIGN_IDENTITY = "iPhone Developer"; CODE_SIGN_STYLE = Automatic; GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "COCOAPODS=1", + "$(inherited)", + "GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1", + "$(inherited)", + "PB_FIELD_32BIT=1", + "PB_NO_PACKED_STRUCTS=1", + "GRPC_CFSTREAM=1", + ); INFOPLIST_FILE = Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 11.2; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index a686dae8b4..fc5480fffd 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -9904,6 +9904,7 @@ "src/core/lib/iomgr/endpoint_cfstream.h", "src/core/lib/iomgr/error_cfstream.cc", "src/core/lib/iomgr/error_cfstream.h", + "src/core/lib/iomgr/iomgr_posix_cfstream.cc", "src/core/lib/iomgr/tcp_client_cfstream.cc" ], "third_party": false, -- cgit v1.2.3 From df5205f74d3bfd3d77cb7e076e43e74fcb15c3cf Mon 
Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 7 Aug 2018 10:43:51 -0700 Subject: clang-format --- src/core/lib/iomgr/iomgr_posix_cfstream.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/iomgr_posix_cfstream.cc b/src/core/lib/iomgr/iomgr_posix_cfstream.cc index 646dd9ee6d..235a9e0712 100644 --- a/src/core/lib/iomgr/iomgr_posix_cfstream.cc +++ b/src/core/lib/iomgr/iomgr_posix_cfstream.cc @@ -22,7 +22,6 @@ #ifdef GRPC_CFSTREAM_IOMGR - #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" @@ -33,7 +32,7 @@ #include "src/core/lib/iomgr/tcp_server.h" #include "src/core/lib/iomgr/timer.h" -static const char *grpc_cfstream_env_var = "grpc_cfstream"; +static const char* grpc_cfstream_env_var = "grpc_cfstream"; extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable; extern grpc_tcp_client_vtable grpc_posix_tcp_client_vtable; @@ -59,8 +58,8 @@ static grpc_iomgr_platform_vtable vtable = { iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown}; void grpc_set_default_iomgr_platform() { - char *enable_cfstream = getenv(grpc_cfstream_env_var); - grpc_tcp_client_vtable *client_vtable = &grpc_posix_tcp_client_vtable; + char* enable_cfstream = getenv(grpc_cfstream_env_var); + grpc_tcp_client_vtable* client_vtable = &grpc_posix_tcp_client_vtable; if (enable_cfstream != nullptr && enable_cfstream[0] == '1') { client_vtable = &grpc_cfstream_client_vtable; } -- cgit v1.2.3 From db1a5962e0e81fb6aa1fefbb7a4e3038f5c32ccc Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 8 Aug 2018 15:17:19 -0700 Subject: Explictly Flush exec_ctx after resetting call_combiner_set_notify_on_cancel to avoid the need to take refs on the stack for cancellation closures on exec_ctx --- src/core/lib/iomgr/call_combiner.h | 5 ++++- src/core/lib/security/transport/client_auth_filter.cc | 4 ---- src/core/lib/security/transport/server_auth_filter.cc | 2 -- src/core/lib/surface/call.cc | 5 ++++- 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'src/core/lib/iomgr') diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h index 641fa18082..6f7ddd4043 100644 --- a/src/core/lib/iomgr/call_combiner.h +++ b/src/core/lib/iomgr/call_combiner.h @@ -102,7 +102,10 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner, /// If \a closure is NULL, then no closure will be invoked on /// cancellation; this effectively unregisters the previously set closure. /// However, most filters will not need to explicitly unregister their -/// callbacks, as this is done automatically when the call is destroyed. +/// callbacks, as this is done automatically when the call is destroyed. Filters +/// that schedule the cancellation closure on ExecCtx do not need to take a ref +/// on the call stack to guarantee closure liveness. This is done by explicitly +/// flushing ExecCtx after the unregistration during call destruction. 
void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner, grpc_closure* closure); diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc index 9b5c6f3490..0f125e7c26 100644 --- a/src/core/lib/security/transport/client_auth_filter.cc +++ b/src/core/lib/security/transport/client_auth_filter.cc @@ -167,7 +167,6 @@ static void cancel_get_request_metadata(void* arg, grpc_error* error) { grpc_call_credentials_cancel_get_request_metadata( calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_get_request_metadata"); } static void send_security_metadata(grpc_call_element* elem, @@ -222,7 +221,6 @@ static void send_security_metadata(grpc_call_element* elem, GRPC_ERROR_UNREF(error); } else { // Async return; register cancellation closure with call combiner. - GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata"); grpc_call_combiner_set_notify_on_cancel( calld->call_combiner, GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure, @@ -265,7 +263,6 @@ static void cancel_check_call_host(void* arg, grpc_error* error) { chand->security_connector, &calld->async_result_closure, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_check_call_host"); } static void auth_start_transport_stream_op_batch( @@ -318,7 +315,6 @@ static void auth_start_transport_stream_op_batch( GRPC_ERROR_UNREF(error); } else { // Async return; register cancellation closure with call combiner. - GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host"); grpc_call_combiner_set_notify_on_cancel( calld->call_combiner, GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure, diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc index 2dbefdf131..19cbb03b63 100644 --- a/src/core/lib/security/transport/server_auth_filter.cc +++ b/src/core/lib/security/transport/server_auth_filter.cc @@ -156,7 +156,6 @@ static void cancel_call(void* arg, grpc_error* error) { on_md_processing_done_inner(elem, nullptr, 0, nullptr, 0, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_call"); } static void recv_initial_metadata_ready(void* arg, grpc_error* error) { @@ -168,7 +167,6 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) { if (chand->creds != nullptr && chand->creds->processor.process != nullptr) { // We're calling out to the application, so we need to make sure // to drop the call combiner early if we get cancelled. - GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call"); GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem, grpc_schedule_on_exec_ctx); grpc_call_combiner_set_notify_on_cancel(calld->call_combiner, diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc index dbad5ded4d..52053e686b 100644 --- a/src/core/lib/surface/call.cc +++ b/src/core/lib/surface/call.cc @@ -613,8 +613,11 @@ void grpc_call_unref(grpc_call* c) { // Unset the call combiner cancellation closure. This has the // effect of scheduling the previously set cancellation closure, if // any, so that it can release any internal references it may be - // holding to the call stack. + // holding to the call stack. Also flush the closures on exec_ctx so that + // filters that schedule cancel notification closures on exec_ctx do not + // need to take a ref of the call stack to guarantee closure liveness. 
grpc_call_combiner_set_notify_on_cancel(&c->call_combiner, nullptr); + grpc_core::ExecCtx::Get()->Flush(); } GRPC_CALL_INTERNAL_UNREF(c, "destroy"); } -- cgit v1.2.3 From 5cd8b1eb811e79ad68bf91a0296507c153053ecf Mon Sep 17 00:00:00 2001 From: Alex Polcyn Date: Sat, 16 Jun 2018 04:08:55 +0000 Subject: Enable c-ares queries on Windows --- CMakeLists.txt | 12 - .../resolver/dns/c_ares/grpc_ares_ev_driver.cc | 17 +- .../resolver/dns/c_ares/grpc_ares_ev_driver.h | 22 +- .../dns/c_ares/grpc_ares_ev_driver_posix.cc | 18 +- .../dns/c_ares/grpc_ares_ev_driver_windows.cc | 508 ++++++++++++++++++++- .../resolver/dns/c_ares/grpc_ares_wrapper.cc | 2 + .../resolver/dns/c_ares/grpc_ares_wrapper.h | 7 + src/core/lib/iomgr/iocp_windows.cc | 13 +- src/core/lib/iomgr/socket_windows.cc | 4 + src/core/lib/iomgr/socket_windows.h | 2 + src/core/lib/iomgr/tcp_windows.cc | 4 +- src/core/lib/iomgr/tcp_windows.h | 2 + .../naming/resolver_component_tests_defs.include | 19 +- test/cpp/naming/cancel_ares_query_test.cc | 55 ++- test/cpp/naming/gen_build_yaml.py | 4 +- .../naming/manual_run_resolver_component_test.py | 36 ++ test/cpp/naming/resolver_component_test.cc | 72 ++- test/cpp/naming/resolver_component_tests_runner.py | 31 +- test/cpp/naming/resolver_test_record_groups.yaml | 8 + tools/run_tests/generated/tests.json | 6 +- 20 files changed, 771 insertions(+), 71 deletions(-) create mode 100644 test/cpp/naming/manual_run_resolver_component_test.py (limited to 'src/core/lib/iomgr') diff --git a/CMakeLists.txt b/CMakeLists.txt index 855b921ada..f242ee92bb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -662,12 +662,8 @@ add_dependencies(buildtests_cxx transport_security_common_api_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx writes_per_rpc_test) endif() -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx resolver_component_test_unsecure) -endif() -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx resolver_component_test) -endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker_unsecure) endif() @@ -676,9 +672,7 @@ add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker) endif() add_dependencies(buildtests_cxx address_sorting_test_unsecure) add_dependencies(buildtests_cxx address_sorting_test) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx cancel_ares_query_test) -endif() add_custom_target(buildtests DEPENDS buildtests_c buildtests_cxx) @@ -16213,7 +16207,6 @@ target_link_libraries(inproc_nosec_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(resolver_component_test_unsecure test/cpp/naming/resolver_component_test.cc @@ -16253,10 +16246,8 @@ target_link_libraries(resolver_component_test_unsecure ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(resolver_component_test test/cpp/naming/resolver_component_test.cc @@ -16296,7 +16287,6 @@ target_link_libraries(resolver_component_test ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) @@ -16467,7 +16457,6 @@ 
target_link_libraries(address_sorting_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) -if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_executable(cancel_ares_query_test test/cpp/naming/cancel_ares_query_test.cc @@ -16507,7 +16496,6 @@ target_link_libraries(cancel_ares_query_test ${_gRPC_GFLAGS_LIBRARIES} ) -endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc index 0068d0d5f4..fdbd07ebf5 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc @@ -74,6 +74,8 @@ struct grpc_ares_ev_driver { bool shutting_down; /** request object that's using this ev driver */ grpc_ares_request* request; + /** Owned by the ev_driver. Creates new GrpcPolledFd's */ + grpc_core::UniquePtr polled_fd_factory; }; static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver); @@ -93,7 +95,7 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) { GRPC_COMBINER_UNREF(ev_driver->combiner, "free ares event driver"); ares_destroy(ev_driver->channel); grpc_ares_complete_request_locked(ev_driver->request); - gpr_free(ev_driver); + grpc_core::Delete(ev_driver); } } @@ -118,13 +120,11 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver, grpc_pollset_set* pollset_set, grpc_combiner* combiner, grpc_ares_request* request) { - *ev_driver = static_cast( - gpr_malloc(sizeof(grpc_ares_ev_driver))); + *ev_driver = grpc_core::New(); ares_options opts; memset(&opts, 0, sizeof(opts)); opts.flags |= ARES_FLAG_STAYOPEN; int status = ares_init_options(&(*ev_driver)->channel, &opts, ARES_OPT_FLAGS); - grpc_core::ConfigureAresChannelLocked(&(*ev_driver)->channel); gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create_locked"); if (status != ARES_SUCCESS) { char* err_msg; @@ -142,6 +142,10 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver, (*ev_driver)->working = false; (*ev_driver)->shutting_down = false; (*ev_driver)->request = request; + (*ev_driver)->polled_fd_factory = + grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner); + (*ev_driver) + ->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel); return GRPC_ERROR_NONE; } @@ -245,8 +249,9 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) { // Create a new fd_node if sock[i] is not in the fd_node list. 
if (fdn == nullptr) { fdn = static_cast(gpr_malloc(sizeof(fd_node))); - fdn->grpc_polled_fd = grpc_core::NewGrpcPolledFdLocked( - socks[i], ev_driver->pollset_set); + fdn->grpc_polled_fd = + ev_driver->polled_fd_factory->NewGrpcPolledFdLocked( + socks[i], ev_driver->pollset_set, ev_driver->combiner); gpr_log(GPR_DEBUG, "new fd: %s", fdn->grpc_polled_fd->GetName()); fdn->ev_driver = ev_driver; fdn->readable_registered = false; diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h index 2c9db71011..671c537fe7 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h @@ -81,10 +81,24 @@ class GrpcPolledFd { GRPC_ABSTRACT_BASE_CLASS }; -/* Creates a new wrapped fd for the current platform */ -GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, - grpc_pollset_set* driver_pollset_set); -void ConfigureAresChannelLocked(ares_channel* channel); +/* A GrpcPolledFdFactory is 1-to-1 with and owned by the + * ares event driver. It knows how to create GrpcPolledFd's + * for the current platform, and the ares driver uses it for all of + * its fd's. */ +class GrpcPolledFdFactory { + public: + virtual ~GrpcPolledFdFactory() {} + /* Creates a new wrapped fd for the current platform */ + virtual GrpcPolledFd* NewGrpcPolledFdLocked( + ares_socket_t as, grpc_pollset_set* driver_pollset_set, + grpc_combiner* combiner) GRPC_ABSTRACT; + /* Optionally configures the ares channel after creation */ + virtual void ConfigureAresChannelLocked(ares_channel channel) GRPC_ABSTRACT; + + GRPC_ABSTRACT_BASE_CLASS +}; + +UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner); } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc index fffe9eda8e..aa58e1aaf5 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc @@ -86,12 +86,20 @@ class GrpcPolledFdPosix : public GrpcPolledFd { grpc_pollset_set* driver_pollset_set_; }; -GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, - grpc_pollset_set* driver_pollset_set) { - return grpc_core::New(as, driver_pollset_set); -} +class GrpcPolledFdFactoryPosix : public GrpcPolledFdFactory { + public: + GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, + grpc_pollset_set* driver_pollset_set, + grpc_combiner* combiner) override { + return New(as, driver_pollset_set); + } -void ConfigureAresChannelLocked(ares_channel* channel) {} + void ConfigureAresChannelLocked(ares_channel channel) override {} +}; + +UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner) { + return UniquePtr(New()); +} } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc index 5d65ae3ab3..02121aa0ab 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc @@ -21,38 +21,516 @@ #if GRPC_ARES == 1 && defined(GPR_WINDOWS) #include + +#include +#include +#include +#include +#include #include +#include 
"src/core/lib/gpr/string.h" #include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/socket_windows.h" +#include "src/core/lib/iomgr/tcp_windows.h" +#include "src/core/lib/slice/slice_internal.h" #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" + +/* TODO(apolcyn): remove this hack after fixing upstream. + * Our grpc/c-ares code on Windows uses the ares_set_socket_functions API, + * which uses "struct iovec" type, which on Windows is defined inside of + * a c-ares header that is not public. + * See https://github.com/c-ares/c-ares/issues/206. */ +struct iovec { + void* iov_base; + size_t iov_len; +}; namespace grpc_core { -/* TODO: fill in the body of GrpcPolledFdWindows to enable c-ares on Windows. - This dummy implementation only allows grpc to compile on windows with - GRPC_ARES=1. */ +/* c-ares creates its own sockets and is meant to read them when readable and + * write them when writeable. To fit this socket usage model into the grpc + * windows poller (which gives notifications when attempted reads and writes are + * actually fulfilled rather than possible), this GrpcPolledFdWindows class + * takes advantage of the ares_set_socket_functions API and acts as a virtual + * socket. It holds its own read and write buffers which are written to and read + * from c-ares and are used with the grpc windows poller, and it, e.g., + * manufactures virtual socket error codes when it e.g. needs to tell the c-ares + * library to wait for an async read. */ class GrpcPolledFdWindows : public GrpcPolledFd { public: - GrpcPolledFdWindows() { abort(); } - ~GrpcPolledFdWindows() { abort(); } + enum WriteState { + WRITE_IDLE, + WRITE_REQUESTED, + WRITE_PENDING, + WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY, + }; + + GrpcPolledFdWindows(ares_socket_t as, grpc_combiner* combiner) + : read_buf_(grpc_empty_slice()), + write_buf_(grpc_empty_slice()), + write_state_(WRITE_IDLE), + gotten_into_driver_list_(false) { + gpr_asprintf(&name_, "c-ares socket: %" PRIdPTR, as); + winsocket_ = grpc_winsocket_create(as, name_); + combiner_ = GRPC_COMBINER_REF(combiner, name_); + GRPC_CLOSURE_INIT(&outer_read_closure_, + &GrpcPolledFdWindows::OnIocpReadable, this, + grpc_combiner_scheduler(combiner_)); + GRPC_CLOSURE_INIT(&outer_write_closure_, + &GrpcPolledFdWindows::OnIocpWriteable, this, + grpc_combiner_scheduler(combiner_)); + } + + ~GrpcPolledFdWindows() { + GRPC_COMBINER_UNREF(combiner_, name_); + grpc_slice_unref_internal(read_buf_); + grpc_slice_unref_internal(write_buf_); + GPR_ASSERT(read_closure_ == nullptr); + GPR_ASSERT(write_closure_ == nullptr); + grpc_winsocket_destroy(winsocket_); + gpr_free(name_); + } + + void ScheduleAndNullReadClosure(grpc_error* error) { + GRPC_CLOSURE_SCHED(read_closure_, error); + read_closure_ = nullptr; + } + + void ScheduleAndNullWriteClosure(grpc_error* error) { + GRPC_CLOSURE_SCHED(write_closure_, error); + write_closure_ = nullptr; + } + void RegisterForOnReadableLocked(grpc_closure* read_closure) override { - abort(); + GPR_ASSERT(read_closure_ == nullptr); + read_closure_ = read_closure; + GPR_ASSERT(GRPC_SLICE_LENGTH(read_buf_) == 0); + grpc_slice_unref_internal(read_buf_); + read_buf_ = GRPC_SLICE_MALLOC(4192); + WSABUF buffer; + buffer.buf = (char*)GRPC_SLICE_START_PTR(read_buf_); + buffer.len = GRPC_SLICE_LENGTH(read_buf_); + memset(&winsocket_->read_info.overlapped, 0, sizeof(OVERLAPPED)); + 
recv_from_source_addr_len_ = sizeof(recv_from_source_addr_); + DWORD flags = 0; + if (WSARecvFrom(grpc_winsocket_wrapped_socket(winsocket_), &buffer, 1, + nullptr, &flags, (sockaddr*)recv_from_source_addr_, + &recv_from_source_addr_len_, + &winsocket_->read_info.overlapped, nullptr)) { + char* msg = gpr_format_message(WSAGetLastError()); + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + GRPC_CARES_TRACE_LOG( + "RegisterForOnReadableLocked: WSARecvFrom error:|%s|. fd:|%s|", msg, + GetName()); + gpr_free(msg); + if (WSAGetLastError() != WSA_IO_PENDING) { + ScheduleAndNullReadClosure(error); + return; + } + } + grpc_socket_notify_on_read(winsocket_, &outer_read_closure_); } + void RegisterForOnWriteableLocked(grpc_closure* write_closure) override { + GRPC_CARES_TRACE_LOG( + "RegisterForOnWriteableLocked. fd:|%s|. Current write state: %d", + GetName(), write_state_); + GPR_ASSERT(write_closure_ == nullptr); + write_closure_ = write_closure; + switch (write_state_) { + case WRITE_IDLE: + ScheduleAndNullWriteClosure(GRPC_ERROR_NONE); + break; + case WRITE_REQUESTED: + write_state_ = WRITE_PENDING; + SendWriteBuf(nullptr, &winsocket_->write_info.overlapped); + grpc_socket_notify_on_write(winsocket_, &outer_write_closure_); + break; + case WRITE_PENDING: + case WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY: + abort(); + } + } + + bool IsFdStillReadableLocked() override { + return GRPC_SLICE_LENGTH(read_buf_) > 0; + } + + void ShutdownLocked(grpc_error* error) override { + grpc_winsocket_shutdown(winsocket_); + } + + ares_socket_t GetWrappedAresSocketLocked() override { + return grpc_winsocket_wrapped_socket(winsocket_); + } + + const char* GetName() override { return name_; } + + ares_ssize_t RecvFrom(void* data, ares_socket_t data_len, int flags, + struct sockaddr* from, ares_socklen_t* from_len) { + GRPC_CARES_TRACE_LOG( + "RecvFrom called on fd:|%s|. Current read buf length:|%d|", GetName(), + GRPC_SLICE_LENGTH(read_buf_)); + if (GRPC_SLICE_LENGTH(read_buf_) == 0) { + WSASetLastError(WSAEWOULDBLOCK); + return -1; + } + ares_ssize_t bytes_read = 0; + for (size_t i = 0; i < GRPC_SLICE_LENGTH(read_buf_) && i < data_len; i++) { + ((char*)data)[i] = GRPC_SLICE_START_PTR(read_buf_)[i]; + bytes_read++; + } + read_buf_ = grpc_slice_sub_no_ref(read_buf_, bytes_read, + GRPC_SLICE_LENGTH(read_buf_)); + /* c-ares overloads this recv_from virtual socket function to receive + * data on both UDP and TCP sockets, and from is nullptr for TCP. */ + if (from != nullptr) { + GPR_ASSERT(*from_len <= recv_from_source_addr_len_); + memcpy(from, &recv_from_source_addr_, recv_from_source_addr_len_); + *from_len = recv_from_source_addr_len_; + } + return bytes_read; + } + + grpc_slice FlattenIovec(const struct iovec* iov, int iov_count) { + int total = 0; + for (int i = 0; i < iov_count; i++) { + total += iov[i].iov_len; + } + grpc_slice out = GRPC_SLICE_MALLOC(total); + size_t cur = 0; + for (int i = 0; i < iov_count; i++) { + for (int k = 0; k < iov[i].iov_len; k++) { + GRPC_SLICE_START_PTR(out)[cur++] = ((char*)iov[i].iov_base)[k]; + } + } + return out; + } + + int SendWriteBuf(LPDWORD bytes_sent_ptr, LPWSAOVERLAPPED overlapped) { + WSABUF buf; + buf.len = GRPC_SLICE_LENGTH(write_buf_); + buf.buf = (char*)GRPC_SLICE_START_PTR(write_buf_); + DWORD flags = 0; + int out = WSASend(grpc_winsocket_wrapped_socket(winsocket_), &buf, 1, + bytes_sent_ptr, flags, overlapped, nullptr); + GRPC_CARES_TRACE_LOG( + "WSASend: name:%s. buf len:%d. bytes sent: %d. overlapped %p. 
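// SendV has to feed c-ares's scatter/gather writes through a single overlapped
// WSASend, so FlattenIovec above first copies the iovec array into one contiguous
// slice. A small self-contained sketch of that coalescing step, using std::string
// in place of grpc_slice and a stand-in IoVec type rather than the patch's iovec:
#include <cstdio>
#include <string>

struct IoVec {
  void* iov_base;
  size_t iov_len;
};

// Copy every segment, in order, into one contiguous buffer (the FlattenIovec idea).
std::string Flatten(const IoVec* iov, int iov_count) {
  size_t total = 0;
  for (int i = 0; i < iov_count; i++) total += iov[i].iov_len;
  std::string out;
  out.reserve(total);
  for (int i = 0; i < iov_count; i++) {
    out.append(static_cast<const char*>(iov[i].iov_base), iov[i].iov_len);
  }
  return out;
}

int main() {
  char a[] = "dns ";
  char b[] = "query";
  IoVec iov[2] = {{a, 4}, {b, 5}};
  std::string flat = Flatten(iov, 2);
  std::printf("flattened %zu bytes: %s\n", flat.size(), flat.c_str());
  return 0;
}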
return " + "val: %d", + GetName(), buf.len, *bytes_sent_ptr, overlapped, out); + return out; + } + + ares_ssize_t TrySendWriteBufSyncNonBlocking() { + GPR_ASSERT(write_state_ == WRITE_IDLE); + ares_ssize_t total_sent; + DWORD bytes_sent = 0; + if (SendWriteBuf(&bytes_sent, nullptr) != 0) { + char* msg = gpr_format_message(WSAGetLastError()); + GRPC_CARES_TRACE_LOG( + "TrySendWriteBufSyncNonBlocking: SendWriteBuf error:|%s|. fd:|%s|", + msg, GetName()); + gpr_free(msg); + if (WSAGetLastError() == WSA_IO_PENDING) { + WSASetLastError(WSAEWOULDBLOCK); + write_state_ = WRITE_REQUESTED; + } + } + write_buf_ = grpc_slice_sub_no_ref(write_buf_, bytes_sent, + GRPC_SLICE_LENGTH(write_buf_)); + return bytes_sent; + } + + ares_ssize_t SendV(const struct iovec* iov, int iov_count) { + GRPC_CARES_TRACE_LOG("SendV called on fd:|%s|. Current write state: %d", + GetName(), write_state_); + switch (write_state_) { + case WRITE_IDLE: + GPR_ASSERT(GRPC_SLICE_LENGTH(write_buf_) == 0); + grpc_slice_unref_internal(write_buf_); + write_buf_ = FlattenIovec(iov, iov_count); + return TrySendWriteBufSyncNonBlocking(); + case WRITE_REQUESTED: + case WRITE_PENDING: + WSASetLastError(WSAEWOULDBLOCK); + return -1; + case WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY: + grpc_slice currently_attempted = FlattenIovec(iov, iov_count); + GPR_ASSERT(GRPC_SLICE_LENGTH(currently_attempted) >= + GRPC_SLICE_LENGTH(write_buf_)); + ares_ssize_t total_sent = 0; + for (size_t i = 0; i < GRPC_SLICE_LENGTH(write_buf_); i++) { + GPR_ASSERT(GRPC_SLICE_START_PTR(currently_attempted)[i] == + GRPC_SLICE_START_PTR(write_buf_)[i]); + total_sent++; + } + grpc_slice_unref_internal(write_buf_); + write_buf_ = + grpc_slice_sub_no_ref(currently_attempted, total_sent, + GRPC_SLICE_LENGTH(currently_attempted)); + write_state_ = WRITE_IDLE; + total_sent += TrySendWriteBufSyncNonBlocking(); + return total_sent; + } abort(); } - bool IsFdStillReadableLocked() override { abort(); } - void ShutdownLocked(grpc_error* error) override { abort(); } - ares_socket_t GetWrappedAresSocketLocked() override { abort(); } - const char* GetName() override { abort(); } + + int Connect(const struct sockaddr* target, ares_socklen_t target_len) { + SOCKET s = grpc_winsocket_wrapped_socket(winsocket_); + GRPC_CARES_TRACE_LOG("Connect: fd:|%s|", GetName()); + int out = + WSAConnect(s, target, target_len, nullptr, nullptr, nullptr, nullptr); + if (out != 0) { + char* msg = gpr_format_message(WSAGetLastError()); + GRPC_CARES_TRACE_LOG("Connect error code:|%d|, msg:|%s|. fd:|%s|", + WSAGetLastError(), msg, GetName()); + gpr_free(msg); + // c-ares expects a posix-style connect API + out = -1; + } + return out; + } + + static void OnIocpReadable(void* arg, grpc_error* error) { + GrpcPolledFdWindows* polled_fd = static_cast(arg); + polled_fd->OnIocpReadableInner(error); + } + + void OnIocpReadableInner(grpc_error* error) { + if (error == GRPC_ERROR_NONE) { + if (winsocket_->read_info.wsa_error != 0) { + /* WSAEMSGSIZE would be due to receiving more data + * than our read buffer's fixed capacity. Assume that + * the connection is TCP and read the leftovers + * in subsequent c-ares reads. */ + if (winsocket_->read_info.wsa_error != WSAEMSGSIZE) { + GRPC_ERROR_UNREF(error); + char* msg = gpr_format_message(winsocket_->read_info.wsa_error); + GRPC_CARES_TRACE_LOG( + "OnIocpReadableInner. winsocket error:|%s|. 
fd:|%s|", msg, + GetName()); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + } + } + } + if (error == GRPC_ERROR_NONE) { + read_buf_ = grpc_slice_sub_no_ref(read_buf_, 0, + winsocket_->read_info.bytes_transfered); + } else { + grpc_slice_unref_internal(read_buf_); + read_buf_ = grpc_empty_slice(); + } + GRPC_CARES_TRACE_LOG( + "OnIocpReadable finishing. read buf length now:|%d|. :fd:|%s|", + GRPC_SLICE_LENGTH(read_buf_), GetName()); + ScheduleAndNullReadClosure(error); + } + + static void OnIocpWriteable(void* arg, grpc_error* error) { + GrpcPolledFdWindows* polled_fd = static_cast(arg); + polled_fd->OnIocpWriteableInner(error); + } + + void OnIocpWriteableInner(grpc_error* error) { + GRPC_CARES_TRACE_LOG("OnIocpWriteableInner. fd:|%s|", GetName()); + if (error == GRPC_ERROR_NONE) { + if (winsocket_->write_info.wsa_error != 0) { + char* msg = gpr_format_message(winsocket_->write_info.wsa_error); + GRPC_CARES_TRACE_LOG( + "OnIocpWriteableInner. winsocket error:|%s|. fd:|%s|", msg, + GetName()); + GRPC_ERROR_UNREF(error); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + } + } + GPR_ASSERT(write_state_ == WRITE_PENDING); + if (error == GRPC_ERROR_NONE) { + write_state_ = WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY; + write_buf_ = grpc_slice_sub_no_ref( + write_buf_, 0, winsocket_->write_info.bytes_transfered); + } else { + grpc_slice_unref_internal(write_buf_); + write_buf_ = grpc_empty_slice(); + } + ScheduleAndNullWriteClosure(error); + } + + bool gotten_into_driver_list() const { return gotten_into_driver_list_; } + void set_gotten_into_driver_list() { gotten_into_driver_list_ = true; } + + grpc_combiner* combiner_; + char recv_from_source_addr_[200]; + ares_socklen_t recv_from_source_addr_len_; + grpc_slice read_buf_; + grpc_slice write_buf_; + grpc_closure* read_closure_ = nullptr; + grpc_closure* write_closure_ = nullptr; + grpc_closure outer_read_closure_; + grpc_closure outer_write_closure_; + grpc_winsocket* winsocket_; + WriteState write_state_; + char* name_ = nullptr; + bool gotten_into_driver_list_; }; -GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, - grpc_pollset_set* driver_pollset_set) { - return nullptr; -} +struct SockToPolledFdEntry { + SockToPolledFdEntry(SOCKET s, GrpcPolledFdWindows* fd) + : socket(s), polled_fd(fd) {} + SOCKET socket; + GrpcPolledFdWindows* polled_fd; + SockToPolledFdEntry* next = nullptr; +}; + +/* A SockToPolledFdMap can make ares_socket_t types (SOCKET's on windows) + * to GrpcPolledFdWindow's, and is used to find the appropriate + * GrpcPolledFdWindows to handle a virtual socket call when c-ares makes that + * socket call on the ares_socket_t type. 
Instances are owned by and one-to-one + * with a GrpcPolledFdWindows factory and event driver */ +class SockToPolledFdMap { + public: + SockToPolledFdMap(grpc_combiner* combiner) { + combiner_ = GRPC_COMBINER_REF(combiner, "sock to polled fd map"); + } + + ~SockToPolledFdMap() { + GPR_ASSERT(head_ == nullptr); + GRPC_COMBINER_UNREF(combiner_, "sock to polled fd map"); + } + + void AddNewSocket(SOCKET s, GrpcPolledFdWindows* polled_fd) { + SockToPolledFdEntry* new_node = New(s, polled_fd); + new_node->next = head_; + head_ = new_node; + } + + GrpcPolledFdWindows* LookupPolledFd(SOCKET s) { + for (SockToPolledFdEntry* node = head_; node != nullptr; + node = node->next) { + if (node->socket == s) { + GPR_ASSERT(node->polled_fd != nullptr); + return node->polled_fd; + } + } + abort(); + } + + void RemoveEntry(SOCKET s) { + GPR_ASSERT(head_ != nullptr); + SockToPolledFdEntry** prev = &head_; + for (SockToPolledFdEntry* node = head_; node != nullptr; + node = node->next) { + if (node->socket == s) { + *prev = node->next; + Delete(node); + return; + } + prev = &node->next; + } + abort(); + } + + /* These virtual socket functions are called from within the c-ares + * library. These methods generally dispatch those socket calls to the + * appropriate methods. The virtual "socket" and "close" methods are + * special and instead create/add and remove/destroy GrpcPolledFdWindows + * objects. + */ + static ares_socket_t Socket(int af, int type, int protocol, void* user_data) { + SockToPolledFdMap* map = static_cast(user_data); + SOCKET s = WSASocket(af, type, protocol, nullptr, 0, WSA_FLAG_OVERLAPPED); + if (s == INVALID_SOCKET) { + return s; + } + grpc_tcp_set_non_block(s); + GrpcPolledFdWindows* polled_fd = + New(s, map->combiner_); + map->AddNewSocket(s, polled_fd); + return s; + } + + static int Connect(ares_socket_t as, const struct sockaddr* target, + ares_socklen_t target_len, void* user_data) { + SockToPolledFdMap* map = static_cast(user_data); + GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(as); + return polled_fd->Connect(target, target_len); + } + + static ares_ssize_t SendV(ares_socket_t as, const struct iovec* iov, + int iovec_count, void* user_data) { + SockToPolledFdMap* map = static_cast(user_data); + GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(as); + return polled_fd->SendV(iov, iovec_count); + } + + static ares_ssize_t RecvFrom(ares_socket_t as, void* data, size_t data_len, + int flags, struct sockaddr* from, + ares_socklen_t* from_len, void* user_data) { + SockToPolledFdMap* map = static_cast(user_data); + GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(as); + return polled_fd->RecvFrom(data, data_len, flags, from, from_len); + } + + static int CloseSocket(SOCKET s, void* user_data) { + SockToPolledFdMap* map = static_cast(user_data); + GrpcPolledFdWindows* polled_fd = map->LookupPolledFd(s); + map->RemoveEntry(s); + // If a gRPC polled fd has not made it in to the driver's list yet, then + // the driver has not and will never see this socket. 
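// The static Socket/Connect/SendV/RecvFrom/CloseSocket methods above are C-style
// trampolines: c-ares hands back the opaque user_data pointer, which is the map,
// and the map routes the call to the right wrapper object. A small self-contained
// sketch of that dispatch shape, using std::map in place of the hand-rolled
// linked list and simplified signatures rather than the real ares callbacks:
#include <cstdio>
#include <map>

// Toy wrapper object looked up per "socket" handle.
struct PolledFd {
  int handle = -1;
  int Send(const char* data, int len) {
    std::printf("fd %d sends %d bytes: %s\n", handle, len, data);
    return len;
  }
};

// Registry mapping handles to wrappers (the SockToPolledFdMap role).
struct FdMap {
  std::map<int, PolledFd> fds;

  // The library only ever sees a function pointer plus an opaque user_data;
  // the trampoline recovers the map and dispatches to the right wrapper.
  static int SendTrampoline(int handle, const char* data, int len,
                            void* user_data) {
    FdMap* map = static_cast<FdMap*>(user_data);
    return map->fds.at(handle).Send(data, len);
  }
};

int main() {
  FdMap map;
  map.fds[7] = PolledFd{7};
  // The "library side" holds only the function pointer and user_data.
  int (*send_fn)(int, const char*, int, void*) = &FdMap::SendTrampoline;
  send_fn(7, "query", 5, &map);
  return 0;
}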
+ if (!polled_fd->gotten_into_driver_list()) { + polled_fd->ShutdownLocked(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Shut down c-ares fd before without it ever having made it into the " + "driver's list")); + return 0; + } + return 0; + } + + private: + SockToPolledFdEntry* head_ = nullptr; + grpc_combiner* combiner_; +}; + +const struct ares_socket_functions custom_ares_sock_funcs = { + &SockToPolledFdMap::Socket /* socket */, + &SockToPolledFdMap::CloseSocket /* close */, + &SockToPolledFdMap::Connect /* connect */, + &SockToPolledFdMap::RecvFrom /* recvfrom */, + &SockToPolledFdMap::SendV /* sendv */, +}; + +class GrpcPolledFdFactoryWindows : public GrpcPolledFdFactory { + public: + GrpcPolledFdFactoryWindows(grpc_combiner* combiner) + : sock_to_polled_fd_map_(combiner) {} + + GrpcPolledFd* NewGrpcPolledFdLocked(ares_socket_t as, + grpc_pollset_set* driver_pollset_set, + grpc_combiner* combiner) override { + GrpcPolledFdWindows* polled_fd = sock_to_polled_fd_map_.LookupPolledFd(as); + // Set a flag so that the virtual socket "close" method knows it + // doesn't need to call ShutdownLocked, since now the driver will. + polled_fd->set_gotten_into_driver_list(); + return polled_fd; + } -void ConfigureAresChannelLocked(ares_channel* channel) { abort(); } + void ConfigureAresChannelLocked(ares_channel channel) override { + ares_set_socket_functions(channel, &custom_ares_sock_funcs, + &sock_to_polled_fd_map_); + } + + private: + SockToPolledFdMap sock_to_polled_fd_map_; +}; + +UniquePtr NewGrpcPolledFdFactory(grpc_combiner* combiner) { + return UniquePtr( + New(combiner)); +} } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index b3d6437e9a..485998f5e4 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -49,6 +49,8 @@ static gpr_mu g_init_mu; grpc_core::TraceFlag grpc_trace_cares_address_sorting(false, "cares_address_sorting"); +grpc_core::TraceFlag grpc_trace_cares_resolver(false, "cares_resolver"); + struct grpc_ares_request { /** indicates the DNS server to use, if specified */ struct ares_addr_port_node dns_server_addr; diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h index 17eaa7ccf0..ca5779e1d7 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h @@ -28,6 +28,13 @@ extern grpc_core::TraceFlag grpc_trace_cares_address_sorting; +extern grpc_core::TraceFlag grpc_trace_cares_resolver; + +#define GRPC_CARES_TRACE_LOG(format, ...) \ + if (grpc_trace_cares_resolver.enabled()) { \ + gpr_log(GPR_DEBUG, "(c-ares resolver) " format, __VA_ARGS__); \ + } + typedef struct grpc_ares_request grpc_ares_request; /* Asynchronously resolve \a name. 
Use \a default_port if a port isn't diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc index ce77231036..ad325fe215 100644 --- a/src/core/lib/iomgr/iocp_windows.cc +++ b/src/core/lib/iomgr/iocp_windows.cc @@ -89,10 +89,15 @@ grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) { } else { abort(); } - success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes, - FALSE, &flags); - info->bytes_transfered = bytes; - info->wsa_error = success ? 0 : WSAGetLastError(); + if (socket->shutdown_called) { + info->bytes_transfered = 0; + info->wsa_error = WSA_OPERATION_ABORTED; + } else { + success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes, + FALSE, &flags); + info->bytes_transfered = bytes; + info->wsa_error = success ? 0 : WSAGetLastError(); + } GPR_ASSERT(overlapped == &info->overlapped); grpc_socket_become_ready(socket, info); return GRPC_IOCP_WORK_WORK; diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc index 4ad31cb35d..999c6646ad 100644 --- a/src/core/lib/iomgr/socket_windows.cc +++ b/src/core/lib/iomgr/socket_windows.cc @@ -52,6 +52,10 @@ grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) { return r; } +SOCKET grpc_winsocket_wrapped_socket(grpc_winsocket* socket) { + return socket->socket; +} + /* Schedule a shutdown of the socket operations. Will call the pending operations to abort them. We need to do that this way because of the various callsites of that function, which happens to be in various diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h index b09b9da562..46d7d58356 100644 --- a/src/core/lib/iomgr/socket_windows.h +++ b/src/core/lib/iomgr/socket_windows.h @@ -92,6 +92,8 @@ typedef struct grpc_winsocket { it will be responsible for closing it. */ grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name); +SOCKET grpc_winsocket_wrapped_socket(grpc_winsocket* socket); + /* Initiate an asynchronous shutdown of the socket. Will call off any pending operation to cancel them. 
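// The iocp_windows.cc hunk above stops asking WSAGetOverlappedResult about a
// socket that has already been shut down and instead reports the completion as
// aborted with zero bytes transferred. A minimal, platform-neutral sketch of
// that guard; the constant value and field names below are stand-ins, not the
// actual Winsock or gRPC identifiers:
#include <cstdio>

// Stand-in for an "operation aborted" error code such as WSA_OPERATION_ABORTED.
constexpr int kOperationAborted = 995;

struct CompletionInfo {
  int bytes_transferred = 0;
  int error = 0;
};

struct Socket {
  bool shutdown_called = false;
};

// Only query the OS result if the socket is still live; otherwise synthesize an
// "aborted" result so the completion callback can clean up without touching it.
CompletionInfo HandleCompletion(const Socket& s, int os_bytes, int os_error) {
  CompletionInfo info;
  if (s.shutdown_called) {
    info.bytes_transferred = 0;
    info.error = kOperationAborted;
  } else {
    info.bytes_transferred = os_bytes;  // what the OS reported for the overlapped op
    info.error = os_error;
  }
  return info;
}

int main() {
  Socket live, closed;
  closed.shutdown_called = true;
  CompletionInfo a = HandleCompletion(live, 128, 0);
  CompletionInfo b = HandleCompletion(closed, 128, 0);
  std::printf("live: %d bytes, err %d\n", a.bytes_transferred, a.error);
  std::printf("shutdown: %d bytes, err %d\n", b.bytes_transferred, b.error);
  return 0;
}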
*/ void grpc_winsocket_shutdown(grpc_winsocket* socket); diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc index 5d316d477b..b3cb442f18 100644 --- a/src/core/lib/iomgr/tcp_windows.cc +++ b/src/core/lib/iomgr/tcp_windows.cc @@ -53,7 +53,7 @@ extern grpc_core::TraceFlag grpc_tcp_trace; -static grpc_error* set_non_block(SOCKET sock) { +grpc_error* grpc_tcp_set_non_block(SOCKET sock) { int status; uint32_t param = 1; DWORD ret; @@ -90,7 +90,7 @@ static grpc_error* enable_loopback_fast_path(SOCKET sock) { grpc_error* grpc_tcp_prepare_socket(SOCKET sock) { grpc_error* err; - err = set_non_block(sock); + err = grpc_tcp_set_non_block(sock); if (err != GRPC_ERROR_NONE) return err; err = set_dualstack(sock); if (err != GRPC_ERROR_NONE) return err; diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h index 161a545a2a..04ef8102b6 100644 --- a/src/core/lib/iomgr/tcp_windows.h +++ b/src/core/lib/iomgr/tcp_windows.h @@ -46,6 +46,8 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket, grpc_error* grpc_tcp_prepare_socket(SOCKET sock); +grpc_error* grpc_tcp_set_non_block(SOCKET sock); + #endif #endif /* GRPC_CORE_LIB_IOMGR_TCP_WINDOWS_H */ diff --git a/templates/test/cpp/naming/resolver_component_tests_defs.include b/templates/test/cpp/naming/resolver_component_tests_defs.include index bc981dc83e..b34845e01a 100644 --- a/templates/test/cpp/naming/resolver_component_tests_defs.include +++ b/templates/test/cpp/naming/resolver_component_tests_defs.include @@ -22,6 +22,7 @@ import tempfile import os import time import signal +import platform argp = argparse.ArgumentParser(description='Run c-ares resolver tests') @@ -43,6 +44,11 @@ args = argp.parse_args() def test_runner_log(msg): sys.stderr.write('\n%s: %s\n' % (__file__, msg)) +def python_args(arg_list): + if platform.system() == 'Windows': + return [sys.executable] + arg_list + return arg_list + cur_resolver = os.environ.get('GRPC_DNS_RESOLVER') if cur_resolver and cur_resolver != 'ares': test_runner_log(('WARNING: cur resolver set to %s. 
This set of tests ' @@ -50,26 +56,27 @@ if cur_resolver and cur_resolver != 'ares': test_runner_log('Exit 1 without running tests.') sys.exit(1) os.environ.update({'GRPC_DNS_RESOLVER': 'ares'}) +os.environ.update({'GRPC_TRACE': 'cares_resolver'}) def wait_until_dns_server_is_up(args, dns_server_subprocess, dns_server_subprocess_output): for i in range(0, 30): test_runner_log('Health check: attempt to connect to DNS server over TCP.') - tcp_connect_subprocess = subprocess.Popen([ + tcp_connect_subprocess = subprocess.Popen(python_args([ args.tcp_connect_bin_path, '--server_host', '127.0.0.1', '--server_port', str(args.dns_server_port), - '--timeout', str(1)]) + '--timeout', str(1)])) tcp_connect_subprocess.communicate() if tcp_connect_subprocess.returncode == 0: test_runner_log(('Health check: attempt to make an A-record ' 'query to DNS server.')) - dns_resolver_subprocess = subprocess.Popen([ + dns_resolver_subprocess = subprocess.Popen(python_args([ args.dns_resolver_bin_path, '--qname', 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp', '--server_host', '127.0.0.1', - '--server_port', str(args.dns_server_port)], + '--server_port', str(args.dns_server_port)]), stdout=subprocess.PIPE) dns_resolver_stdout, _ = dns_resolver_subprocess.communicate() if dns_resolver_subprocess.returncode == 0: @@ -91,10 +98,10 @@ def wait_until_dns_server_is_up(args, dns_server_subprocess_output = tempfile.mktemp() with open(dns_server_subprocess_output, 'w') as l: - dns_server_subprocess = subprocess.Popen([ + dns_server_subprocess = subprocess.Popen(python_args([ args.dns_server_bin_path, '--port', str(args.dns_server_port), - '--records_config_path', args.records_config_path], + '--records_config_path', args.records_config_path]), stdin=subprocess.PIPE, stdout=l, stderr=l) diff --git a/test/cpp/naming/cancel_ares_query_test.cc b/test/cpp/naming/cancel_ares_query_test.cc index 0d59bf6fb6..dec7c171dc 100644 --- a/test/cpp/naming/cancel_ares_query_test.cc +++ b/test/cpp/naming/cancel_ares_query_test.cc @@ -45,11 +45,14 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" -// TODO: pull in different headers when enabling this -// test on windows. Also set BAD_SOCKET_RETURN_VAL -// to INVALID_SOCKET on windows. 
+#ifdef GPR_WINDOWS +#include "src/core/lib/iomgr/sockaddr_windows.h" +#include "src/core/lib/iomgr/socket_windows.h" +#define BAD_SOCKET_RETURN_VAL INVALID_SOCKET +#else #include "src/core/lib/iomgr/sockaddr_posix.h" #define BAD_SOCKET_RETURN_VAL -1 +#endif namespace { @@ -91,7 +94,13 @@ class FakeNonResponsiveDNSServer { abort(); } } - ~FakeNonResponsiveDNSServer() { close(socket_); } + ~FakeNonResponsiveDNSServer() { +#ifdef GPR_WINDOWS + closesocket(socket_); +#else + close(socket_); +#endif + } private: int socket_; @@ -193,6 +202,38 @@ TEST(CancelDuringAresQuery, TestCancelActiveDNSQuery) { TestCancelActiveDNSQuery(&args); } +#ifdef GPR_WINDOWS + +void MaybePollArbitraryPollsetTwice() { + grpc_pollset* pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size()); + gpr_mu* mu; + grpc_pollset_init(pollset, &mu); + grpc_pollset_worker* worker = nullptr; + // Make a zero timeout poll + gpr_mu_lock(mu); + GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(pollset, &worker, grpc_core::ExecCtx::Get()->Now())); + gpr_mu_unlock(mu); + grpc_core::ExecCtx::Get()->Flush(); + // Make a second zero-timeout poll (in case the first one + // short-circuited by picking up a previous "kick") + gpr_mu_lock(mu); + GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(pollset, &worker, grpc_core::ExecCtx::Get()->Now())); + gpr_mu_unlock(mu); + grpc_core::ExecCtx::Get()->Flush(); + grpc_pollset_destroy(pollset); + gpr_free(pollset); +} + +#else + +void MaybePollArbitraryPollsetTwice() {} + +#endif + TEST(CancelDuringAresQuery, TestFdsAreDeletedFromPollsetSet) { grpc_core::ExecCtx exec_ctx; ArgsStruct args; @@ -209,6 +250,12 @@ TEST(CancelDuringAresQuery, TestFdsAreDeletedFromPollsetSet) { // this test. This test only cares about what happens to fd's that c-ares // opens. TestCancelActiveDNSQuery(&args); + // This test relies on the assumption that cancelling a c-ares query + // will flush out all callbacks on the current exec ctx, which is true + // on posix platforms but not on Windows, because fd shutdown on Windows + // requires a trip through the polling loop to schedule the callback. + // So we need to do extra polling work on Windows to free things up. + MaybePollArbitraryPollsetTwice(); EXPECT_EQ(grpc_iomgr_count_objects_for_testing(), 0u); grpc_pollset_set_destroy(fake_other_pollset_set); } diff --git a/test/cpp/naming/gen_build_yaml.py b/test/cpp/naming/gen_build_yaml.py index 5dad2ea7af..1c9d0676b8 100755 --- a/test/cpp/naming/gen_build_yaml.py +++ b/test/cpp/naming/gen_build_yaml.py @@ -68,7 +68,7 @@ def main(): 'gtest': False, 'run': False, 'src': ['test/cpp/naming/resolver_component_test.cc'], - 'platforms': ['linux', 'posix', 'mac'], + 'platforms': ['linux', 'posix', 'mac', 'windows'], 'deps': [ 'grpc++_test_util' + unsecure_build_config_suffix, 'grpc_test_util' + unsecure_build_config_suffix, @@ -129,7 +129,7 @@ def main(): 'gtest': True, 'run': True, 'src': ['test/cpp/naming/cancel_ares_query_test.cc'], - 'platforms': ['linux', 'posix', 'mac'], + 'platforms': ['linux', 'posix', 'mac', 'windows'], 'deps': [ 'grpc++_test_util', 'grpc_test_util', diff --git a/test/cpp/naming/manual_run_resolver_component_test.py b/test/cpp/naming/manual_run_resolver_component_test.py new file mode 100644 index 0000000000..fb2157741a --- /dev/null +++ b/test/cpp/naming/manual_run_resolver_component_test.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# Copyright 2015 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess +import sys + +# The c-ares test suite doesn't get ran regularly on Windows, but +# this script provides a way to run a lot of the tests manually. +_MSBUILD_CONFIG = os.environ['CONFIG'] +os.chdir(os.path.join('..', '..', os.getcwd())) +# This port is arbitrary, but it needs to be available. +_DNS_SERVER_PORT = 15353 + +subprocess.call([ + sys.executable, + 'test\\cpp\\naming\\resolver_component_tests_runner.py', + '--test_bin_path', 'cmake\\build\\%s\\resolver_component_test.exe' % _MSBUILD_CONFIG, + '--dns_server_bin_path', 'test\\cpp\\naming\\utils\\dns_server.py', + '--records_config_path', 'test\\cpp\\naming\\resolver_test_record_groups.yaml', + '--dns_server_port', str(_DNS_SERVER_PORT), + '--dns_resolver_bin_path', 'test\\cpp\\naming\\utils\\dns_resolver.py', + '--tcp_connect_bin_path', 'test\\cpp\\naming\\utils\\tcp_connect.py', +]) diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc index 6ac548120c..3dc6e7178c 100644 --- a/test/cpp/naming/resolver_component_test.cc +++ b/test/cpp/naming/resolver_component_test.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include #include @@ -55,8 +57,15 @@ // TODO: pull in different headers when enabling this // test on windows. Also set BAD_SOCKET_RETURN_VAL // to INVALID_SOCKET on windows. +#ifdef GPR_WINDOWS +#include "src/core/lib/iomgr/sockaddr_windows.h" +#include "src/core/lib/iomgr/socket_windows.h" +#include "src/core/lib/iomgr/tcp_windows.h" +#define BAD_SOCKET_RETURN_VAL INVALID_SOCKET +#else #include "src/core/lib/iomgr/sockaddr_posix.h" #define BAD_SOCKET_RETURN_VAL -1 +#endif using grpc::SubProcess; using std::vector; @@ -241,6 +250,62 @@ void CheckLBPolicyResultLocked(grpc_channel_args* channel_args, } } +#ifdef GPR_WINDOWS +void OpenAndCloseSocketsStressLoop(int dummy_port, gpr_event* done_ev) { + sockaddr_in6 addr; + memset(&addr, 0, sizeof(addr)); + addr.sin6_family = AF_INET6; + addr.sin6_port = htons(dummy_port); + ((char*)&addr.sin6_addr)[15] = 1; + for (;;) { + if (gpr_event_get(done_ev)) { + return; + } + std::vector sockets; + for (size_t i = 0; i < 50; i++) { + SOCKET s = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nullptr, 0, + WSA_FLAG_OVERLAPPED); + ASSERT_TRUE(s != BAD_SOCKET_RETURN_VAL) + << "Failed to create TCP ipv6 socket"; + gpr_log(GPR_DEBUG, "Opened socket: %d", s); + char val = 1; + ASSERT_TRUE(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)) != + SOCKET_ERROR) + << "Failed to set socketopt reuseaddr. WSA error: " + + std::to_string(WSAGetLastError()); + ASSERT_TRUE(grpc_tcp_set_non_block(s) == GRPC_ERROR_NONE) + << "Failed to set socket non-blocking"; + ASSERT_TRUE(bind(s, (const sockaddr*)&addr, sizeof(addr)) != SOCKET_ERROR) + << "Failed to bind socket " + std::to_string(s) + + " to [::1]:" + std::to_string(dummy_port) + + ". 
WSA error: " + std::to_string(WSAGetLastError()); + ASSERT_TRUE(listen(s, 1) != SOCKET_ERROR) + << "Failed to listen on socket " + std::to_string(s) + + ". WSA error: " + std::to_string(WSAGetLastError()); + sockets.push_back(s); + } + // Do a non-blocking accept followed by a close on all of those sockets. + // Do this in a separate loop to try to induce a time window to hit races. + for (size_t i = 0; i < sockets.size(); i++) { + gpr_log(GPR_DEBUG, "non-blocking accept then close on %d", sockets[i]); + ASSERT_TRUE(accept(sockets[i], nullptr, nullptr) == INVALID_SOCKET) + << "Accept on dummy socket unexpectedly accepted actual connection."; + ASSERT_TRUE(WSAGetLastError() == WSAEWOULDBLOCK) + << "OpenAndCloseSocketsStressLoop accept on socket " + + std::to_string(sockets[i]) + + " failed in " + "an unexpected way. " + "WSA error: " + + std::to_string(WSAGetLastError()) + + ". Socket use-after-close bugs are likely."; + ASSERT_TRUE(closesocket(sockets[i]) != SOCKET_ERROR) + << "Failed to close socket: " + std::to_string(sockets[i]) + + ". WSA error: " + std::to_string(WSAGetLastError()); + } + } + return; +} +#else void OpenAndCloseSocketsStressLoop(int dummy_port, gpr_event* done_ev) { // The goal of this loop is to catch socket // "use after close" bugs within the c-ares resolver by acting @@ -311,6 +376,7 @@ void OpenAndCloseSocketsStressLoop(int dummy_port, gpr_event* done_ev) { } } } +#endif void CheckResolverResultLocked(void* argsp, grpc_error* err) { EXPECT_EQ(err, GRPC_ERROR_NONE); @@ -372,9 +438,9 @@ void RunResolvesRelevantRecordsTest(void (*OnDoneLocked)(void* arg, args.expected_lb_policy = FLAGS_expected_lb_policy; // maybe build the address with an authority char* whole_uri = nullptr; - GPR_ASSERT(asprintf(&whole_uri, "dns://%s/%s", - FLAGS_local_dns_server_address.c_str(), - FLAGS_target_name.c_str())); + GPR_ASSERT(gpr_asprintf(&whole_uri, "dns://%s/%s", + FLAGS_local_dns_server_address.c_str(), + FLAGS_target_name.c_str())); // create resolver and resolve grpc_core::OrphanablePtr resolver = grpc_core::ResolverRegistry::CreateResolver(whole_uri, nullptr, diff --git a/test/cpp/naming/resolver_component_tests_runner.py b/test/cpp/naming/resolver_component_tests_runner.py index 69386ebeb0..1873eec35b 100755 --- a/test/cpp/naming/resolver_component_tests_runner.py +++ b/test/cpp/naming/resolver_component_tests_runner.py @@ -22,6 +22,7 @@ import tempfile import os import time import signal +import platform argp = argparse.ArgumentParser(description='Run c-ares resolver tests') @@ -43,6 +44,11 @@ args = argp.parse_args() def test_runner_log(msg): sys.stderr.write('\n%s: %s\n' % (__file__, msg)) +def python_args(arg_list): + if platform.system() == 'Windows': + return [sys.executable] + arg_list + return arg_list + cur_resolver = os.environ.get('GRPC_DNS_RESOLVER') if cur_resolver and cur_resolver != 'ares': test_runner_log(('WARNING: cur resolver set to %s. 
This set of tests ' @@ -50,26 +56,27 @@ if cur_resolver and cur_resolver != 'ares': test_runner_log('Exit 1 without running tests.') sys.exit(1) os.environ.update({'GRPC_DNS_RESOLVER': 'ares'}) +os.environ.update({'GRPC_TRACE': 'cares_resolver'}) def wait_until_dns_server_is_up(args, dns_server_subprocess, dns_server_subprocess_output): for i in range(0, 30): test_runner_log('Health check: attempt to connect to DNS server over TCP.') - tcp_connect_subprocess = subprocess.Popen([ + tcp_connect_subprocess = subprocess.Popen(python_args([ args.tcp_connect_bin_path, '--server_host', '127.0.0.1', '--server_port', str(args.dns_server_port), - '--timeout', str(1)]) + '--timeout', str(1)])) tcp_connect_subprocess.communicate() if tcp_connect_subprocess.returncode == 0: test_runner_log(('Health check: attempt to make an A-record ' 'query to DNS server.')) - dns_resolver_subprocess = subprocess.Popen([ + dns_resolver_subprocess = subprocess.Popen(python_args([ args.dns_resolver_bin_path, '--qname', 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp', '--server_host', '127.0.0.1', - '--server_port', str(args.dns_server_port)], + '--server_port', str(args.dns_server_port)]), stdout=subprocess.PIPE) dns_resolver_stdout, _ = dns_resolver_subprocess.communicate() if dns_resolver_subprocess.returncode == 0: @@ -91,10 +98,10 @@ def wait_until_dns_server_is_up(args, dns_server_subprocess_output = tempfile.mktemp() with open(dns_server_subprocess_output, 'w') as l: - dns_server_subprocess = subprocess.Popen([ + dns_server_subprocess = subprocess.Popen(python_args([ args.dns_server_bin_path, '--port', str(args.dns_server_port), - '--records_config_path', args.records_config_path], + '--records_config_path', args.records_config_path]), stdin=subprocess.PIPE, stdout=l, stderr=l) @@ -112,6 +119,18 @@ wait_until_dns_server_is_up(args, dns_server_subprocess_output) num_test_failures = 0 +test_runner_log('Run test with target: %s' % 'no-srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.') +current_test_subprocess = subprocess.Popen([ + args.test_bin_path, + '--target_name', 'no-srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.', + '--expected_addrs', '5.5.5.5:443,False', + '--expected_chosen_service_config', '', + '--expected_lb_policy', '', + '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port]) +current_test_subprocess.communicate() +if current_test_subprocess.returncode != 0: + num_test_failures += 1 + test_runner_log('Run test with target: %s' % 'srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.') current_test_subprocess = subprocess.Popen([ args.test_bin_path, diff --git a/test/cpp/naming/resolver_test_record_groups.yaml b/test/cpp/naming/resolver_test_record_groups.yaml index 6c4f89d09b..3c51a00c7b 100644 --- a/test/cpp/naming/resolver_test_record_groups.yaml +++ b/test/cpp/naming/resolver_test_record_groups.yaml @@ -1,5 +1,13 @@ resolver_tests_common_zone_name: resolver-tests-version-4.grpctestingexp. 
resolver_component_tests: +- expected_addrs: + - {address: '5.5.5.5:443', is_balancer: false} + expected_chosen_service_config: null + expected_lb_policy: null + record_to_resolve: no-srv-ipv4-single-target + records: + no-srv-ipv4-single-target: + - {TTL: '2100', data: 5.5.5.5, type: A} - expected_addrs: - {address: '1.2.3.4:1234', is_balancer: true} expected_chosen_service_config: null diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 5815f82fef..cf3b54e044 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -5784,7 +5784,8 @@ "ci_platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "cpu_cost": 1.0, "exclude_configs": [], @@ -5796,7 +5797,8 @@ "platforms": [ "linux", "mac", - "posix" + "posix", + "windows" ], "uses_polling": true }, -- cgit v1.2.3