From b51f6aba269c76519d8d1e24b99908c5f712a5e7 Mon Sep 17 00:00:00 2001
From: Carter Sande
Date: Fri, 22 Jun 2018 14:31:29 +0000
Subject: Add Solaris and AIX autodetection

---
 include/grpc/impl/codegen/port_platform.h | 39 +++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

(limited to 'include/grpc')

diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index 01ce5f03e9..7f9d9fd82b 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -282,6 +282,45 @@
 #else /* _LP64 */
 #define GPR_ARCH_32 1
 #endif /* _LP64 */
+#elif defined(__sun)
+#define GPR_PLATFORM_STRING "solaris"
+#define GPR_SOLARIS 1
+#define GPR_CPU_POSIX 1
+#define GPR_GCC_ATOMIC 1
+#define GPR_GCC_TLS 1
+#define GPR_POSIX_LOG 1
+#define GPR_POSIX_ENV 1
+#define GPR_POSIX_TMPFILE 1
+#define GPR_POSIX_STRING 1
+#define GPR_POSIX_SUBPROCESS 1
+#define GPR_POSIX_SYNC 1
+#define GPR_POSIX_TIME 1
+#ifdef _LP64
+#define GPR_ARCH_64 1
+#else /* _LP64 */
+#define GPR_ARCH_32 1
+#endif /* _LP64 */
+#elif defined(_AIX)
+#define GPR_PLATFORM_STRING "aix"
+#ifndef _ALL_SOURCE
+#define _ALL_SOURCE
+#endif
+#define GPR_AIX 1
+#define GPR_CPU_POSIX 1
+#define GPR_GCC_ATOMIC 1
+#define GPR_GCC_TLS 1
+#define GPR_POSIX_LOG 1
+#define GPR_POSIX_ENV 1
+#define GPR_POSIX_TMPFILE 1
+#define GPR_POSIX_STRING 1
+#define GPR_POSIX_SUBPROCESS 1
+#define GPR_POSIX_SYNC 1
+#define GPR_POSIX_TIME 1
+#ifdef _LP64
+#define GPR_ARCH_64 1
+#else /* _LP64 */
+#define GPR_ARCH_32 1
+#endif /* _LP64 */
 #elif defined(__native_client__)
 #define GPR_PLATFORM_STRING "nacl"
 #ifndef _BSD_SOURCE
--
cgit v1.2.3


From d5736045d35bcf2823f97cd740b7953514b76215 Mon Sep 17 00:00:00 2001
From: Carter Sande
Date: Tue, 3 Jul 2018 21:34:34 +0000
Subject: port_platform.h: Make Solaris detection macro more precise

---
 include/grpc/impl/codegen/port_platform.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/grpc')

diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index 7f9d9fd82b..bb8cba9d55 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -282,7 +282,7 @@
 #else /* _LP64 */
 #define GPR_ARCH_32 1
 #endif /* _LP64 */
-#elif defined(__sun)
+#elif defined(__sun) && defined(__SVR4)
 #define GPR_PLATFORM_STRING "solaris"
 #define GPR_SOLARIS 1
 #define GPR_CPU_POSIX 1
--
cgit v1.2.3


From a0a061034351a6cd616ee2d01fae99604216b9ba Mon Sep 17 00:00:00 2001
From: Carter Sande
Date: Thu, 5 Jul 2018 18:55:14 +0000
Subject: port_platform.h: GPR_GETPID_IN_UNISTD_H on Solaris/AIX

---
 include/grpc/impl/codegen/port_platform.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/grpc')

diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index bb8cba9d55..e850d41c5c 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -295,6 +295,7 @@
 #define GPR_POSIX_SUBPROCESS 1
 #define GPR_POSIX_SYNC 1
 #define GPR_POSIX_TIME 1
+#define GPR_GETPID_IN_UNISTD_H 1
 #ifdef _LP64
 #define GPR_ARCH_64 1
 #else /* _LP64 */
@@ -316,6 +317,7 @@
 #define GPR_POSIX_SUBPROCESS 1
 #define GPR_POSIX_SYNC 1
 #define GPR_POSIX_TIME 1
+#define GPR_GETPID_IN_UNISTD_H 1
 #ifdef _LP64
 #define GPR_ARCH_64 1
 #else /* _LP64 */
--
cgit v1.2.3
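The three patches above only define feature-selection macros; the platform-specific code paths elsewhere in the tree key off them. As a rough illustration of how a consumer of these macros looks (this sketch is not taken from the patches; log_pid is a hypothetical helper):

    #include <grpc/impl/codegen/port_platform.h>

    #include <stdio.h>
    #ifdef GPR_GETPID_IN_UNISTD_H
    /* On Solaris and AIX, getpid() is declared in <unistd.h>. */
    #include <unistd.h>
    #endif

    /* Hypothetical helper: logs the detected platform string, plus the
     * process id on platforms where getpid() comes from <unistd.h>. */
    static void log_pid(void) {
    #ifdef GPR_GETPID_IN_UNISTD_H
      printf("platform=%s pid=%ld\n", GPR_PLATFORM_STRING, (long)getpid());
    #else
      printf("platform=%s\n", GPR_PLATFORM_STRING);
    #endif
    }
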
From bdd13cb0aef7d3f6dbc467148b4b3158485359eb Mon Sep 17 00:00:00 2001
From: Sree Kuchibhotla
Date: Wed, 1 Aug 2018 11:22:40 -0700
Subject: Revert "Revert "Restrict the number of threads in C++ sync server""

---
 grpc.def                                        |   1 +
 include/grpc/grpc.h                             |   4 +
 include/grpcpp/resource_quota.h                 |  16 ++-
 include/grpcpp/server.h                         |   3 +-
 src/core/lib/iomgr/resource_quota.cc            |  78 +++++++++++++
 src/core/lib/iomgr/resource_quota.h             |  16 +++
 src/cpp/common/resource_quota_cc.cc             |   4 +
 src/cpp/server/server_builder.cc                |   2 +-
 src/cpp/server/server_cc.cc                     |  31 ++++-
 src/cpp/thread_manager/thread_manager.cc        |  53 +++++++--
 src/cpp/thread_manager/thread_manager.h         |  48 +++++++-
 src/ruby/ext/grpc/rb_grpc_imports.generated.c   |   2 +
 src/ruby/ext/grpc/rb_grpc_imports.generated.h   |   3 +
 test/core/iomgr/resource_quota_test.cc          |  97 ++++++++++++++++
 test/core/surface/public_headers_must_be_c89.c  |   1 +
 test/cpp/thread_manager/thread_manager_test.cc  | 149 +++++++++++++++++++------
 16 files changed, 445 insertions(+), 63 deletions(-)

(limited to 'include/grpc')

diff --git a/grpc.def b/grpc.def
index 5b98792662..312e916682 100644
--- a/grpc.def
+++ b/grpc.def
@@ -68,6 +68,7 @@ EXPORTS
     grpc_resource_quota_ref
     grpc_resource_quota_unref
     grpc_resource_quota_resize
+    grpc_resource_quota_set_max_threads
    grpc_resource_quota_arg_vtable
    grpc_channelz_get_top_channels
    grpc_channelz_get_channel
diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h
index f0eb2c0121..eb0251443c 100644
--- a/include/grpc/grpc.h
+++ b/include/grpc/grpc.h
@@ -450,6 +450,10 @@ GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota* resource_quota);
 GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
                                         size_t new_size);
 
+/** Update the maximum number of threads allowed */
+GRPCAPI void grpc_resource_quota_set_max_threads(
+    grpc_resource_quota* resource_quota, int new_max_threads);
+
 /** Fetch a vtable for a grpc_channel_arg that points to a grpc_resource_quota */
 GRPCAPI const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void);
 
diff --git a/include/grpcpp/resource_quota.h b/include/grpcpp/resource_quota.h
index 554437a40d..50bd1cb849 100644
--- a/include/grpcpp/resource_quota.h
+++ b/include/grpcpp/resource_quota.h
@@ -26,10 +26,10 @@ struct grpc_resource_quota;
 
 namespace grpc {
 
-/// ResourceQuota represents a bound on memory usage by the gRPC library.
-/// A ResourceQuota can be attached to a server (via \a ServerBuilder),
+/// ResourceQuota represents a bound on memory and thread usage by the gRPC
+/// library. A ResourceQuota can be attached to a server (via \a ServerBuilder),
 /// or a client channel (via \a ChannelArguments).
-/// gRPC will attempt to keep memory used by all attached entities
+/// gRPC will attempt to keep memory and threads used by all attached entities
 /// below the ResourceQuota bound.
 class ResourceQuota final : private GrpcLibraryCodegen {
  public:
@@ -44,6 +44,16 @@ class ResourceQuota final : private GrpcLibraryCodegen {
   /// No time bound is given for this to occur however.
   ResourceQuota& Resize(size_t new_size);
 
+  /// Set the max number of threads that can be allocated from this
+  /// ResourceQuota object.
+  ///
+  /// If the new_max_threads value is smaller than the current value, no new
+  /// threads are allocated until the number of active threads falls below
+  /// new_max_threads. There is no time bound on when this may happen, i.e.
+  /// none of the current threads are forcefully destroyed and all threads run
+  /// their normal course.
+  ResourceQuota& SetMaxThreads(int new_max_threads);
+
   grpc_resource_quota* c_resource_quota() const { return impl_; }
 
  private:
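From the application's point of view, the new thread bound is set on a ResourceQuota and handed to the server builder via the pre-existing SetResourceQuota() API. A minimal usage sketch (not part of this patch; the service pointer and address are placeholders):

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/resource_quota.h>

    void RunServer(grpc::Service* my_service /* placeholder service */) {
      grpc::ResourceQuota quota("server-quota");
      quota.SetMaxThreads(100);           // bound the sync server's thread pool
      quota.Resize(1024 * 1024 * 1024);   // optional: 1 GiB memory bound

      grpc::ServerBuilder builder;
      builder.SetResourceQuota(quota);
      builder.AddListeningPort("0.0.0.0:50051",
                               grpc::InsecureServerCredentials());
      builder.RegisterService(my_service);
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      server->Wait();
    }
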
diff --git a/include/grpcpp/server.h b/include/grpcpp/server.h
index 81c3907f86..189cf8accf 100644
--- a/include/grpcpp/server.h
+++ b/include/grpcpp/server.h
@@ -144,7 +144,8 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
   Server(int max_message_size, ChannelArguments* args,
          std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
              sync_server_cqs,
-         int min_pollers, int max_pollers, int sync_cq_timeout_msec);
+         grpc_resource_quota* server_rq, int min_pollers, int max_pollers,
+         int sync_cq_timeout_msec);
 
   /// Start the server.
   ///
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index 539bc120ce..b6fc7579f7 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -96,6 +96,9 @@ struct grpc_resource_user {
      list, false otherwise */
   bool added_to_free_pool;
 
+  /* The number of threads currently allocated to this resource user */
+  gpr_atm num_threads_allocated;
+
   /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive
      reclaimer */
   grpc_closure* reclaimers[2];
@@ -135,12 +138,33 @@
 
   gpr_atm last_size;
 
+  /* Mutex to protect max_threads and num_threads_allocated */
+  /* Note: We could have used gpr_atm for max_threads and num_threads_allocated
+   * and avoided having this mutex; but in that case, each invocation of the
+   * function grpc_resource_user_allocate_threads() would have had to do at
+   * least two atomic loads (for max_threads and num_threads_allocated) followed
+   * by a CAS (on num_threads_allocated).
+   * Moreover, we expect grpc_resource_user_allocate_threads() to often be
+   * called concurrently, thereby increasing the chances of the CAS operation
+   * failing. This additional complexity is not worth the tiny perf gain we
+   * may (or may not) get by using atomics */
+  gpr_mu thread_count_mu;
+
+  /* Max number of threads allowed */
+  int max_threads;
+
+  /* Number of threads currently allocated via this resource_quota object */
+  int num_threads_allocated;
+
   /* Has rq_step been scheduled to occur? */
  bool step_scheduled;
+
   /* Are we currently reclaiming memory? */
   bool reclaiming;
+
   /* Closure around rq_step */
   grpc_closure rq_step_closure;
+
   /* Closure around rq_reclamation_done */
   grpc_closure rq_reclamation_done_closure;
@@ -524,6 +548,11 @@ static void ru_shutdown(void* ru, grpc_error* error) {
 static void ru_destroy(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
   GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
+  // Free all the remaining thread quota
+  grpc_resource_user_free_threads(resource_user,
+                                  static_cast<int>(gpr_atm_no_barrier_load(
+                                      &resource_user->num_threads_allocated)));
+
   for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
     rulist_remove(resource_user, static_cast<grpc_rulist>(i));
   }
@@ -594,6 +623,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
   resource_quota->free_pool = INT64_MAX;
   resource_quota->size = INT64_MAX;
   gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
+  gpr_mu_init(&resource_quota->thread_count_mu);
+  resource_quota->max_threads = INT_MAX;
+  resource_quota->num_threads_allocated = 0;
   resource_quota->step_scheduled = false;
   resource_quota->reclaiming = false;
   gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -616,6 +648,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
 
 void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
   if (gpr_unref(&resource_quota->refs)) {
+    // No outstanding thread quota
+    GPR_ASSERT(resource_quota->num_threads_allocated == 0);
     GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
     gpr_free(resource_quota->name);
     gpr_free(resource_quota);
@@ -646,6 +680,15 @@ double grpc_resource_quota_get_memory_pressure(
          (static_cast<double>(MEMORY_USAGE_ESTIMATION_MAX));
 }
 
+/* Public API */
+void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota,
+                                         int new_max_threads) {
+  GPR_ASSERT(new_max_threads >= 0);
+  gpr_mu_lock(&resource_quota->thread_count_mu);
+  resource_quota->max_threads = new_max_threads;
+  gpr_mu_unlock(&resource_quota->thread_count_mu);
+}
+
 /* Public API */
 void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
                                 size_t size) {
@@ -731,6 +774,7 @@ grpc_resource_user* grpc_resource_user_create(
   grpc_closure_list_init(&resource_user->on_allocated);
   resource_user->allocating = false;
   resource_user->added_to_free_pool = false;
+  gpr_atm_no_barrier_store(&resource_user->num_threads_allocated, 0);
   resource_user->reclaimers[0] = nullptr;
   resource_user->reclaimers[1] = nullptr;
   resource_user->new_reclaimers[0] = nullptr;
@@ -785,6 +829,40 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
   }
 }
 
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+                                         int thread_count) {
+  GPR_ASSERT(thread_count >= 0);
+  bool is_success = false;
+  gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
+  grpc_resource_quota* rq = resource_user->resource_quota;
+  if (rq->num_threads_allocated + thread_count <= rq->max_threads) {
+    rq->num_threads_allocated += thread_count;
+    gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated,
+                                 thread_count);
+    is_success = true;
+  }
+  gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
+  return is_success;
+}
+
+void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
+                                     int thread_count) {
+  GPR_ASSERT(thread_count >= 0);
+  gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
+  grpc_resource_quota* rq = resource_user->resource_quota;
+  rq->num_threads_allocated -= thread_count;
+  int old_count = static_cast<int>(gpr_atm_no_barrier_fetch_add(
+      &resource_user->num_threads_allocated, -thread_count));
+  if (old_count < thread_count || rq->num_threads_allocated < 0) {
+    gpr_log(GPR_ERROR,
+            "Releasing more threads (%d) than currently allocated (rq threads: "
+            "%d, ru threads: %d)",
+            thread_count, rq->num_threads_allocated + thread_count, old_count);
+    abort();
+  }
+  gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
+}
+
 void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
                               grpc_closure* optional_on_done) {
   gpr_mu_lock(&resource_user->mu);
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 937daf8728..1d5e95e04a 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -93,6 +93,22 @@ void grpc_resource_user_ref(grpc_resource_user* resource_user);
 void grpc_resource_user_unref(grpc_resource_user* resource_user);
 void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
 
+/* Attempts to get quota (from the resource_user) to create 'thd_count' number
+ * of threads. Returns true if successful (i.e. the caller is now free to create
+ * 'thd_count' number of threads) or false if quota is not available */
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+                                         int thd_count);
+/* Releases 'thd_count' worth of quota back to the resource user. The quota
+ * should have been previously obtained successfully by calling
+ * grpc_resource_user_allocate_threads().
+ *
+ * Note: There need not be an exact one-to-one correspondence between
+ * grpc_resource_user_allocate_threads() and grpc_resource_user_free_threads()
+ * calls. The only requirement is that the number of threads allocated should
+ * all eventually be released */
+void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
+                                     int thd_count);
+
 /* Allocate from the resource user (and its quota).
    If optional_on_done is NULL, then allocate immediately. This may push the
    quota over-limit, at which point reclamation will kick in.
diff --git a/src/cpp/common/resource_quota_cc.cc b/src/cpp/common/resource_quota_cc.cc
index daeb0ba171..276e5f7954 100644
--- a/src/cpp/common/resource_quota_cc.cc
+++ b/src/cpp/common/resource_quota_cc.cc
@@ -33,4 +33,8 @@ ResourceQuota& ResourceQuota::Resize(size_t new_size) {
   return *this;
 }
 
+ResourceQuota& ResourceQuota::SetMaxThreads(int new_max_threads) {
+  grpc_resource_quota_set_max_threads(impl_, new_max_threads);
+  return *this;
+}
 }  // namespace grpc
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc
index e0b9b7a62b..0ab3cd0e32 100644
--- a/src/cpp/server/server_builder.cc
+++ b/src/cpp/server/server_builder.cc
@@ -261,7 +261,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
   }
 
   std::unique_ptr<Server> server(new Server(
-      max_receive_message_size_, &args, sync_server_cqs,
+      max_receive_message_size_, &args, sync_server_cqs, resource_quota_,
       sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
       sync_server_settings_.cq_timeout_msec));
 
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 0d77510e29..472c5035fc 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -47,6 +47,12 @@ namespace grpc {
 
 namespace {
 
+// The default value for the maximum number of threads that can be created in
+// the sync server. This value of 500 is empirically chosen.
To increase the max +// number of threads in a sync server, pass a custom ResourceQuota object (with +// the desired number of max-threads set) to the server builder +#define DEFAULT_MAX_SYNC_SERVER_THREADS 500 + class DefaultGlobalCallbacks final : public Server::GlobalCallbacks { public: ~DefaultGlobalCallbacks() override {} @@ -266,9 +272,9 @@ class Server::SyncRequestThreadManager : public ThreadManager { public: SyncRequestThreadManager(Server* server, CompletionQueue* server_cq, std::shared_ptr global_callbacks, - int min_pollers, int max_pollers, - int cq_timeout_msec) - : ThreadManager(min_pollers, max_pollers), + grpc_resource_quota* rq, int min_pollers, + int max_pollers, int cq_timeout_msec) + : ThreadManager("SyncServer", rq, min_pollers, max_pollers), server_(server), server_cq_(server_cq), cq_timeout_msec_(cq_timeout_msec), @@ -376,7 +382,8 @@ Server::Server( int max_receive_message_size, ChannelArguments* args, std::shared_ptr>> sync_server_cqs, - int min_pollers, int max_pollers, int sync_cq_timeout_msec) + grpc_resource_quota* server_rq, int min_pollers, int max_pollers, + int sync_cq_timeout_msec) : max_receive_message_size_(max_receive_message_size), sync_server_cqs_(std::move(sync_server_cqs)), started_(false), @@ -392,10 +399,22 @@ Server::Server( global_callbacks_->UpdateArguments(args); if (sync_server_cqs_ != nullptr) { + bool default_rq_created = false; + if (server_rq == nullptr) { + server_rq = grpc_resource_quota_create("SyncServer-default-rq"); + grpc_resource_quota_set_max_threads(server_rq, + DEFAULT_MAX_SYNC_SERVER_THREADS); + default_rq_created = true; + } + for (const auto& it : *sync_server_cqs_) { sync_req_mgrs_.emplace_back(new SyncRequestThreadManager( - this, it.get(), global_callbacks_, min_pollers, max_pollers, - sync_cq_timeout_msec)); + this, it.get(), global_callbacks_, server_rq, min_pollers, + max_pollers, sync_cq_timeout_msec)); + } + + if (default_rq_created) { + grpc_resource_quota_unref(server_rq); } } diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index 02ac56a3fd..fa9eec5f9b 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -22,8 +22,8 @@ #include #include - #include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/exec_ctx.h" namespace grpc { @@ -48,12 +48,17 @@ ThreadManager::WorkerThread::~WorkerThread() { thd_.Join(); } -ThreadManager::ThreadManager(int min_pollers, int max_pollers) +ThreadManager::ThreadManager(const char* name, + grpc_resource_quota* resource_quota, + int min_pollers, int max_pollers) : shutdown_(false), num_pollers_(0), min_pollers_(min_pollers), max_pollers_(max_pollers == -1 ? 
INT_MAX : max_pollers), - num_threads_(0) {} + num_threads_(0), + max_active_threads_sofar_(0) { + resource_user_ = grpc_resource_user_create(resource_quota, name); +} ThreadManager::~ThreadManager() { { @@ -61,6 +66,8 @@ ThreadManager::~ThreadManager() { GPR_ASSERT(num_threads_ == 0); } + grpc_core::ExecCtx exec_ctx; // grpc_resource_user_unref needs an exec_ctx + grpc_resource_user_unref(resource_user_); CleanupCompletedThreads(); } @@ -81,17 +88,27 @@ bool ThreadManager::IsShutdown() { return shutdown_; } +int ThreadManager::GetMaxActiveThreadsSoFar() { + std::lock_guard list_lock(list_mu_); + return max_active_threads_sofar_; +} + void ThreadManager::MarkAsCompleted(WorkerThread* thd) { { std::lock_guard list_lock(list_mu_); completed_threads_.push_back(thd); } - std::lock_guard lock(mu_); - num_threads_--; - if (num_threads_ == 0) { - shutdown_cv_.notify_one(); + { + std::lock_guard lock(mu_); + num_threads_--; + if (num_threads_ == 0) { + shutdown_cv_.notify_one(); + } } + + // Give a thread back to the resource quota + grpc_resource_user_free_threads(resource_user_, 1); } void ThreadManager::CleanupCompletedThreads() { @@ -106,14 +123,22 @@ void ThreadManager::CleanupCompletedThreads() { } void ThreadManager::Initialize() { + if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) { + gpr_log(GPR_ERROR, + "No thread quota available to even create the minimum required " + "polling threads (i.e %d). Unable to start the thread manager", + min_pollers_); + abort(); + } + { std::unique_lock lock(mu_); num_pollers_ = min_pollers_; num_threads_ = min_pollers_; + max_active_threads_sofar_ = min_pollers_; } for (int i = 0; i < min_pollers_; i++) { - // Create a new thread (which ends up calling the MainWorkLoop() function new WorkerThread(this); } } @@ -139,11 +164,15 @@ void ThreadManager::MainWorkLoop() { done = true; break; case WORK_FOUND: - // If we got work and there are now insufficient pollers, start a new - // one - if (!shutdown_ && num_pollers_ < min_pollers_) { + // If we got work and there are now insufficient pollers and there is + // quota available to create a new thread, start a new poller thread + if (!shutdown_ && num_pollers_ < min_pollers_ && + grpc_resource_user_allocate_threads(resource_user_, 1)) { num_pollers_++; num_threads_++; + if (num_threads_ > max_active_threads_sofar_) { + max_active_threads_sofar_ = num_threads_; + } // Drop lock before spawning thread to avoid contention lock.unlock(); new WorkerThread(this); @@ -196,6 +225,8 @@ void ThreadManager::MainWorkLoop() { } }; + // This thread is exiting. 
Do some cleanup work, i.e. delete already completed
+  // worker threads
   CleanupCompletedThreads();
 
   // If we are here, either ThreadManager is shutting down or it already has
diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h
index 5a40f2de47..01043edb31 100644
--- a/src/cpp/thread_manager/thread_manager.h
+++ b/src/cpp/thread_manager/thread_manager.h
@@ -27,12 +27,14 @@
 #include
 #include
 
 #include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/resource_quota.h"
 
 namespace grpc {
 
 class ThreadManager {
  public:
-  explicit ThreadManager(int min_pollers, int max_pollers);
+  explicit ThreadManager(const char* name, grpc_resource_quota* resource_quota,
+                         int min_pollers, int max_pollers);
 
   virtual ~ThreadManager();
 
   // Initializes and Starts the Rpc Manager threads
@@ -84,6 +86,11 @@ class ThreadManager {
   // all the threads have drained all the outstanding work
   virtual void Wait();
 
+  // Max number of concurrent threads that were ever active in this thread
+  // manager so far. This is useful for debugging purposes (and in unit tests)
+  // to check if resource_quota is properly being enforced.
+  int GetMaxActiveThreadsSoFar();
+
  private:
   // Helper wrapper class around grpc_core::Thread. Takes a ThreadManager object
   // and starts a new grpc_core::Thread to call the Run() function.
   //
   // The Run() function calls the ThreadManager::MainWorkLoop() function and
   // once that completes, it marks the WorkerThread completed by calling
   // ThreadManager::MarkAsCompleted()
+  //
+  // WHY IS THIS NEEDED?:
+  // When a thread terminates, some other thread *must* call Join() on that
+  // thread so that the resources are released. Having a WorkerThread wrapper
+  // will make this easier. Once Run() completes, each thread calls the
+  // following two functions:
+  //     ThreadManager::CleanupCompletedThreads()
+  //     ThreadManager::MarkAsCompleted()
+  //
+  //  - MarkAsCompleted() puts the WorkerThread object in the ThreadManager's
+  //    completed_threads_ list
+  //  - CleanupCompletedThreads() calls "Join()" on the threads that are already
+  //    in the completed_threads_ list (since a thread cannot call Join() on
+  //    itself, it calls CleanupCompletedThreads() *before* calling
+  //    MarkAsCompleted())
+  //
+  // TODO(sreek): Consider creating the threads 'detached' so that Join() need
+  // not be called (and the need for this WorkerThread class is eliminated)
   class WorkerThread {
    public:
     WorkerThread(ThreadManager* thd_mgr);
@@ -111,13 +136,21 @@ class ThreadManager {
   void MarkAsCompleted(WorkerThread* thd);
   void CleanupCompletedThreads();
 
-  // Protects shutdown_, num_pollers_ and num_threads_
-  // TODO: sreek - Change num_pollers and num_threads_ to atomics
+  // Protects shutdown_, num_pollers_, num_threads_ and
+  // max_active_threads_sofar_
   std::mutex mu_;
 
   bool shutdown_;
   std::condition_variable shutdown_cv_;
 
+  // The resource user object to use when requesting quota to create threads
+  //
+  // Note: The user of this ThreadManager object must create the
+  // grpc_resource_quota object (that contains the actual max thread quota) and
+  // a grpc_resource_user object through which quota is requested whenever new
+  // threads need to be created
+  grpc_resource_user* resource_user_;
+
   // Number of threads doing polling
   int num_pollers_;
 
   // The minimum and maximum number of threads that should be in the pool
   int min_pollers_;
   int max_pollers_;
 
-  // The total number of threads (includes threads includes the threads that are
-  // currently polling i.e num_pollers_)
+  // The total number of
threads currently active (this includes the
+  // threads that are currently polling, i.e. num_pollers_)
   int num_threads_;
 
+  // See GetMaxActiveThreadsSoFar()'s description.
+  // To be more specific, this variable tracks the max value that num_threads_
+  // has ever reached so far
+  int max_active_threads_sofar_;
+
   std::mutex list_mu_;
   std::list<WorkerThread*> completed_threads_;
 };
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 2443532bb8..78090afd6c 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -91,6 +91,7 @@ grpc_resource_quota_create_type grpc_resource_quota_create_import;
 grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
 grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
 grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
+grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_import;
 grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
 grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
 grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
@@ -341,6 +342,7 @@ void grpc_rb_load_imports(HMODULE library) {
   grpc_resource_quota_ref_import = (grpc_resource_quota_ref_type) GetProcAddress(library, "grpc_resource_quota_ref");
   grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref");
   grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize");
+  grpc_resource_quota_set_max_threads_import = (grpc_resource_quota_set_max_threads_type) GetProcAddress(library, "grpc_resource_quota_set_max_threads");
   grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
   grpc_channelz_get_top_channels_import = (grpc_channelz_get_top_channels_type) GetProcAddress(library, "grpc_channelz_get_top_channels");
   grpc_channelz_get_channel_import = (grpc_channelz_get_channel_type) GetProcAddress(library, "grpc_channelz_get_channel");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index b08a1f94f7..1807efa761 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -248,6 +248,9 @@ extern grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
 typedef void(*grpc_resource_quota_resize_type)(grpc_resource_quota* resource_quota, size_t new_size);
 extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
 #define grpc_resource_quota_resize grpc_resource_quota_resize_import
+typedef void(*grpc_resource_quota_set_max_threads_type)(grpc_resource_quota* resource_quota, int new_max_threads);
+extern grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_import;
+#define grpc_resource_quota_set_max_threads grpc_resource_quota_set_max_threads_import
 typedef const grpc_arg_pointer_vtable*(*grpc_resource_quota_arg_vtable_type)(void);
 extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
 #define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import
diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc
index 059ff7b5f8..f3b35fed32 100644
--- a/test/core/iomgr/resource_quota_test.cc
+++ b/test/core/iomgr/resource_quota_test.cc
@@ -798,6 +798,98 @@ static void
test_negative_rq_free_pool(void) { } } +// Simple test to check resource quota thread limits +static void test_thread_limit() { + grpc_core::ExecCtx exec_ctx; + + grpc_resource_quota* rq = grpc_resource_quota_create("test_thread_limit"); + grpc_resource_user* ru1 = grpc_resource_user_create(rq, "ru1"); + grpc_resource_user* ru2 = grpc_resource_user_create(rq, "ru2"); + + // Max threads = 100 + grpc_resource_quota_set_max_threads(rq, 100); + + // Request quota for 100 threads (50 for ru1, 50 for ru2) + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 10)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 40)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 40)); + + // Threads exhausted. Next request must fail + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20)); + + // Free 20 threads from two different users + grpc_resource_user_free_threads(ru1, 10); + grpc_resource_user_free_threads(ru2, 10); + + // Next request to 20 threads must succeed + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); + + // No more thread quota again + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 20)); + + // Free 10 more + grpc_resource_user_free_threads(ru1, 10); + + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 5)); + GPR_ASSERT( + !grpc_resource_user_allocate_threads(ru2, 10)); // Only 5 available + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 5)); + + // Teardown (ru1 and ru2 release all the quota back to rq) + grpc_resource_user_unref(ru1); + grpc_resource_user_unref(ru2); + grpc_resource_quota_unref(rq); +} + +// Change max quota in either direction dynamically +static void test_thread_maxquota_change() { + grpc_core::ExecCtx exec_ctx; + + grpc_resource_quota* rq = + grpc_resource_quota_create("test_thread_maxquota_change"); + grpc_resource_user* ru1 = grpc_resource_user_create(rq, "ru1"); + grpc_resource_user* ru2 = grpc_resource_user_create(rq, "ru2"); + + // Max threads = 100 + grpc_resource_quota_set_max_threads(rq, 100); + + // Request quota for 100 threads (50 for ru1, 50 for ru2) + GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 50)); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 50)); + + // Threads exhausted. 
Next request must fail + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20)); + + // Increase maxquota and retry + // Max threads = 150; + grpc_resource_quota_set_max_threads(rq, 150); + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); // ru2=70, ru1=50 + + // Decrease maxquota (Note: Quota already given to ru1 and ru2 is unaffected) + // Max threads = 10; + grpc_resource_quota_set_max_threads(rq, 10); + + // New requests will fail until quota is available + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); + + // Make quota available + grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0 + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); // not enough + + grpc_resource_user_free_threads(ru2, 70); // ru2 now has 0 + + // Now we can get quota up-to 10, the current max + GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10)); + // No more thread quota again + GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); + + // Teardown (ru1 and ru2 release all the quota back to rq) + grpc_resource_user_unref(ru1); + grpc_resource_user_unref(ru2); + grpc_resource_quota_unref(rq); +} + int main(int argc, char** argv) { grpc_test_init(argc, argv); grpc_init(); @@ -827,6 +919,11 @@ int main(int argc, char** argv) { test_negative_rq_free_pool(); gpr_mu_destroy(&g_mu); gpr_cv_destroy(&g_cv); + + // Resource quota thread related + test_thread_limit(); + test_thread_maxquota_change(); + grpc_shutdown(); return 0; } diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c index 9f4ad2b4d7..497f7194d5 100644 --- a/test/core/surface/public_headers_must_be_c89.c +++ b/test/core/surface/public_headers_must_be_c89.c @@ -130,6 +130,7 @@ int main(int argc, char **argv) { printf("%lx", (unsigned long) grpc_resource_quota_ref); printf("%lx", (unsigned long) grpc_resource_quota_unref); printf("%lx", (unsigned long) grpc_resource_quota_resize); + printf("%lx", (unsigned long) grpc_resource_quota_set_max_threads); printf("%lx", (unsigned long) grpc_resource_quota_arg_vtable); printf("%lx", (unsigned long) grpc_channelz_get_top_channels); printf("%lx", (unsigned long) grpc_channelz_get_channel); diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 7a95a9f17d..838f5f72ad 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -30,30 +30,44 @@ #include "test/cpp/util/test_config.h" namespace grpc { + +struct ThreadManagerTestSettings { + // The min number of pollers that SHOULD be active in ThreadManager + int min_pollers; + // The max number of pollers that could be active in ThreadManager + int max_pollers; + // The sleep duration in PollForWork() function to simulate "polling" + int poll_duration_ms; + // The sleep duration in DoWork() function to simulate "work" + int work_duration_ms; + // Max number of times PollForWork() is called before shutting down + int max_poll_calls; +}; + class ThreadManagerTest final : public grpc::ThreadManager { public: - ThreadManagerTest() - : ThreadManager(kMinPollers, kMaxPollers), + ThreadManagerTest(const char* name, grpc_resource_quota* rq, + const ThreadManagerTestSettings& settings) + : ThreadManager(name, rq, settings.min_pollers, settings.max_pollers), + settings_(settings), num_do_work_(0), num_poll_for_work_(0), num_work_found_(0) {} grpc::ThreadManager::WorkStatus PollForWork(void** tag, bool* ok) override; void DoWork(void* tag, bool ok) override; - void 
PerformTest(); + + // Get number of times PollForWork() returned WORK_FOUND + int GetNumWorkFound(); + // Get number of times DoWork() was called + int GetNumDoWork(); private: void SleepForMs(int sleep_time_ms); - static const int kMinPollers = 2; - static const int kMaxPollers = 10; - - static const int kPollingTimeoutMsec = 10; - static const int kDoWorkDurationMsec = 1; - - // PollForWork will return SHUTDOWN after these many number of invocations - static const int kMaxNumPollForWork = 50; + ThreadManagerTestSettings settings_; + // Counters gpr_atm num_do_work_; // Number of calls to DoWork gpr_atm num_poll_for_work_; // Number of calls to PollForWork gpr_atm num_work_found_; // Number of times WORK_FOUND was returned @@ -69,54 +83,117 @@ void ThreadManagerTest::SleepForMs(int duration_ms) { grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void** tag, bool* ok) { int call_num = gpr_atm_no_barrier_fetch_add(&num_poll_for_work_, 1); - - if (call_num >= kMaxNumPollForWork) { + if (call_num >= settings_.max_poll_calls) { Shutdown(); return SHUTDOWN; } - // Simulate "polling for work" by sleeping for sometime - SleepForMs(kPollingTimeoutMsec); - + SleepForMs(settings_.poll_duration_ms); // Simulate "polling" duration *tag = nullptr; *ok = true; - // Return timeout roughly 1 out of every 3 calls + // Return timeout roughly 1 out of every 3 calls just to make the test a bit + // more interesting if (call_num % 3 == 0) { return TIMEOUT; - } else { - gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); - return WORK_FOUND; } + + gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); + return WORK_FOUND; } void ThreadManagerTest::DoWork(void* tag, bool ok) { gpr_atm_no_barrier_fetch_add(&num_do_work_, 1); - SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping + SleepForMs(settings_.work_duration_ms); // Simulate work by sleeping } -void ThreadManagerTest::PerformTest() { - // Initialize() starts the ThreadManager - Initialize(); - - // Wait for all the threads to gracefully terminate - Wait(); +int ThreadManagerTest::GetNumWorkFound() { + return static_cast(gpr_atm_no_barrier_load(&num_work_found_)); +} - // The number of times DoWork() was called is equal to the number of times - // WORK_FOUND was returned - gpr_log(GPR_DEBUG, "DoWork() called %" PRIdPTR " times", - gpr_atm_no_barrier_load(&num_do_work_)); - GPR_ASSERT(gpr_atm_no_barrier_load(&num_do_work_) == - gpr_atm_no_barrier_load(&num_work_found_)); +int ThreadManagerTest::GetNumDoWork() { + return static_cast(gpr_atm_no_barrier_load(&num_do_work_)); } } // namespace grpc +// Test that the number of times DoWork() is called is equal to the number of +// times PollForWork() returned WORK_FOUND +static void TestPollAndWork() { + grpc_resource_quota* rq = grpc_resource_quota_create("Test-poll-and-work"); + grpc::ThreadManagerTestSettings settings = { + 2 /* min_pollers */, 10 /* max_pollers */, 10 /* poll_duration_ms */, + 1 /* work_duration_ms */, 50 /* max_poll_calls */}; + + grpc::ThreadManagerTest test_thread_mgr("TestThreadManager", rq, settings); + grpc_resource_quota_unref(rq); + + test_thread_mgr.Initialize(); // Start the thread manager + test_thread_mgr.Wait(); // Wait for all threads to finish + + // Verify that The number of times DoWork() was called is equal to the number + // of times WORK_FOUND was returned + gpr_log(GPR_DEBUG, "DoWork() called %d times", + test_thread_mgr.GetNumDoWork()); + GPR_ASSERT(test_thread_mgr.GetNumDoWork() == + test_thread_mgr.GetNumWorkFound()); +} + +static void 
TestThreadQuota() { + const int kMaxNumThreads = 3; + grpc_resource_quota* rq = grpc_resource_quota_create("Test-thread-quota"); + grpc_resource_quota_set_max_threads(rq, kMaxNumThreads); + + // Set work_duration_ms to be much greater than poll_duration_ms. This way, + // the thread manager will be forced to create more 'polling' threads to + // honor the min_pollers guarantee + grpc::ThreadManagerTestSettings settings = { + 1 /* min_pollers */, 1 /* max_pollers */, 1 /* poll_duration_ms */, + 10 /* work_duration_ms */, 50 /* max_poll_calls */}; + + // Create two thread managers (but with same resource quota). This means + // that the max number of active threads across BOTH the thread managers + // cannot be greater than kMaxNumthreads + grpc::ThreadManagerTest test_thread_mgr_1("TestThreadManager-1", rq, + settings); + grpc::ThreadManagerTest test_thread_mgr_2("TestThreadManager-2", rq, + settings); + // It is ok to unref resource quota before starting thread managers. + grpc_resource_quota_unref(rq); + + // Start both thread managers + test_thread_mgr_1.Initialize(); + test_thread_mgr_2.Initialize(); + + // Wait for both to finish + test_thread_mgr_1.Wait(); + test_thread_mgr_2.Wait(); + + // Now verify that the total number of active threads in either thread manager + // never exceeds kMaxNumThreads + // + // NOTE: Actually the total active threads across *both* thread managers at + // any point of time never exceeds kMaxNumThreads but unfortunately there is + // no easy way to verify it (i.e we can't just do (max1 + max2 <= k)) + // Its okay to not test this case here. The resource quota c-core tests + // provide enough coverage to resource quota object with multiple resource + // users + int max1 = test_thread_mgr_1.GetMaxActiveThreadsSoFar(); + int max2 = test_thread_mgr_2.GetMaxActiveThreadsSoFar(); + gpr_log( + GPR_DEBUG, + "MaxActiveThreads in TestThreadManager_1: %d, TestThreadManager_2: %d", + max1, max2); + GPR_ASSERT(max1 <= kMaxNumThreads && max2 <= kMaxNumThreads); +} + int main(int argc, char** argv) { std::srand(std::time(nullptr)); - grpc::testing::InitTest(&argc, &argv, true); - grpc::ThreadManagerTest test_rpc_manager; - test_rpc_manager.PerformTest(); + grpc_init(); + + TestPollAndWork(); + TestThreadQuota(); + grpc_shutdown(); return 0; } -- cgit v1.2.3 From e84096bbe5f0e471d90906e93cba9332c392aa60 Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Thu, 9 Aug 2018 13:20:35 -0700 Subject: Experimental infrastructure for callback-based CQ --- grpc.def | 1 + include/grpc/grpc.h | 6 + include/grpc/impl/codegen/grpc_types.h | 19 ++- include/grpcpp/impl/codegen/client_unary_call.h | 4 +- include/grpcpp/impl/codegen/completion_queue.h | 5 +- include/grpcpp/impl/codegen/sync_stream.h | 12 +- src/core/lib/surface/completion_queue.cc | 150 +++++++++++++++++++-- src/core/lib/surface/completion_queue.h | 13 +- src/core/lib/surface/completion_queue_factory.cc | 17 ++- .../GRPCClient/private/GRPCCompletionQueue.m | 6 +- src/ruby/ext/grpc/rb_grpc_imports.generated.c | 2 + src/ruby/ext/grpc/rb_grpc_imports.generated.h | 3 + test/core/surface/public_headers_must_be_c89.c | 1 + 13 files changed, 211 insertions(+), 28 deletions(-) (limited to 'include/grpc') diff --git a/grpc.def b/grpc.def index 5e9d86c769..962a2ec716 100644 --- a/grpc.def +++ b/grpc.def @@ -20,6 +20,7 @@ EXPORTS grpc_completion_queue_factory_lookup grpc_completion_queue_create_for_next grpc_completion_queue_create_for_pluck + grpc_completion_queue_create_for_callback grpc_completion_queue_create 
grpc_completion_queue_next grpc_completion_queue_pluck diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 348c7a316f..4c3af45100 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -101,6 +101,12 @@ GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_next( GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_pluck( void* reserved); +/** Helper function to create a completion queue with grpc_cq_completion_type + of GRPC_CQ_CALLBACK and grpc_cq_polling_type of GRPC_CQ_DEFAULT_POLLING. + This function is experimental. */ +GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_callback( + void* shutdown_callback, void* reserved); + /** Create a completion queue */ GRPCAPI grpc_completion_queue* grpc_completion_queue_create( const grpc_completion_queue_factory* factory, diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index 5fd080c48b..b5353c1dea 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -651,10 +651,16 @@ typedef enum { GRPC_CQ_NEXT, /** Events are popped out by calling grpc_completion_queue_pluck() API ONLY*/ - GRPC_CQ_PLUCK + GRPC_CQ_PLUCK, + + /** EXPERIMENTAL: Events trigger a callback specified as the tag */ + GRPC_CQ_CALLBACK } grpc_cq_completion_type; -#define GRPC_CQ_CURRENT_VERSION 1 +/* The upgrade to version 2 is currently experimental. */ + +#define GRPC_CQ_CURRENT_VERSION 2 +#define GRPC_CQ_VERSION_MINIMUM_FOR_CALLBACKABLE 2 typedef struct grpc_completion_queue_attributes { /** The version number of this structure. More fields might be added to this structure in future. */ @@ -663,6 +669,15 @@ typedef struct grpc_completion_queue_attributes { grpc_cq_completion_type cq_completion_type; grpc_cq_polling_type cq_polling_type; + + /* END OF VERSION 1 CQ ATTRIBUTES */ + + /* EXPERIMENTAL: START OF VERSION 2 CQ ATTRIBUTES */ + /** When creating a callbackable CQ, pass in a functor to get invoked when + * shutdown is complete */ + void* cq_shutdown_cb; + + /* END OF VERSION 2 CQ ATTRIBUTES */ } grpc_completion_queue_attributes; /** The completion queue factory structure is opaque to the callers of grpc */ diff --git a/include/grpcpp/impl/codegen/client_unary_call.h b/include/grpcpp/impl/codegen/client_unary_call.h index a37a81b75b..e4e8364e07 100644 --- a/include/grpcpp/impl/codegen/client_unary_call.h +++ b/include/grpcpp/impl/codegen/client_unary_call.h @@ -50,8 +50,8 @@ class BlockingUnaryCallImpl { ClientContext* context, const InputMessage& request, OutputMessage* result) { CompletionQueue cq(grpc_completion_queue_attributes{ - GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, - GRPC_CQ_DEFAULT_POLLING}); // Pluckable completion queue + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, + nullptr}); // Pluckable completion queue Call call(channel->CreateCall(method, context, &cq)); CallOpSet, diff --git a/include/grpcpp/impl/codegen/completion_queue.h b/include/grpcpp/impl/codegen/completion_queue.h index 5819e068ba..272575dac2 100644 --- a/include/grpcpp/impl/codegen/completion_queue.h +++ b/include/grpcpp/impl/codegen/completion_queue.h @@ -97,7 +97,8 @@ class CompletionQueue : private GrpcLibraryCodegen { /// instance. CompletionQueue() : CompletionQueue(grpc_completion_queue_attributes{ - GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING}) {} + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING, + nullptr}) {} /// Wrap \a take, taking ownership of the instance. 
/// @@ -376,7 +377,7 @@ class ServerCompletionQueue : public CompletionQueue { /// frequently polled. ServerCompletionQueue(grpc_cq_polling_type polling_type) : CompletionQueue(grpc_completion_queue_attributes{ - GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, polling_type}), + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, polling_type, nullptr}), polling_type_(polling_type) {} grpc_cq_polling_type polling_type_; diff --git a/include/grpcpp/impl/codegen/sync_stream.h b/include/grpcpp/impl/codegen/sync_stream.h index 7152eaf41f..cbfcf25d0a 100644 --- a/include/grpcpp/impl/codegen/sync_stream.h +++ b/include/grpcpp/impl/codegen/sync_stream.h @@ -243,8 +243,8 @@ class ClientReader final : public ClientReaderInterface { ClientContext* context, const W& request) : context_(context), cq_(grpc_completion_queue_attributes{ - GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, - GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, + nullptr}), // Pluckable cq call_(channel->CreateCall(method, context, &cq_)) { ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, ::grpc::internal::CallOpSendMessage, @@ -377,8 +377,8 @@ class ClientWriter : public ClientWriterInterface { ClientContext* context, R* response) : context_(context), cq_(grpc_completion_queue_attributes{ - GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, - GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, + nullptr}), // Pluckable cq call_(channel->CreateCall(method, context, &cq_)) { finish_ops_.RecvMessage(response); finish_ops_.AllowNoMessage(); @@ -551,8 +551,8 @@ class ClientReaderWriter final : public ClientReaderWriterInterface { ClientContext* context) : context_(context), cq_(grpc_completion_queue_attributes{ - GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, - GRPC_CQ_DEFAULT_POLLING}), // Pluckable cq + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, + nullptr}), // Pluckable cq call_(channel->CreateCall(method, context, &cq_)) { if (!context_->initial_metadata_corked_) { ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc index 7da9e6b74c..fd33ce044c 100644 --- a/src/core/lib/surface/completion_queue.cc +++ b/src/core/lib/surface/completion_queue.cc @@ -184,7 +184,7 @@ static const cq_poller_vtable g_poller_vtable_by_poller_type[] = { typedef struct cq_vtable { grpc_cq_completion_type cq_completion_type; size_t data_size; - void (*init)(void* data); + void (*init)(void* data, grpc_core::CQCallbackInterface* shutdown_callback); void (*shutdown)(grpc_completion_queue* cq); void (*destroy)(void* data); bool (*begin_op)(grpc_completion_queue* cq, void* tag); @@ -253,6 +253,29 @@ typedef struct cq_pluck_data { plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS]; } cq_pluck_data; +typedef struct cq_callback_data { + /** No actual completed events queue, unlike other types */ + + /** Number of pending events (+1 if we're not shutdown) */ + gpr_atm pending_events; + + /** Counter of how many things have ever been queued on this completion queue + useful for avoiding locks to check the queue */ + gpr_atm things_queued_ever; + + /** 0 initially. 1 once we completed shutting */ + /* TODO: (sreek) This is not needed since (shutdown == 1) if and only if + * (pending_events == 0). So consider removing this in future and use + * pending_events */ + gpr_atm shutdown; + + /** 0 initially. 
1 once we initiated shutdown */ + bool shutdown_called; + + /** A callback that gets invoked when the CQ completes shutdown */ + grpc_core::CQCallbackInterface* shutdown_callback; +} cq_callback_data; + /* Completion queue structure */ struct grpc_completion_queue { /** Once owning_refs drops to zero, we will destroy the cq */ @@ -276,11 +299,14 @@ struct grpc_completion_queue { /* Forward declarations */ static void cq_finish_shutdown_next(grpc_completion_queue* cq); static void cq_finish_shutdown_pluck(grpc_completion_queue* cq); +static void cq_finish_shutdown_callback(grpc_completion_queue* cq); static void cq_shutdown_next(grpc_completion_queue* cq); static void cq_shutdown_pluck(grpc_completion_queue* cq); +static void cq_shutdown_callback(grpc_completion_queue* cq); static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag); static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag); +static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag); static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag, grpc_error* error, @@ -294,16 +320,25 @@ static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag, grpc_cq_completion* storage), void* done_arg, grpc_cq_completion* storage); +static void cq_end_op_for_callback(grpc_completion_queue* cq, void* tag, + grpc_error* error, + void (*done)(void* done_arg, + grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage); + static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline, void* reserved); static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag, gpr_timespec deadline, void* reserved); -static void cq_init_next(void* data); -static void cq_init_pluck(void* data); +static void cq_init_next(void* data, grpc_core::CQCallbackInterface*); +static void cq_init_pluck(void* data, grpc_core::CQCallbackInterface*); +static void cq_init_callback(void* data, + grpc_core::CQCallbackInterface* shutdown_callback); static void cq_destroy_next(void* data); static void cq_destroy_pluck(void* data); +static void cq_destroy_callback(void* data); /* Completion queue vtables based on the completion-type */ static const cq_vtable g_cq_vtable[] = { @@ -315,6 +350,10 @@ static const cq_vtable g_cq_vtable[] = { {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck, cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, nullptr, cq_pluck}, + /* GRPC_CQ_CALLBACK */ + {GRPC_CQ_CALLBACK, sizeof(cq_callback_data), cq_init_callback, + cq_shutdown_callback, cq_destroy_callback, cq_begin_op_for_callback, + cq_end_op_for_callback, nullptr, nullptr}, }; #define DATA_FROM_CQ(cq) ((void*)(cq + 1)) @@ -419,8 +458,8 @@ static long cq_event_queue_num_items(grpc_cq_event_queue* q) { } grpc_completion_queue* grpc_completion_queue_create_internal( - grpc_cq_completion_type completion_type, - grpc_cq_polling_type polling_type) { + grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type, + grpc_core::CQCallbackInterface* shutdown_callback) { GPR_TIMER_SCOPE("grpc_completion_queue_create_internal", 0); grpc_completion_queue* cq; @@ -448,14 +487,14 @@ grpc_completion_queue* grpc_completion_queue_create_internal( gpr_ref_init(&cq->owning_refs, 2); poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu); - vtable->init(DATA_FROM_CQ(cq)); + vtable->init(DATA_FROM_CQ(cq), shutdown_callback); GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq, grpc_schedule_on_exec_ctx); return cq; } -static void cq_init_next(void* ptr) { 
+static void cq_init_next(void* ptr, grpc_core::CQCallbackInterface*) {
   cq_next_data* cqd = static_cast<cq_next_data*>(ptr);
   /* Initial count is dropped by grpc_completion_queue_shutdown */
   gpr_atm_no_barrier_store(&cqd->pending_events, 1);
@@ -470,7 +509,7 @@ static void cq_destroy_next(void* ptr) {
   cq_event_queue_destroy(&cqd->queue);
 }
 
-static void cq_init_pluck(void* ptr) {
+static void cq_init_pluck(void* ptr, grpc_core::CQCallbackInterface*) {
   cq_pluck_data* cqd = static_cast<cq_pluck_data*>(ptr);
   /* Initial count is dropped by grpc_completion_queue_shutdown */
   gpr_atm_no_barrier_store(&cqd->pending_events, 1);
@@ -487,6 +526,19 @@ static void cq_destroy_pluck(void* ptr) {
   GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head);
 }
 
+static void cq_init_callback(
+    void* ptr, grpc_core::CQCallbackInterface* shutdown_callback) {
+  cq_callback_data* cqd = static_cast<cq_callback_data*>(ptr);
+  /* Initial count is dropped by grpc_completion_queue_shutdown */
+  gpr_atm_no_barrier_store(&cqd->pending_events, 1);
+  gpr_atm_no_barrier_store(&cqd->shutdown, 0);
+  cqd->shutdown_called = false;
+  gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0);
+  cqd->shutdown_callback = shutdown_callback;
+}
+
+static void cq_destroy_callback(void* ptr) {}
+
 grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) {
   return cq->vtable->cq_completion_type;
 }
@@ -596,6 +648,11 @@ static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag) {
   return atm_inc_if_nonzero(&cqd->pending_events);
 }
 
+static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag) {
+  cq_callback_data* cqd = static_cast<cq_callback_data*>(DATA_FROM_CQ(cq));
+  return atm_inc_if_nonzero(&cqd->pending_events);
+}
+
 bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
 #ifndef NDEBUG
   gpr_mu_lock(cq->mu);
@@ -759,6 +816,47 @@ static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag,
   GRPC_ERROR_UNREF(error);
 }
 
+/* Complete an event on a completion queue of type GRPC_CQ_CALLBACK */
+static void cq_end_op_for_callback(
+    grpc_completion_queue* cq, void* tag, grpc_error* error,
+    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
+    grpc_cq_completion* storage) {
+  GPR_TIMER_SCOPE("cq_end_op_for_callback", 0);
+
+  cq_callback_data* cqd = static_cast<cq_callback_data*>(DATA_FROM_CQ(cq));
+  bool is_success = (error == GRPC_ERROR_NONE);
+
+  if (grpc_api_trace.enabled() ||
+      (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE)) {
+    const char* errmsg = grpc_error_string(error);
+    GRPC_API_TRACE(
+        "cq_end_op_for_callback(cq=%p, tag=%p, error=%s, "
+        "done=%p, done_arg=%p, storage=%p)",
+        6, (cq, tag, errmsg, done, done_arg, storage));
+    if (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE) {
+      gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
+    }
+  }
+
+  /* We don't care for the storage content */
+  done(done_arg, storage);
+
+  gpr_mu_lock(cq->mu);
+  cq_check_tag(cq, tag, false); /* Used in debug builds only */
+
+  gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
+  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+    cq_finish_shutdown_callback(cq);
+    gpr_mu_unlock(cq->mu);
+  } else {
+    gpr_mu_unlock(cq->mu);
+  }
+
+  GRPC_ERROR_UNREF(error);
+
+  (static_cast<grpc_core::CQCallbackInterface*>(tag))->Run(is_success);
+}
+
 void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
                     void (*done)(void* done_arg, grpc_cq_completion* storage),
                     void* done_arg, grpc_cq_completion* storage) {
@@ -1233,6 +1331,42 @@ static void cq_shutdown_pluck(grpc_completion_queue* cq) {
   GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
 }
 
+static void cq_finish_shutdown_callback(grpc_completion_queue* cq) {
+  cq_callback_data* cqd = static_cast<cq_callback_data*>(DATA_FROM_CQ(cq));
+  auto* callback = cqd->shutdown_callback;
+
+  GPR_ASSERT(cqd->shutdown_called);
+  GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
+  gpr_atm_no_barrier_store(&cqd->shutdown, 1);
+
+  cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
+  callback->Run(true);
+}
+
+static void cq_shutdown_callback(grpc_completion_queue* cq) {
+  cq_callback_data* cqd = static_cast<cq_callback_data*>(DATA_FROM_CQ(cq));
+
+  /* Need an extra ref for cq here because:
+   * We call cq_finish_shutdown_callback() below, which would call pollset
+   * shutdown. Pollset shutdown decrements the cq ref count, which can
+   * potentially destroy the cq (if that happens to be the last ref).
+   * Creating an extra ref here prevents the cq from getting destroyed while
+   * this function is still active */
+  GRPC_CQ_INTERNAL_REF(cq, "shutting_down (callback cq)");
+  gpr_mu_lock(cq->mu);
+  if (cqd->shutdown_called) {
+    gpr_mu_unlock(cq->mu);
+    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
+    return;
+  }
+  cqd->shutdown_called = true;
+  if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+    cq_finish_shutdown_callback(cq);
+  }
+  gpr_mu_unlock(cq->mu);
+  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
+}
+
 /* Shutdown simply drops a ref that we reserved at creation time; if we drop
    to zero here, then enter shutdown mode and wake up any waiters */
 void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 84446a4d92..6d8c6c9b06 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -47,6 +47,16 @@ typedef struct grpc_cq_completion {
   uintptr_t next;
 } grpc_cq_completion;
 
+/// For callback CQs, the tag is in fact expected to be a pointer to
+/// the following interface.
+namespace grpc_core {
+class CQCallbackInterface {
+ public:
+  virtual ~CQCallbackInterface() {}
+  virtual void Run(bool) = 0;
+};
+}  // namespace grpc_core
+
 #ifndef NDEBUG
 void grpc_cq_internal_ref(grpc_completion_queue* cc, const char* reason,
                           const char* file, int line);
@@ -87,6 +97,7 @@ grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cc);
 int grpc_get_cq_poll_num(grpc_completion_queue* cc);
 
 grpc_completion_queue* grpc_completion_queue_create_internal(
-    grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type);
+    grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type,
+    grpc_core::CQCallbackInterface* shutdown_callback);
 
 #endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H */
diff --git a/src/core/lib/surface/completion_queue_factory.cc b/src/core/lib/surface/completion_queue_factory.cc
index 51c1183c5f..ed92dd7eba 100644
--- a/src/core/lib/surface/completion_queue_factory.cc
+++ b/src/core/lib/surface/completion_queue_factory.cc
@@ -30,8 +30,9 @@
 static grpc_completion_queue* default_create(
     const grpc_completion_queue_factory* factory,
     const grpc_completion_queue_attributes* attr) {
-  return grpc_completion_queue_create_internal(attr->cq_completion_type,
-                                               attr->cq_polling_type);
+  return grpc_completion_queue_create_internal(
+      attr->cq_completion_type, attr->cq_polling_type,
+      static_cast<grpc_core::CQCallbackInterface*>(attr->cq_shutdown_cb));
 }
 
 static grpc_completion_queue_factory_vtable default_vtable = {default_create};
@@ -60,14 +61,22 @@ const grpc_completion_queue_factory* grpc_completion_queue_factory_lookup(
 grpc_completion_queue* grpc_completion_queue_create_for_next(void* reserved) {
   GPR_ASSERT(!reserved);
   grpc_completion_queue_attributes attr = {1, GRPC_CQ_NEXT,
-                                           GRPC_CQ_DEFAULT_POLLING};
+                                           GRPC_CQ_DEFAULT_POLLING, nullptr};
   return g_default_cq_factory.vtable->create(&g_default_cq_factory, &attr);
 }
 
 grpc_completion_queue* grpc_completion_queue_create_for_pluck(void* reserved) {
   GPR_ASSERT(!reserved);
   grpc_completion_queue_attributes attr = {1, GRPC_CQ_PLUCK,
-                                           GRPC_CQ_DEFAULT_POLLING};
+                                           GRPC_CQ_DEFAULT_POLLING, nullptr};
+  return g_default_cq_factory.vtable->create(&g_default_cq_factory, &attr);
+}
+
+grpc_completion_queue* grpc_completion_queue_create_for_callback(
+    void* shutdown_callback, void* reserved) {
+  GPR_ASSERT(!reserved);
+  grpc_completion_queue_attributes attr = {
+      2, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING, shutdown_callback};
   return g_default_cq_factory.vtable->create(&g_default_cq_factory, &attr);
 }
diff --git a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m
index bda1c3360b..69db340e98 100644
--- a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m
+++ b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m
@@ -21,11 +21,11 @@
 #import <grpc/grpc.h>
 
 #ifdef GRPC_CFSTREAM
-const grpc_completion_queue_attributes kCompletionQueueAttr = {GRPC_CQ_CURRENT_VERSION,
-                                                               GRPC_CQ_NEXT, GRPC_CQ_NON_POLLING};
+const grpc_completion_queue_attributes kCompletionQueueAttr = {
+    GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_NON_POLLING, nullptr};
 #else
 const grpc_completion_queue_attributes kCompletionQueueAttr = {
-    GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING};
+    GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING, nullptr};
 #endif
 
 @implementation GRPCCompletionQueue
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 38b68462df..8a2edc41f8 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -43,6 +43,7 @@ grpc_g_stands_for_type grpc_g_stands_for_import;
 grpc_completion_queue_factory_lookup_type grpc_completion_queue_factory_lookup_import;
 grpc_completion_queue_create_for_next_type grpc_completion_queue_create_for_next_import;
 grpc_completion_queue_create_for_pluck_type grpc_completion_queue_create_for_pluck_import;
+grpc_completion_queue_create_for_callback_type grpc_completion_queue_create_for_callback_import;
 grpc_completion_queue_create_type grpc_completion_queue_create_import;
 grpc_completion_queue_next_type grpc_completion_queue_next_import;
 grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import;
@@ -294,6 +295,7 @@ void grpc_rb_load_imports(HMODULE library) {
   grpc_completion_queue_factory_lookup_import = (grpc_completion_queue_factory_lookup_type) GetProcAddress(library, "grpc_completion_queue_factory_lookup");
   grpc_completion_queue_create_for_next_import = (grpc_completion_queue_create_for_next_type) GetProcAddress(library, "grpc_completion_queue_create_for_next");
   grpc_completion_queue_create_for_pluck_import = (grpc_completion_queue_create_for_pluck_type) GetProcAddress(library, "grpc_completion_queue_create_for_pluck");
+  grpc_completion_queue_create_for_callback_import = (grpc_completion_queue_create_for_callback_type) GetProcAddress(library, "grpc_completion_queue_create_for_callback");
   grpc_completion_queue_create_import = (grpc_completion_queue_create_type) GetProcAddress(library, "grpc_completion_queue_create");
   grpc_completion_queue_next_import = (grpc_completion_queue_next_type) GetProcAddress(library, "grpc_completion_queue_next");
   grpc_completion_queue_pluck_import = (grpc_completion_queue_pluck_type) GetProcAddress(library, "grpc_completion_queue_pluck");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index d6add00d12..5a7884cdcd 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -104,6 +104,9 @@ extern grpc_completion_queue_create_for_next_type grpc_completion_queue_create_f
 typedef grpc_completion_queue*(*grpc_completion_queue_create_for_pluck_type)(void* reserved);
 extern grpc_completion_queue_create_for_pluck_type grpc_completion_queue_create_for_pluck_import;
 #define grpc_completion_queue_create_for_pluck grpc_completion_queue_create_for_pluck_import
+typedef grpc_completion_queue*(*grpc_completion_queue_create_for_callback_type)(void* shutdown_callback, void* reserved);
+extern grpc_completion_queue_create_for_callback_type grpc_completion_queue_create_for_callback_import;
+#define grpc_completion_queue_create_for_callback grpc_completion_queue_create_for_callback_import
 typedef grpc_completion_queue*(*grpc_completion_queue_create_type)(const grpc_completion_queue_factory* factory, const grpc_completion_queue_attributes* attributes, void* reserved);
 extern grpc_completion_queue_create_type grpc_completion_queue_create_import;
 #define grpc_completion_queue_create grpc_completion_queue_create_import
diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c
index 7b3e875cf0..69b3de16c4 100644
--- a/test/core/surface/public_headers_must_be_c89.c
+++ b/test/core/surface/public_headers_must_be_c89.c
@@ -82,6 +82,7 @@ int main(int argc, char **argv) {
   printf("%lx", (unsigned long) grpc_completion_queue_factory_lookup);
   printf("%lx", (unsigned long) grpc_completion_queue_create_for_next);
   printf("%lx", (unsigned long) grpc_completion_queue_create_for_pluck);
+  printf("%lx", (unsigned long) grpc_completion_queue_create_for_callback);
   printf("%lx", (unsigned long) grpc_completion_queue_create);
   printf("%lx", (unsigned long) grpc_completion_queue_next);
   printf("%lx", (unsigned long) grpc_completion_queue_pluck);
-- cgit v1.2.3
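
The commit above wires a third completion-queue flavor into core: tags on a GRPC_CQ_CALLBACK queue are never returned from grpc_completion_queue_next(); instead, cq_end_op_for_callback() casts each tag back to a grpc_core::CQCallbackInterface* and invokes its Run() method inline. A minimal usage sketch, assuming the internal completion_queue.h above is visible to the caller (the ShutdownTracker type and surrounding wiring are illustrative, not part of the patch):

    #include <grpc/grpc.h>

    #include "src/core/lib/surface/completion_queue.h"

    // Illustrative tag/shutdown-callback type; any subclass of
    // grpc_core::CQCallbackInterface is treated the same way.
    class ShutdownTracker : public grpc_core::CQCallbackInterface {
     public:
      // cq_finish_shutdown_callback() invokes Run(true) once all
      // pending events have drained.
      void Run(bool ok) override { done_ = ok; }
      bool done() const { return done_; }

     private:
      bool done_ = false;
    };

    void CallbackCqSketch() {
      ShutdownTracker tracker;
      // The shutdown callback travels through the public API as a void*.
      grpc_completion_queue* cq = grpc_completion_queue_create_for_callback(
          &tracker, /*reserved=*/nullptr);
      // ... start ops whose tags are CQCallbackInterface pointers ...
      grpc_completion_queue_shutdown(cq);  // eventually fires tracker.Run(true)
      grpc_completion_queue_destroy(cq);
    }
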
From 7a7e4f56529c4da16ec6a3035e7b3bdf9dfb6f67 Mon Sep 17 00:00:00 2001
From: "Mark D. Roth"
Date: Thu, 16 Aug 2018 12:04:24 -0700
Subject: Move C++ mu_guard class out of C-core public headers and fix style.

---
 BUILD                                              |  1 +
 build.yaml                                         |  1 +
 gRPC-C++.podspec                                   |  2 ++
 gRPC-Core.podspec                                  |  2 ++
 grpc.gemspec                                       |  1 +
 include/grpc/support/sync.h                        | 16 ---------
 package.xml                                        |  1 +
 .../client_channel/lb_policy/grpclb/grpclb.cc      |  3 +-
 .../lb_policy/pick_first/pick_first.cc             |  5 +--
 .../lb_policy/round_robin/round_robin.cc           |  5 +--
 src/core/lib/channel/channelz_registry.cc          |  7 ++--
 src/core/lib/gprpp/mutex_lock.h                    | 42 ++++++++++++++++++++++
 src/core/lib/iomgr/ev_epollex_linux.cc             |  3 +-
 .../tsi/ssl/session_cache/ssl_session_cache.cc     | 10 +++---
 tools/doxygen/Doxyfile.c++.internal                |  1 +
 tools/doxygen/Doxyfile.core.internal               |  1 +
 tools/run_tests/generated/sources_and_headers.json |  2 ++
 17 files changed, 73 insertions(+), 30 deletions(-)
 create mode 100644 src/core/lib/gprpp/mutex_lock.h
(limited to 'include/grpc')

diff --git a/BUILD b/BUILD
index 35cf86288d..7835e8ea25 100644
--- a/BUILD
+++ b/BUILD
@@ -560,6 +560,7 @@ grpc_cc_library(
         "src/core/lib/gprpp/fork.h",
         "src/core/lib/gprpp/manual_constructor.h",
         "src/core/lib/gprpp/memory.h",
+        "src/core/lib/gprpp/mutex_lock.h",
         "src/core/lib/gprpp/thd.h",
         "src/core/lib/profiling/timers.h",
     ],
diff --git a/build.yaml b/build.yaml
index bd9c4237a1..bb72c41a8c 100644
--- a/build.yaml
+++ b/build.yaml
@@ -196,6 +196,7 @@ filegroups:
   - src/core/lib/gprpp/fork.h
   - src/core/lib/gprpp/manual_constructor.h
   - src/core/lib/gprpp/memory.h
+  - src/core/lib/gprpp/mutex_lock.h
   - src/core/lib/gprpp/thd.h
   - src/core/lib/profiling/timers.h
   uses:
diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec
index 1d3cedb16b..ae3bd4b312 100644
--- a/gRPC-C++.podspec
+++ b/gRPC-C++.podspec
@@ -236,6 +236,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/gprpp/fork.h',
                       'src/core/lib/gprpp/manual_constructor.h',
                       'src/core/lib/gprpp/memory.h',
+                      'src/core/lib/gprpp/mutex_lock.h',
                       'src/core/lib/gprpp/thd.h',
                       'src/core/lib/profiling/timers.h',
                       'src/core/ext/transport/chttp2/transport/bin_decoder.h',
@@ -534,6 +535,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/gprpp/fork.h',
                       'src/core/lib/gprpp/manual_constructor.h',
                       'src/core/lib/gprpp/memory.h',
+                      'src/core/lib/gprpp/mutex_lock.h',
                       'src/core/lib/gprpp/thd.h',
                       'src/core/lib/profiling/timers.h',
                       'src/core/lib/avl/avl.h',
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 1998bc8b4c..146e5cc1ed 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -208,6 +208,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/gprpp/fork.h',
                       'src/core/lib/gprpp/manual_constructor.h',
                       'src/core/lib/gprpp/memory.h',
+                      'src/core/lib/gprpp/mutex_lock.h',
                       'src/core/lib/gprpp/thd.h',
                       'src/core/lib/profiling/timers.h',
                       'src/core/lib/gpr/alloc.cc',
@@ -844,6 +845,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/gprpp/fork.h',
                       'src/core/lib/gprpp/manual_constructor.h',
                       'src/core/lib/gprpp/memory.h',
+                      'src/core/lib/gprpp/mutex_lock.h',
                       'src/core/lib/gprpp/thd.h',
                       'src/core/lib/profiling/timers.h',
                       'src/core/ext/transport/chttp2/transport/bin_decoder.h',
diff --git a/grpc.gemspec b/grpc.gemspec
index 55d53cb71d..a5252b06fd 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -105,6 +105,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/gprpp/fork.h )
   s.files += %w( src/core/lib/gprpp/manual_constructor.h )
   s.files += %w( src/core/lib/gprpp/memory.h )
+  s.files += %w( src/core/lib/gprpp/mutex_lock.h )
   s.files += %w( src/core/lib/gprpp/thd.h )
   s.files += %w( src/core/lib/profiling/timers.h )
   s.files += %w( src/core/lib/gpr/alloc.cc )
diff --git a/include/grpc/support/sync.h b/include/grpc/support/sync.h
index 91d1fa79b5..da820dece5 100644
--- a/include/grpc/support/sync.h
+++ b/include/grpc/support/sync.h
@@ -277,22 +277,6 @@ GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter* c);
 
 #ifdef __cplusplus
 }  // extern "C"
-
-namespace grpc_core {
-
-class mu_guard {
- public:
-  mu_guard(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu); }
-  ~mu_guard() { gpr_mu_unlock(mu_); }
-
-  mu_guard(const mu_guard&) = delete;
-  mu_guard& operator=(const mu_guard&) = delete;
-
- private:
-  gpr_mu* const mu_;
-};
-
-}  // namespace grpc_core
 #endif
 
 #endif /* GRPC_SUPPORT_SYNC_H */
diff --git a/package.xml b/package.xml
index 76bdd5ac8f..c89d490b00 100644
--- a/package.xml
+++ b/package.xml
@@ -110,6 +110,7 @@
+    <file baseinstalldir="/" name="src/core/lib/gprpp/mutex_lock.h" role="src" />
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 6581385ff9..a624850f4b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -92,6 +92,7 @@
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/gprpp/manual_constructor.h"
 #include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/gprpp/orphanable.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
 #include "src/core/lib/iomgr/combiner.h"
@@ -1259,7 +1260,7 @@ void GrpcLb::FillChildRefsForChannelz(ChildRefsList* child_subchannels,
                                       ChildRefsList* child_channels) {
   // delegate to the RoundRobin to fill the children subchannels.
   rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
-  mu_guard guard(&lb_channel_mu_);
+  MutexLock lock(&lb_channel_mu_);
   if (lb_channel_ != nullptr) {
     grpc_core::channelz::ChannelNode* channel_node =
         grpc_channel_get_channelz_node(lb_channel_);
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 2b6a9ba8c5..0f5041ff51 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -27,6 +27,7 @@
 #include "src/core/ext/filters/client_channel/subchannel.h"
 #include "src/core/ext/filters/client_channel/subchannel_index.h"
 #include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
 #include "src/core/lib/transport/connectivity_state.h"
@@ -308,7 +309,7 @@ void PickFirst::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
 
 void PickFirst::FillChildRefsForChannelz(
     ChildRefsList* child_subchannels_to_fill, ChildRefsList* ignored) {
-  mu_guard guard(&child_refs_mu_);
+  MutexLock lock(&child_refs_mu_);
   for (size_t i = 0; i < child_subchannels_.size(); ++i) {
     // TODO(ncteisen): implement a de dup loop that is not O(n^2). Might
     // have to implement lightweight set. For now, we don't care about
@@ -335,7 +336,7 @@ void PickFirst::UpdateChildRefsLocked() {
     latest_pending_subchannel_list_->PopulateChildRefsList(&cs);
   }
   // atomically update the data that channelz will actually be looking at.
-  mu_guard guard(&child_refs_mu_);
+  MutexLock lock(&child_refs_mu_);
   child_subchannels_ = std::move(cs);
 }
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index fea84331d8..c730b3bd2b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -36,6 +36,7 @@
 #include "src/core/ext/filters/client_channel/subchannel_index.h"
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/debug/trace.h"
+#include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -400,7 +401,7 @@ bool RoundRobin::PickLocked(PickState* pick, grpc_error** error) {
 
 void RoundRobin::FillChildRefsForChannelz(
     ChildRefsList* child_subchannels_to_fill, ChildRefsList* ignored) {
-  mu_guard guard(&child_refs_mu_);
+  MutexLock lock(&child_refs_mu_);
   for (size_t i = 0; i < child_subchannels_.size(); ++i) {
     // TODO(ncteisen): implement a de dup loop that is not O(n^2). Might
     // have to implement lightweight set. For now, we don't care about
@@ -427,7 +428,7 @@ void RoundRobin::UpdateChildRefsLocked() {
     latest_pending_subchannel_list_->PopulateChildRefsList(&cs);
   }
   // atomically update the data that channelz will actually be looking at.
-  mu_guard guard(&child_refs_mu_);
+  MutexLock lock(&child_refs_mu_);
   child_subchannels_ = std::move(cs);
 }
diff --git a/src/core/lib/channel/channelz_registry.cc b/src/core/lib/channel/channelz_registry.cc
index 38496b3d78..f79d2f0c17 100644
--- a/src/core/lib/channel/channelz_registry.cc
+++ b/src/core/lib/channel/channelz_registry.cc
@@ -23,6 +23,7 @@
 #include "src/core/lib/channel/channelz_registry.h"
 #include "src/core/lib/gpr/useful.h"
 #include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/mutex_lock.h"
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -53,7 +54,7 @@ ChannelzRegistry::ChannelzRegistry() { gpr_mu_init(&mu_); }
 ChannelzRegistry::~ChannelzRegistry() { gpr_mu_destroy(&mu_); }
 
 intptr_t ChannelzRegistry::InternalRegisterEntry(const RegistryEntry& entry) {
-  mu_guard guard(&mu_);
+  MutexLock lock(&mu_);
   entities_.push_back(entry);
   intptr_t uuid = entities_.size();
   return uuid;
@@ -61,7 +62,7 @@ intptr_t ChannelzRegistry::InternalRegisterEntry(const RegistryEntry& entry) {
 
 void ChannelzRegistry::InternalUnregisterEntry(intptr_t uuid, EntityType type) {
   GPR_ASSERT(uuid >= 1);
-  mu_guard guard(&mu_);
+  MutexLock lock(&mu_);
   GPR_ASSERT(static_cast<size_t>(uuid) <= entities_.size());
   GPR_ASSERT(entities_[uuid - 1].type == type);
   entities_[uuid - 1].object = nullptr;
@@ -69,7 +70,7 @@ void ChannelzRegistry::InternalUnregisterEntry(intptr_t uuid, EntityType type) {
 }
 
 void* ChannelzRegistry::InternalGetEntry(intptr_t uuid, EntityType type) {
-  mu_guard guard(&mu_);
+  MutexLock lock(&mu_);
   if (uuid < 1 || uuid > static_cast<intptr_t>(entities_.size())) {
     return nullptr;
   }
diff --git a/src/core/lib/gprpp/mutex_lock.h b/src/core/lib/gprpp/mutex_lock.h
new file mode 100644
index 0000000000..54751d5fe4
--- /dev/null
+++ b/src/core/lib/gprpp/mutex_lock.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H
+#define GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/support/sync.h>
+
+namespace grpc_core {
+
+class MutexLock {
+ public:
+  explicit MutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu); }
+  ~MutexLock() { gpr_mu_unlock(mu_); }
+
+  MutexLock(const MutexLock&) = delete;
+  MutexLock& operator=(const MutexLock&) = delete;
+
+ private:
+  gpr_mu* const mu_;
+};
+
+}  // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H */
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 96eae30345..b082634af1 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -46,6 +46,7 @@
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gpr/useful.h"
 #include "src/core/lib/gprpp/manual_constructor.h"
+#include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/iomgr/block_annotate.h"
 #include "src/core/lib/iomgr/iomgr_internal.h"
 #include "src/core/lib/iomgr/is_epollexclusive_available.h"
@@ -735,7 +736,7 @@ static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
 static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
   GPR_TIMER_SCOPE("kick_one_worker", 0);
   pollable* p = specific_worker->pollable_obj;
-  grpc_core::mu_guard lock(&p->mu);
+  grpc_core::MutexLock lock(&p->mu);
   GPR_ASSERT(specific_worker != nullptr);
   if (specific_worker->kicked) {
     if (grpc_polling_trace.enabled()) {
diff --git a/src/core/tsi/ssl/session_cache/ssl_session_cache.cc b/src/core/tsi/ssl/session_cache/ssl_session_cache.cc
index fe4f83a13d..ce74fde343 100644
--- a/src/core/tsi/ssl/session_cache/ssl_session_cache.cc
+++ b/src/core/tsi/ssl/session_cache/ssl_session_cache.cc
@@ -18,9 +18,9 @@
 
 #include <grpc/support/port_platform.h>
 
-#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h"
-
+#include "src/core/lib/gprpp/mutex_lock.h"
+#include "src/core/tsi/ssl/session_cache/ssl_session.h"
+#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h"
 
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
@@ -97,7 +97,7 @@ SslSessionLRUCache::~SslSessionLRUCache() {
 }
 
 size_t SslSessionLRUCache::Size() {
-  grpc_core::mu_guard guard(&lock_);
+  grpc_core::MutexLock lock(&lock_);
   return use_order_list_size_;
 }
@@ -117,7 +117,7 @@ SslSessionLRUCache::Node* SslSessionLRUCache::FindLocked(
 }
 
 void SslSessionLRUCache::Put(const char* key, SslSessionPtr session) {
-  grpc_core::mu_guard guard(&lock_);
+  grpc_core::MutexLock lock(&lock_);
   Node* node = FindLocked(grpc_slice_from_static_string(key));
   if (node != nullptr) {
     node->SetSession(std::move(session));
@@ -140,7 +140,7 @@ void SslSessionLRUCache::Put(const char* key, SslSessionPtr session) {
 }
 
 SslSessionPtr SslSessionLRUCache::Get(const char* key) {
-  grpc_core::mu_guard guard(&lock_);
+  grpc_core::MutexLock lock(&lock_);
   // Key is only used for lookups.
   grpc_slice key_slice = grpc_slice_from_static_string(key);
   Node* node = FindLocked(key_slice);
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 592b0b20e6..43ebf8cad9 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -1058,6 +1058,7 @@ src/core/lib/gprpp/fork.h \
 src/core/lib/gprpp/inlined_vector.h \
 src/core/lib/gprpp/manual_constructor.h \
 src/core/lib/gprpp/memory.h \
+src/core/lib/gprpp/mutex_lock.h \
 src/core/lib/gprpp/orphanable.h \
 src/core/lib/gprpp/ref_counted.h \
 src/core/lib/gprpp/ref_counted_ptr.h \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index fa2ad93a45..0f5047a305 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -1142,6 +1142,7 @@ src/core/lib/gprpp/fork.h \
 src/core/lib/gprpp/inlined_vector.h \
 src/core/lib/gprpp/manual_constructor.h \
 src/core/lib/gprpp/memory.h \
+src/core/lib/gprpp/mutex_lock.h \
 src/core/lib/gprpp/orphanable.h \
 src/core/lib/gprpp/ref_counted.h \
 src/core/lib/gprpp/ref_counted_ptr.h \
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 34e23f09c2..f81f3bb677 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -9306,6 +9306,7 @@
       "src/core/lib/gprpp/fork.h",
       "src/core/lib/gprpp/manual_constructor.h",
       "src/core/lib/gprpp/memory.h",
+      "src/core/lib/gprpp/mutex_lock.h",
       "src/core/lib/gprpp/thd.h",
       "src/core/lib/profiling/timers.h"
     ],
@@ -9353,6 +9354,7 @@
       "src/core/lib/gprpp/fork.h",
       "src/core/lib/gprpp/manual_constructor.h",
       "src/core/lib/gprpp/memory.h",
+      "src/core/lib/gprpp/mutex_lock.h",
       "src/core/lib/gprpp/thd.h",
       "src/core/lib/profiling/timers.h"
     ],
-- cgit v1.2.3
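
The mu_guard class removed from the public sync.h above survives, renamed, as grpc_core::MutexLock in the new internal-only header. The pattern it provides is the usual RAII one: the constructor takes the lock, the destructor releases it on every exit path. A small sketch (the counter and function names are illustrative, assuming a gpr_mu that was gpr_mu_init()'d elsewhere):

    #include <grpc/support/sync.h>

    #include "src/core/lib/gprpp/mutex_lock.h"

    static gpr_mu g_mu;  // assumed: gpr_mu_init(&g_mu) ran at startup
    static int g_counter = 0;

    int AddIfPositive(int delta) {
      grpc_core::MutexLock lock(&g_mu);  // ctor calls gpr_mu_lock(&g_mu)
      if (delta <= 0) {
        return g_counter;  // ~MutexLock unlocks on this early return too
      }
      g_counter += delta;
      return g_counter;
    }  // ~MutexLock calls gpr_mu_unlock(&g_mu)
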
From b59d8674d24d9b40d8d8b2b40f2be0118ff524d2 Mon Sep 17 00:00:00 2001
From: Eric Gribkoff
Date: Fri, 10 Aug 2018 10:13:36 -0700
Subject: Python post-fork handler: exit if grpc shutdown fails

---
 grpc.def                                                  | 1 +
 include/grpc/grpc.h                                       | 6 ++++++
 src/core/lib/iomgr/fork_posix.cc                          | 2 +-
 src/core/lib/surface/init.h                               | 1 -
 src/python/grpcio/grpc/_cython/_cygrpc/fork_posix.pyx.pxi | 4 ++++
 src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi           | 1 +
 src/ruby/ext/grpc/rb_grpc_imports.generated.c             | 2 ++
 src/ruby/ext/grpc/rb_grpc_imports.generated.h             | 3 +++
 test/core/surface/public_headers_must_be_c89.c            | 1 +
 9 files changed, 19 insertions(+), 2 deletions(-)
(limited to 'include/grpc')

diff --git a/grpc.def b/grpc.def
index 009de4e868..72e3e90c62 100644
--- a/grpc.def
+++ b/grpc.def
@@ -15,6 +15,7 @@ EXPORTS
     grpc_register_plugin
     grpc_init
     grpc_shutdown
+    grpc_is_initialized
     grpc_version_string
     grpc_g_stands_for
     grpc_completion_queue_factory_lookup
diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h
index 897b89851a..4e50cd0bac 100644
--- a/include/grpc/grpc.h
+++ b/include/grpc/grpc.h
@@ -79,6 +79,12 @@ GRPCAPI void grpc_init(void);
     destroyed. */
 GRPCAPI void grpc_shutdown(void);
 
+/** EXPERIMENTAL. Returns 1 if the grpc library has been initialized.
+    TODO(ericgribkoff) Decide if this should be promoted to non-experimental as
+    part of stabilizing the fork support API, as tracked in
+    https://github.com/grpc/grpc/issues/15334 */
+GRPCAPI int grpc_is_initialized(void);
+
 /** Return a string representing the current version of grpc */
 GRPCAPI const char* grpc_version_string(void);
 
diff --git a/src/core/lib/iomgr/fork_posix.cc b/src/core/lib/iomgr/fork_posix.cc
index a5b61fb4ce..ac85c81de2 100644
--- a/src/core/lib/iomgr/fork_posix.cc
+++ b/src/core/lib/iomgr/fork_posix.cc
@@ -25,6 +25,7 @@
 #include <string.h>
 
 #include <grpc/fork.h>
+#include <grpc/grpc.h>
 #include <grpc/support/log.h>
 
 #include "src/core/lib/gpr/env.h"
@@ -34,7 +35,6 @@
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/iomgr/timer_manager.h"
 #include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/surface/init.h"
 
 /*
  * NOTE: FORKING IS NOT GENERALLY SUPPORTED, THIS IS ONLY INTENDED TO WORK
diff --git a/src/core/lib/surface/init.h b/src/core/lib/surface/init.h
index 9353208332..193f51447d 100644
--- a/src/core/lib/surface/init.h
+++ b/src/core/lib/surface/init.h
@@ -22,6 +22,5 @@
 void grpc_register_security_filters(void);
 void grpc_security_pre_init(void);
 void grpc_security_init(void);
-int grpc_is_initialized(void);
 
 #endif /* GRPC_CORE_LIB_SURFACE_INIT_H */
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/fork_posix.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
index 1176258da8..0d2516977b 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
@@ -73,6 +73,10 @@ cdef void __postfork_child() nogil:
     # TODO(ericgribkoff) Check and abort if core is not shutdown
     with _fork_state.fork_in_progress_condition:
         _fork_state.fork_in_progress = False
+    if grpc_is_initialized() > 0:
+        with gil:
+            _LOGGER.error('Failed to shutdown gRPC Core after fork()')
+            os._exit(os.EX_USAGE)
 
 def fork_handlers_and_grpc_init():
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index bcbfec0c9f..4781219319 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -322,6 +322,7 @@ cdef extern from "grpc/grpc.h":
 
   void grpc_init() nogil
   void grpc_shutdown() nogil
+  int grpc_is_initialized() nogil
 
   ctypedef struct grpc_completion_queue_factory:
     pass
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 37b97aabd4..f44f89dfa0 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -38,6 +38,7 @@ grpc_call_details_destroy_type grpc_call_details_destroy_import;
 grpc_register_plugin_type grpc_register_plugin_import;
 grpc_init_type grpc_init_import;
 grpc_shutdown_type grpc_shutdown_import;
+grpc_is_initialized_type grpc_is_initialized_import;
 grpc_version_string_type grpc_version_string_import;
 grpc_g_stands_for_type grpc_g_stands_for_import;
 grpc_completion_queue_factory_lookup_type grpc_completion_queue_factory_lookup_import;
@@ -291,6 +292,7 @@ void grpc_rb_load_imports(HMODULE library) {
   grpc_register_plugin_import = (grpc_register_plugin_type) GetProcAddress(library, "grpc_register_plugin");
   grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init");
   grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown");
+  grpc_is_initialized_import = (grpc_is_initialized_type) GetProcAddress(library, "grpc_is_initialized");
   grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string");
   grpc_g_stands_for_import = (grpc_g_stands_for_type) GetProcAddress(library, "grpc_g_stands_for");
   grpc_completion_queue_factory_lookup_import = (grpc_completion_queue_factory_lookup_type) GetProcAddress(library, "grpc_completion_queue_factory_lookup");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index f7a00046e3..99433f0e40 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -89,6 +89,9 @@ extern grpc_init_type grpc_init_import;
 typedef void(*grpc_shutdown_type)(void);
 extern grpc_shutdown_type grpc_shutdown_import;
 #define grpc_shutdown grpc_shutdown_import
+typedef int(*grpc_is_initialized_type)(void);
+extern grpc_is_initialized_type grpc_is_initialized_import;
+#define grpc_is_initialized grpc_is_initialized_import
 typedef const char*(*grpc_version_string_type)(void);
 extern grpc_version_string_type grpc_version_string_import;
 #define grpc_version_string grpc_version_string_import
diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c
index 42dc95b9bb..b832a1661b 100644
--- a/test/core/surface/public_headers_must_be_c89.c
+++ b/test/core/surface/public_headers_must_be_c89.c
@@ -77,6 +77,7 @@ int main(int argc, char **argv) {
   printf("%lx", (unsigned long) grpc_register_plugin);
   printf("%lx", (unsigned long) grpc_init);
   printf("%lx", (unsigned long) grpc_shutdown);
+  printf("%lx", (unsigned long) grpc_is_initialized);
   printf("%lx", (unsigned long) grpc_version_string);
   printf("%lx", (unsigned long) grpc_g_stands_for);
   printf("%lx", (unsigned long) grpc_completion_queue_factory_lookup);
-- cgit v1.2.3
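
The check the Cython post-fork handler performs above can also be expressed directly against the new C API. A sketch of a child-side pthread_atfork handler (the function name is illustrative; grpc_is_initialized() and gpr_log() are the real entry points added or used by this commit):

    #include <unistd.h>

    #include <grpc/grpc.h>
    #include <grpc/support/log.h>

    static void postfork_child_check(void) {
      // If core is still marked initialized here, grpc_shutdown() did not
      // complete before the fork, and continuing in the child is unsafe.
      if (grpc_is_initialized() > 0) {
        gpr_log(GPR_ERROR, "Failed to shut down gRPC Core after fork()");
        _exit(1);
      }
    }
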