author:    Sree Kuchibhotla <sreek@google.com>    2018-06-12 16:17:54 -0700
committer: Sree Kuchibhotla <sreek@google.com>    2018-07-17 19:54:47 -0700
commit:    913f9b930a7fb6a5377c1b5e15ec47f5645828e7 (patch)
tree:      cab963cb867be898d253001a8fbdd383486aea03
parent:    67bb4e30302cec45c9e05144a64ee6a38c0f9559 (diff)
Add Core resource quota implementation
-rw-r--r-- | include/grpcpp/resource_quota.h      |  6
-rw-r--r-- | src/core/lib/iomgr/resource_quota.cc | 48
-rw-r--r-- | src/core/lib/iomgr/resource_quota.h  |  2

3 files changed, 47 insertions, 9 deletions
diff --git a/include/grpcpp/resource_quota.h b/include/grpcpp/resource_quota.h
index 77cdd48dcc..50bd1cb849 100644
--- a/include/grpcpp/resource_quota.h
+++ b/include/grpcpp/resource_quota.h
@@ -26,10 +26,10 @@ struct grpc_resource_quota;
 
 namespace grpc {
 
-/// ResourceQuota represents a bound on memory usage by the gRPC library.
-/// A ResourceQuota can be attached to a server (via \a ServerBuilder),
+/// ResourceQuota represents a bound on memory and thread usage by the gRPC
+/// library. A ResourceQuota can be attached to a server (via \a ServerBuilder),
 /// or a client channel (via \a ChannelArguments).
-/// gRPC will attempt to keep memory used by all attached entities
+/// gRPC will attempt to keep memory and threads used by all attached entities
 /// below the ResourceQuota bound.
 class ResourceQuota final : private GrpcLibraryCodegen {
  public:
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index b50b2f2e46..a30688bd87 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -138,11 +138,22 @@ struct grpc_resource_quota {
   gpr_atm last_size;
 
+  /* Mutex to protect max_threads and num_threads */
+  /* Note: We could have used gpr_atm for max_threads and num_threads and
+   * avoided this mutex; but in that case, each invocation of
+   * grpc_resource_user_alloc_threads() would have to do at least two atomic
+   * loads (for max_threads and num_threads) followed by a CAS (on
+   * num_threads). Moreover, we expect grpc_resource_user_alloc_threads() to
+   * often be called concurrently, thereby increasing the chances of failing
+   * the CAS operation. This additional complexity is not worth the tiny perf
+   * gain we may (or may not) get by using atomics */
+  gpr_mu thd_mu;
+
   /* Max number of threads allowed */
   int max_threads;
 
   /* Number of threads currently allocated via this resource_quota object */
-  gpr_atm num_threads;
+  int num_threads;
 
   /* Has rq_step been scheduled to occur? */
   bool step_scheduled;
@@ -606,8 +617,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
   resource_quota->free_pool = INT64_MAX;
   resource_quota->size = INT64_MAX;
   gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
+  gpr_mu_init(&resource_quota->thd_mu);
   resource_quota->max_threads = INT_MAX;
-  gpr_atm_no_barrier_store(&resource_quota->num_threads, 0);
+  resource_quota->num_threads = 0;
   resource_quota->step_scheduled = false;
   resource_quota->reclaiming = false;
   gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -662,7 +674,11 @@ double grpc_resource_quota_get_memory_pressure(
 
 /* Public API */
 void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota,
-                                         int new_max_threads) {}
+                                         int new_max_threads) {
+  gpr_mu_lock(&resource_quota->thd_mu);
+  resource_quota->max_threads = new_max_threads;
+  gpr_mu_unlock(&resource_quota->thd_mu);
+}
 
 /* Public API */
 void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
@@ -806,11 +822,33 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
 
 bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user,
                                       int thd_count) {
-  return true;
+  bool is_success = false;
+  gpr_mu_lock(&resource_user->resource_quota->thd_mu);
+  grpc_resource_quota* rq = resource_user->resource_quota;
+  if (rq->num_threads + thd_count <= rq->max_threads) {
+    rq->num_threads += thd_count;
+    gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, thd_count);
+    is_success = true;
+  }
+  gpr_mu_unlock(&resource_user->resource_quota->thd_mu);
+  return is_success;
 }
 
 void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
-                                     int thd_count) {}
+                                     int thd_count) {
+  gpr_mu_lock(&resource_user->resource_quota->thd_mu);
+  grpc_resource_quota* rq = resource_user->resource_quota;
+  rq->num_threads -= thd_count;
+  int old_cnt = static_cast<int>(
+      gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, -thd_count));
+  if (old_cnt < thd_count || rq->num_threads < 0) {
+    gpr_log(GPR_ERROR,
+            "Releasing more threads (%d) than currently allocated (rq threads: "
+            "%d, ru threads: %d)",
+            thd_count, old_cnt, rq->num_threads + thd_count);
+  }
+  gpr_mu_unlock(&resource_user->resource_quota->thd_mu);
+}
 
 void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
                               grpc_closure* optional_on_done) {
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index a111ebb4d8..7342ef84c8 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -95,7 +95,7 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
 
 /* Attempts to get quota (from the resource_user) to create 'thd_count' number
  * of threads. Returns true if successful (i.e. the caller is now free to create
- * 'thd_count' number of threads or false if quota is not available */
+ * 'thd_count' number of threads) or false if quota is not available */
 bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user,
                                       int thd_count);
 
 /* Releases 'thd_count' worth of quota back to the resource user. The quota
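The struct-level comment in the first resource_quota.cc hunk argues for a mutex over atomics. To make that trade-off concrete, here is a minimal standalone sketch of the load-plus-CAS retry loop the comment is arguing against; it uses std::atomic and hypothetical names, not gRPC's gpr_atm API:

```cpp
#include <atomic>
#include <climits>

// Standalone sketch of the lock-free alternative the commit comment rejects.
// Field names mirror grpc_resource_quota, but this is not gRPC code.
struct QuotaSketch {
  std::atomic<int> max_threads{INT_MAX};
  std::atomic<int> num_threads{0};
};

// Reserving thd_count threads needs a load of max_threads plus a load/CAS
// loop on num_threads; every failed CAS forces another iteration.
bool alloc_threads_lockfree(QuotaSketch* q, int thd_count) {
  int cur = q->num_threads.load(std::memory_order_relaxed);
  for (;;) {
    if (cur + thd_count > q->max_threads.load(std::memory_order_relaxed)) {
      return false;  // reservation would exceed the bound
    }
    if (q->num_threads.compare_exchange_weak(cur, cur + thd_count,
                                             std::memory_order_relaxed)) {
      return true;  // reservation succeeded
    }
    // CAS failed: cur was refreshed with the current value; re-check.
  }
}
```

Since grpc_resource_user_alloc_threads() is expected to be called concurrently and is not on a hot path, the committed version takes thd_mu once and does plain int arithmetic instead.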
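At the core level, the intended call pattern is reserve-before-spawn and release-on-exit. A hedged usage sketch, assuming the declarations in src/core/lib/iomgr/resource_quota.h (grpc_resource_quota_create and grpc_resource_user_create come from the same header; the worker spawn/join helpers are hypothetical):

```cpp
#include "src/core/lib/iomgr/resource_quota.h"

// One-time setup: create a quota, cap its thread count, and hang a
// resource user off it.
grpc_resource_quota* rq = grpc_resource_quota_create("thread-capped-quota");
grpc_resource_user* ru = grpc_resource_user_create(rq, "worker-pool");

// Reserve quota before creating threads; return it when they exit.
void run_workers_under_quota(grpc_resource_user* resource_user,
                             int thd_count) {
  grpc_resource_quota_set_max_threads(rq, 64);  // allow at most 64 threads
  if (!grpc_resource_user_alloc_threads(resource_user, thd_count)) {
    return;  // quota exhausted: back off or queue the work instead
  }
  // ... spawn_workers(thd_count); join_workers(); ... (hypothetical)
  grpc_resource_user_free_threads(resource_user, thd_count);
}
```

Note that multiple resource users can draw threads from the same quota, which is why both the per-quota count (num_threads, under thd_mu) and the per-user count (the gpr_atm num_threads on grpc_resource_user) are updated together.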
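At the C++ surface, the updated comment in include/grpcpp/resource_quota.h says the quota can be attached to a server via ServerBuilder. A sketch of that wiring, assuming a SetMaxThreads() wrapper on grpc::ResourceQuota; that wrapper is not part of this diff:

```cpp
#include <grpcpp/resource_quota.h>
#include <grpcpp/server_builder.h>

// Bound both memory and threads for everything attached to this server.
// SetMaxThreads() is assumed to wrap grpc_resource_quota_set_max_threads().
void ConfigureBuilder(grpc::ServerBuilder* builder) {
  grpc::ResourceQuota quota("server-quota");
  quota.Resize(256 * 1024 * 1024);  // memory bound: 256 MiB
  quota.SetMaxThreads(100);         // thread bound (assumed wrapper)
  builder->SetResourceQuota(quota);
}
```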