author    Sree Kuchibhotla <sreek@google.com>  2018-06-12 16:17:54 -0700
committer Sree Kuchibhotla <sreek@google.com>  2018-07-17 19:54:47 -0700
commit    913f9b930a7fb6a5377c1b5e15ec47f5645828e7 (patch)
tree      cab963cb867be898d253001a8fbdd383486aea03 /src/core
parent    67bb4e30302cec45c9e05144a64ee6a38c0f9559 (diff)
Add Core resource quota implementation
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/lib/iomgr/resource_quota.cc  48
-rw-r--r--  src/core/lib/iomgr/resource_quota.h    2
2 files changed, 44 insertions(+), 6 deletions(-)
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index b50b2f2e46..a30688bd87 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -138,11 +138,22 @@ struct grpc_resource_quota {
gpr_atm last_size;
+ /* Mutex to protect max_threads and num_threads */
+ /* Note: We could have used gpr_atm for max_threads and num_threads and
+ * avoided having this mutex; but in that case, each invocation of
+ * grpc_resource_user_alloc_threads() would have to do at least two atomic
+ * loads (for max_threads and num_threads) followed by a CAS (on num_threads).
+ * Moreover, we expect grpc_resource_user_alloc_threads() to often be called
+ * concurrently, thereby increasing the chances of the CAS failing. This
+ * additional complexity is not worth the tiny perf gain we may (or may not)
+ * get by using atomics */
+ gpr_mu thd_mu;
+
/* Max number of threads allowed */
int max_threads;
/* Number of threads currently allocated via this resource_quota object */
- gpr_atm num_threads;
+ int num_threads;
/* Has rq_step been scheduled to occur? */
bool step_scheduled;
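
The comment above argues for a mutex over atomics. For illustration only, a minimal sketch (not part of this patch) of what the rejected lock-free variant could look like: it assumes the two counters had been declared as gpr_atm, and the stand-in struct and helper name are hypothetical.

#include <grpc/support/atm.h>

/* Hypothetical stand-in for the relevant grpc_resource_quota fields,
 * declared as atomics instead of plain ints. */
typedef struct {
  gpr_atm max_threads;
  gpr_atm num_threads;
} thread_quota_atomics;

static bool alloc_threads_with_atomics(thread_quota_atomics* q, int thd_count) {
  for (;;) {
    gpr_atm max = gpr_atm_no_barrier_load(&q->max_threads); /* atomic load #1 */
    gpr_atm cur = gpr_atm_no_barrier_load(&q->num_threads); /* atomic load #2 */
    if (cur + thd_count > max) return false;
    /* The CAS fails whenever a concurrent caller changed num_threads in the
     * meantime, forcing a retry -- the contention cost the comment refers to. */
    if (gpr_atm_no_barrier_cas(&q->num_threads, cur, cur + thd_count)) {
      return true;
    }
  }
}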
@@ -606,8 +617,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
+ gpr_mu_init(&resource_quota->thd_mu);
resource_quota->max_threads = INT_MAX;
- gpr_atm_no_barrier_store(&resource_quota->num_threads, 0);
+ resource_quota->num_threads = 0;
resource_quota->step_scheduled = false;
resource_quota->reclaiming = false;
gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -662,7 +674,11 @@ double grpc_resource_quota_get_memory_pressure(
/* Public API */
void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota,
- int new_max_threads) {}
+ int new_max_threads) {
+ gpr_mu_lock(&resource_quota->thd_mu);
+ resource_quota->max_threads = new_max_threads;
+ gpr_mu_unlock(&resource_quota->thd_mu);
+}
/* Public API */
void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
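
For context, grpc_resource_quota_set_max_threads is part of gRPC's public C API (its declaration lives outside this diff). A minimal application-side usage sketch might look like the following; the 64 MiB and 100-thread limits are arbitrary, and attaching the quota to a server or channel via channel args is omitted.

#include <grpc/grpc.h>

int main(void) {
  grpc_init();
  grpc_resource_quota* rq = grpc_resource_quota_create("server_quota");
  grpc_resource_quota_resize(rq, 64 * 1024 * 1024);  /* cap memory at 64 MiB */
  grpc_resource_quota_set_max_threads(rq, 100);      /* cap gRPC-owned threads */
  /* ... pass rq to a server/channel via channel args and run it ... */
  grpc_resource_quota_unref(rq);
  grpc_shutdown();
  return 0;
}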
@@ -806,11 +822,33 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user,
int thd_count) {
- return true;
+ bool is_success = false;
+ gpr_mu_lock(&resource_user->resource_quota->thd_mu);
+ grpc_resource_quota* rq = resource_user->resource_quota;
+ if (rq->num_threads + thd_count <= rq->max_threads) {
+ rq->num_threads += thd_count;
+ gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, thd_count);
+ is_success = true;
+ }
+ gpr_mu_unlock(&resource_user->resource_quota->thd_mu);
+ return is_success;
}
void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
- int thd_count) {}
+ int thd_count) {
+ gpr_mu_lock(&resource_user->resource_quota->thd_mu);
+ grpc_resource_quota* rq = resource_user->resource_quota;
+ rq->num_threads -= thd_count;
+ int old_cnt = static_cast<int>(
+ gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, -thd_count));
+ if (old_cnt < thd_count || rq->num_threads < 0) {
+ gpr_log(GPR_ERROR,
+ "Releasing more threads (%d) that currently allocated (rq threads: "
+ "%d, ru threads: %d)",
+ thd_count, old_cnt, rq->num_threads + thd_count);
+ }
+ gpr_mu_unlock(&resource_user->resource_quota->thd_mu);
+}
void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done) {
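
To make the contract of the two new functions concrete, a hypothetical caller sketch (not part of this patch; the helper name is invented) showing how a core thread pool might gate thread creation on the quota:

/* Hypothetical sketch: gate worker creation on the resource user's quota. */
static void maybe_start_worker(grpc_resource_user* ru) {
  if (!grpc_resource_user_alloc_threads(ru, 1)) {
    /* Quota exhausted: do not spawn a thread; queue or reject the work. */
    return;
  }
  /* ... spawn the worker thread and run it ... */
  /* When the worker exits, hand the quota back: */
  grpc_resource_user_free_threads(ru, 1);
}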
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index a111ebb4d8..7342ef84c8 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -95,7 +95,7 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
/* Attempts to get quota (from the resource_user) to create 'thd_count' number
* of threads. Returns true if successful (i.e the caller is now free to create
- * 'thd_count' number of threads or false if quota is not available */
+ * 'thd_count' number of threads) or false if quota is not available */
bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user,
int thd_count);
/* Releases 'thd_count' worth of quota back to the resource user. The quota