author    Sree Kuchibhotla <sreek@google.com>    2018-07-27 16:19:03 -0700
committer Sree Kuchibhotla <sreek@google.com>    2018-07-27 16:19:03 -0700
commit    c2a22a1ab8e8221c95f8874668eb6260c1e171b4 (patch)
tree      6a1de70b7e4d8c82ef1eba01c7a92f91c4d31333
parent    9bc8ee42c20385214027ced5991b9973a0282655 (diff)
Address core review comments
 src/core/lib/iomgr/resource_quota.cc           | 80
 src/core/lib/iomgr/resource_quota.h            |  8
 src/cpp/thread_manager/thread_manager.cc       |  6
 src/cpp/thread_manager/thread_manager.h        |  7
 test/core/iomgr/resource_quota_test.cc         | 45
 test/cpp/thread_manager/thread_manager_test.cc | 30
 6 files changed, 94 insertions(+), 82 deletions(-)
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index 47b7856e95..b6fc7579f7 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -97,7 +97,7 @@ struct grpc_resource_user {
bool added_to_free_pool;
/* The number of threads currently allocated to this resource user */
- gpr_atm num_threads;
+ gpr_atm num_threads_allocated;
/* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
*/
@@ -138,22 +138,23 @@ struct grpc_resource_quota {
gpr_atm last_size;
- /* Mutex to protect max_threads and num_threads */
- /* Note: We could have used gpr_atm for max_threads and num_threads and avoid
- * having this mutex; but in that case, each invocation of the function
- * grpc_resource_user_alloc_threads() would have had to do at least two atomic
- * loads (for max_threads and num_threads) followed by a CAS (on num_threads).
- * Moreover, we expect grpc_resource_user_alloc_threads() to be often called
- * concurrently thereby increasing the chances of failing the CAS operation.
- * This additional complexity is not worth the tiny perf gain we may (or may
- * not) have by using atomics */
- gpr_mu thd_mu;
+ /* Mutex to protect max_threads and num_threads_allocated */
+ /* Note: We could have used gpr_atm for max_threads and num_threads_allocated
+ * and avoid having this mutex; but in that case, each invocation of the
+ * function grpc_resource_user_allocate_threads() would have had to do at
+ * least two atomic loads (for max_threads and num_threads_allocated) followed
+ * by a CAS (on num_threads_allocated).
+ * Moreover, we expect grpc_resource_user_allocate_threads() to be often
+ * called concurrently thereby increasing the chances of failing the CAS
+ * operation. This additional complexity is not worth the tiny perf gain we
+ * may (or may not) have by using atomics */
+ gpr_mu thread_count_mu;
/* Max number of threads allowed */
int max_threads;
/* Number of threads currently allocated via this resource_quota object */
- int num_threads;
+ int num_threads_allocated;
/* Has rq_step been scheduled to occur? */
bool step_scheduled;
@@ -548,9 +549,9 @@ static void ru_destroy(void* ru, grpc_error* error) {
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
// Free all the remaining thread quota
- grpc_resource_user_free_threads(
- resource_user,
- static_cast<int>(gpr_atm_no_barrier_load(&resource_user->num_threads)));
+ grpc_resource_user_free_threads(resource_user,
+ static_cast<int>(gpr_atm_no_barrier_load(
+ &resource_user->num_threads_allocated)));
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, static_cast<grpc_rulist>(i));
@@ -622,9 +623,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
- gpr_mu_init(&resource_quota->thd_mu);
+ gpr_mu_init(&resource_quota->thread_count_mu);
resource_quota->max_threads = INT_MAX;
- resource_quota->num_threads = 0;
+ resource_quota->num_threads_allocated = 0;
resource_quota->step_scheduled = false;
resource_quota->reclaiming = false;
gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -647,7 +648,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
if (gpr_unref(&resource_quota->refs)) {
- GPR_ASSERT(resource_quota->num_threads == 0); // No outstanding thd quota
+ // No outstanding thread quota
+ GPR_ASSERT(resource_quota->num_threads_allocated == 0);
GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
gpr_free(resource_quota->name);
gpr_free(resource_quota);
@@ -681,9 +683,10 @@ double grpc_resource_quota_get_memory_pressure(
/* Public API */
void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota,
int new_max_threads) {
- gpr_mu_lock(&resource_quota->thd_mu);
+ GPR_ASSERT(new_max_threads >= 0);
+ gpr_mu_lock(&resource_quota->thread_count_mu);
resource_quota->max_threads = new_max_threads;
- gpr_mu_unlock(&resource_quota->thd_mu);
+ gpr_mu_unlock(&resource_quota->thread_count_mu);
}
/* Public API */
@@ -771,7 +774,7 @@ grpc_resource_user* grpc_resource_user_create(
grpc_closure_list_init(&resource_user->on_allocated);
resource_user->allocating = false;
resource_user->added_to_free_pool = false;
- gpr_atm_no_barrier_store(&resource_user->num_threads, 0);
+ gpr_atm_no_barrier_store(&resource_user->num_threads_allocated, 0);
resource_user->reclaimers[0] = nullptr;
resource_user->reclaimers[1] = nullptr;
resource_user->new_reclaimers[0] = nullptr;
@@ -826,35 +829,38 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
}
}
-bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user,
- int thd_count) {
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+ int thread_count) {
+ GPR_ASSERT(thread_count >= 0);
bool is_success = false;
- gpr_mu_lock(&resource_user->resource_quota->thd_mu);
+ gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
grpc_resource_quota* rq = resource_user->resource_quota;
- if (rq->num_threads + thd_count <= rq->max_threads) {
- rq->num_threads += thd_count;
- gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, thd_count);
+ if (rq->num_threads_allocated + thread_count <= rq->max_threads) {
+ rq->num_threads_allocated += thread_count;
+ gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated,
+ thread_count);
is_success = true;
}
- gpr_mu_unlock(&resource_user->resource_quota->thd_mu);
+ gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
return is_success;
}
void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
- int thd_count) {
- gpr_mu_lock(&resource_user->resource_quota->thd_mu);
+ int thread_count) {
+ GPR_ASSERT(thread_count >= 0);
+ gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
grpc_resource_quota* rq = resource_user->resource_quota;
- rq->num_threads -= thd_count;
- int old_cnt = static_cast<int>(
- gpr_atm_no_barrier_fetch_add(&resource_user->num_threads, -thd_count));
- if (old_cnt < thd_count || rq->num_threads < 0) {
+ rq->num_threads_allocated -= thread_count;
+ int old_count = static_cast<int>(gpr_atm_no_barrier_fetch_add(
+ &resource_user->num_threads_allocated, -thread_count));
+ if (old_count < thread_count || rq->num_threads_allocated < 0) {
gpr_log(GPR_ERROR,
- "Releasing more threads (%d) that currently allocated (rq threads: "
+ "Releasing more threads (%d) than currently allocated (rq threads: "
"%d, ru threads: %d)",
- thd_count, old_cnt, rq->num_threads + thd_count);
+ thread_count, rq->num_threads_allocated + thread_count, old_count);
abort();
}
- gpr_mu_unlock(&resource_user->resource_quota->thd_mu);
+ gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
}
void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
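
The core of the resource_quota.cc change is that the quota's max_threads and num_threads_allocated are now guarded by a single thread_count_mu, so the compare-against-limit and the increment happen in one critical section rather than a sequence of atomic loads followed by a CAS. Below is a minimal standalone sketch of that check-then-increment pattern; the ThreadQuota class name and the use of std::mutex are illustrative only and are not gRPC's actual types.

    #include <cassert>
    #include <mutex>

    // Simplified model of the quota bookkeeping in this patch: one mutex
    // guards both the limit and the running count, so "compare against max,
    // then add" cannot race with another allocation or a limit change.
    class ThreadQuota {
     public:
      explicit ThreadQuota(int max_threads) : max_threads_(max_threads) {}

      // Mirrors grpc_resource_user_allocate_threads(): succeeds only if the
      // whole batch fits under the current limit.
      bool TryAllocate(int thread_count) {
        assert(thread_count >= 0);
        std::lock_guard<std::mutex> lock(mu_);
        if (num_threads_allocated_ + thread_count > max_threads_) return false;
        num_threads_allocated_ += thread_count;
        return true;
      }

      // Mirrors grpc_resource_user_free_threads(): quota may be returned in
      // any batching, as long as everything granted is eventually released.
      void Free(int thread_count) {
        assert(thread_count >= 0);
        std::lock_guard<std::mutex> lock(mu_);
        num_threads_allocated_ -= thread_count;
        assert(num_threads_allocated_ >= 0);
      }

      // Mirrors grpc_resource_quota_set_max_threads(): lowering the limit
      // does not reclaim threads that were already granted.
      void SetMaxThreads(int new_max) {
        assert(new_max >= 0);
        std::lock_guard<std::mutex> lock(mu_);
        max_threads_ = new_max;
      }

     private:
      std::mutex mu_;
      int max_threads_;
      int num_threads_allocated_ = 0;
    };
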
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 7342ef84c8..1d5e95e04a 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -96,14 +96,14 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
/* Attempts to get quota (from the resource_user) to create 'thd_count' number
* of threads. Returns true if successful (i.e the caller is now free to create
* 'thd_count' number of threads) or false if quota is not available */
-bool grpc_resource_user_alloc_threads(grpc_resource_user* resource_user,
- int thd_count);
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+ int thd_count);
/* Releases 'thd_count' worth of quota back to the resource user. The quota
* should have been previously obtained successfully by calling
- * grpc_resource_user_alloc_threads().
+ * grpc_resource_user_allocate_threads().
*
* Note: There need not be an exact one-to-one correspondence between
- * grpc_resource_user_alloc_threads() and grpc_resource_user_free_threads()
+ * grpc_resource_user_allocate_threads() and grpc_resource_user_free_threads()
* calls. The only requirement is that the number of threads allocated should
* all be eventually released */
void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
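
The header above spells out the contract: grpc_resource_user_allocate_threads() returns true only when the entire batch fits under the quota, and grpc_resource_user_free_threads() calls need not pair one-to-one with allocations as long as everything granted is eventually returned. A hypothetical caller, sketched under the assumption that the resource user was created elsewhere with grpc_resource_user_create():

    #include <grpc/support/log.h>

    #include "src/core/lib/iomgr/resource_quota.h"

    // Illustrative caller (not part of this patch): request quota for a batch
    // of workers up front; if the grant fails, create no threads at all.
    void SpawnWorkersIfQuotaAllows(grpc_resource_user* resource_user) {
      constexpr int kWorkers = 4;
      if (!grpc_resource_user_allocate_threads(resource_user, kWorkers)) {
        gpr_log(GPR_ERROR, "No thread quota available for %d workers", kWorkers);
        return;
      }
      // ... create kWorkers threads here ...
      // Releases need not mirror the allocation; only the totals must balance.
      grpc_resource_user_free_threads(resource_user, 1);             // one exits early
      grpc_resource_user_free_threads(resource_user, kWorkers - 1);  // rest at shutdown
    }
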
diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc
index 5d367511e2..57067d4696 100644
--- a/src/cpp/thread_manager/thread_manager.cc
+++ b/src/cpp/thread_manager/thread_manager.cc
@@ -123,7 +123,7 @@ void ThreadManager::CleanupCompletedThreads() {
}
void ThreadManager::Initialize() {
- if (!grpc_resource_user_alloc_threads(resource_user_, min_pollers_)) {
+ if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) {
gpr_log(GPR_ERROR,
"No thread quota available to even create the minimum required "
"polling threads (i.e %d). Unable to start the thread manager",
@@ -165,9 +165,9 @@ void ThreadManager::MainWorkLoop() {
break;
case WORK_FOUND:
// If we got work and there are now insufficient pollers and there is
- // quota available to create a new thread,start a new poller thread
+ // quota available to create a new thread, start a new poller thread
if (!shutdown_ && num_pollers_ < min_pollers_ &&
- grpc_resource_user_alloc_threads(resource_user_, 1)) {
+ grpc_resource_user_allocate_threads(resource_user_, 1)) {
num_pollers_++;
num_threads_++;
max_active_threads_sofar_ =
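
In MainWorkLoop() above, an extra poller is started only when three conditions hold: the manager is not shutting down, the poller count has dropped below min_pollers_, and the resource user grants one more thread. The ordering matters: the quota grant is the gate, and the counters are bumped only after it succeeds, so a failed grant leaves all state untouched. A hedged sketch of that decision as a free function; MaybeStartExtraPoller and its pointer parameters are illustrative and not ThreadManager's real interface.

    #include "src/core/lib/iomgr/resource_quota.h"

    // Hypothetical helper modeling the WORK_FOUND branch above. The commented
    // StartPollerThread stands in for the real thread creation; whatever runs
    // there must eventually return the one unit of quota it was granted.
    bool MaybeStartExtraPoller(grpc_resource_user* resource_user, int* num_pollers,
                               int* num_threads, int min_pollers, bool shutdown) {
      if (shutdown || *num_pollers >= min_pollers) return false;
      if (!grpc_resource_user_allocate_threads(resource_user, 1)) {
        return false;  // quota exhausted: keep working with the current pollers
      }
      ++*num_pollers;
      ++*num_threads;
      // StartPollerThread(...);  // must call grpc_resource_user_free_threads(
      //                          // resource_user, 1) when the thread retires
      return true;
    }
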
diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h
index 8332befed0..01043edb31 100644
--- a/src/cpp/thread_manager/thread_manager.h
+++ b/src/cpp/thread_manager/thread_manager.h
@@ -100,7 +100,7 @@ class ThreadManager {
// ThreadManager::MarkAsCompleted()
//
// WHY IS THIS NEEDED?:
- // When a thread terminates, some other tread *must* call Join() on that
+ // When a thread terminates, some other thread *must* call Join() on that
// thread so that the resources are released. Having a WorkerThread wrapper
// will make this easier. Once Run() completes, each thread calls the
// following two functions:
@@ -113,8 +113,9 @@ class ThreadManager {
// in the completed_threads_ list (since a thread cannot call Join() on
// itself, it calls CleanupCompletedThreads() *before* calling
// MarkAsCompleted())
- // TODO: sreek - consider creating the threads 'detached' so that Join() need
- // not be called
+ //
+ // TODO(sreek): Consider creating the threads 'detached' so that Join() need
+ // not be called (and the need for this WorkerThread class is eliminated)
class WorkerThread {
public:
WorkerThread(ThreadManager* thd_mgr);
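
The TODO above notes that the WorkerThread/Join() machinery exists only so that some thread eventually reclaims each worker's resources. One way the detached-thread idea could look, sketched with std::thread purely for illustration; gRPC core has its own thread abstraction, and this is not the project's implementation.

    #include <functional>
    #include <thread>

    #include "src/core/lib/iomgr/resource_quota.h"

    // Sketch of the TODO's idea: a detached worker never needs to be joined,
    // so the WorkerThread wrapper and the completed_threads_ list could go
    // away. `work` stands in for the body of ThreadManager::MainWorkLoop().
    void StartDetachedWorker(grpc_resource_user* resource_user,
                             std::function<void()> work) {
      std::thread([resource_user, work] {
        work();
        // The exiting thread returns its own quota; nothing to Join() afterwards.
        grpc_resource_user_free_threads(resource_user, 1);
      }).detach();
    }
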
diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc
index 573e4010fa..f3b35fed32 100644
--- a/test/core/iomgr/resource_quota_test.cc
+++ b/test/core/iomgr/resource_quota_test.cc
@@ -810,30 +810,31 @@ static void test_thread_limit() {
grpc_resource_quota_set_max_threads(rq, 100);
// Request quota for 100 threads (50 for ru1, 50 for ru2)
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 10));
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 10));
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 40));
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 40));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 10));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 40));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 40));
- // Threads exhaused. Next request must fail
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 20));
+ // Threads exhausted. Next request must fail
+ GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20));
// Free 20 threads from two different users
grpc_resource_user_free_threads(ru1, 10);
grpc_resource_user_free_threads(ru2, 10);
// Next request to 20 threads must succeed
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 20));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20));
// No more thread quota again
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 20));
+ GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 20));
// Free 10 more
grpc_resource_user_free_threads(ru1, 10);
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 5));
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 10)); // Only 5 available
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 5));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 5));
+ GPR_ASSERT(
+ !grpc_resource_user_allocate_threads(ru2, 10)); // Only 5 available
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 5));
// Teardown (ru1 and ru2 release all the quota back to rq)
grpc_resource_user_unref(ru1);
@@ -841,7 +842,7 @@ static void test_thread_limit() {
grpc_resource_quota_unref(rq);
}
-// Change max quota in either directions dynamically
+// Change max quota in either direction dynamically
static void test_thread_maxquota_change() {
grpc_core::ExecCtx exec_ctx;
@@ -854,34 +855,34 @@ static void test_thread_maxquota_change() {
grpc_resource_quota_set_max_threads(rq, 100);
// Request quota for 100 threads (50 for ru1, 50 for ru2)
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru1, 50));
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 50));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru1, 50));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 50));
- // Threads exhaused. Next request must fail
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru2, 20));
+ // Threads exhausted. Next request must fail
+ GPR_ASSERT(!grpc_resource_user_allocate_threads(ru2, 20));
// Increase maxquota and retry
// Max threads = 150;
grpc_resource_quota_set_max_threads(rq, 150);
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 20)); // ru2 = 70, ru1 = 50
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); // ru2=70, ru1=50
// Decrease maxquota (Note: Quota already given to ru1 and ru2 is unaffected)
// Max threads = 10;
grpc_resource_quota_set_max_threads(rq, 10);
// New requests will fail until quota is available
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10));
+ GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10));
// Make quota available
- grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10)); // Still not enough
+ grpc_resource_user_free_threads(ru1, 50); // ru1 now has 0
+ GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10)); // not enough
grpc_resource_user_free_threads(ru2, 70); // ru2 now has 0
// Now we can get quota up-to 10, the current max
- GPR_ASSERT(grpc_resource_user_alloc_threads(ru2, 10));
+ GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 10));
// No more thread quota again
- GPR_ASSERT(!grpc_resource_user_alloc_threads(ru1, 10));
+ GPR_ASSERT(!grpc_resource_user_allocate_threads(ru1, 10));
// Teardown (ru1 and ru2 release all the quota back to rq)
grpc_resource_user_unref(ru1);
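
test_thread_maxquota_change() above also pins down a subtle point: lowering max_threads does not claw back quota that was already granted; it only makes new requests fail until enough has been freed. The same arithmetic, replayed against the simplified ThreadQuota sketch shown after the resource_quota.cc diff (a model of the rq-level ledger only; the ru1/ru2 split is dropped):

    #include <cassert>

    int main() {
      ThreadQuota rq(100);
      assert(rq.TryAllocate(50));   // ru1: 50 of 100 used
      assert(rq.TryAllocate(50));   // ru2: 100 of 100 used
      assert(!rq.TryAllocate(20));  // exhausted
      rq.SetMaxThreads(150);
      assert(rq.TryAllocate(20));   // 120 of 150 used
      rq.SetMaxThreads(10);         // the existing 120 grants stay valid
      assert(!rq.TryAllocate(10));  // but new requests fail
      rq.Free(50);                  // 70 used
      assert(!rq.TryAllocate(10));  // still above the new limit of 10
      rq.Free(70);                  // 0 used
      assert(rq.TryAllocate(10));   // now up to the current max
      assert(!rq.TryAllocate(10));  // and no more
      return 0;
    }
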
diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc
index a7ed2dd380..838f5f72ad 100644
--- a/test/cpp/thread_manager/thread_manager_test.cc
+++ b/test/cpp/thread_manager/thread_manager_test.cc
@@ -124,16 +124,18 @@ static void TestPollAndWork() {
2 /* min_pollers */, 10 /* max_pollers */, 10 /* poll_duration_ms */,
1 /* work_duration_ms */, 50 /* max_poll_calls */};
- grpc::ThreadManagerTest test_thd_mgr("TestThreadManager", rq, settings);
+ grpc::ThreadManagerTest test_thread_mgr("TestThreadManager", rq, settings);
grpc_resource_quota_unref(rq);
- test_thd_mgr.Initialize(); // Start the thread manager
- test_thd_mgr.Wait(); // Wait for all threads to finish
+ test_thread_mgr.Initialize(); // Start the thread manager
+ test_thread_mgr.Wait(); // Wait for all threads to finish
// Verify that The number of times DoWork() was called is equal to the number
// of times WORK_FOUND was returned
- gpr_log(GPR_DEBUG, "DoWork() called %d times", test_thd_mgr.GetNumDoWork());
- GPR_ASSERT(test_thd_mgr.GetNumDoWork() == test_thd_mgr.GetNumWorkFound());
+ gpr_log(GPR_DEBUG, "DoWork() called %d times",
+ test_thread_mgr.GetNumDoWork());
+ GPR_ASSERT(test_thread_mgr.GetNumDoWork() ==
+ test_thread_mgr.GetNumWorkFound());
}
static void TestThreadQuota() {
@@ -151,18 +153,20 @@ static void TestThreadQuota() {
// Create two thread managers (but with same resource quota). This means
// that the max number of active threads across BOTH the thread managers
// cannot be greater than kMaxNumthreads
- grpc::ThreadManagerTest test_thd_mgr_1("TestThreadManager-1", rq, settings);
- grpc::ThreadManagerTest test_thd_mgr_2("TestThreadManager-2", rq, settings);
+ grpc::ThreadManagerTest test_thread_mgr_1("TestThreadManager-1", rq,
+ settings);
+ grpc::ThreadManagerTest test_thread_mgr_2("TestThreadManager-2", rq,
+ settings);
// It is ok to unref resource quota before starting thread managers.
grpc_resource_quota_unref(rq);
// Start both thread managers
- test_thd_mgr_1.Initialize();
- test_thd_mgr_2.Initialize();
+ test_thread_mgr_1.Initialize();
+ test_thread_mgr_2.Initialize();
// Wait for both to finish
- test_thd_mgr_1.Wait();
- test_thd_mgr_2.Wait();
+ test_thread_mgr_1.Wait();
+ test_thread_mgr_2.Wait();
// Now verify that the total number of active threads in either thread manager
// never exceeds kMaxNumThreads
@@ -173,8 +177,8 @@ static void TestThreadQuota() {
// Its okay to not test this case here. The resource quota c-core tests
// provide enough coverage to resource quota object with multiple resource
// users
- int max1 = test_thd_mgr_1.GetMaxActiveThreadsSoFar();
- int max2 = test_thd_mgr_2.GetMaxActiveThreadsSoFar();
+ int max1 = test_thread_mgr_1.GetMaxActiveThreadsSoFar();
+ int max2 = test_thread_mgr_2.GetMaxActiveThreadsSoFar();
gpr_log(
GPR_DEBUG,
"MaxActiveThreads in TestThreadManager_1: %d, TestThreadManager_2: %d",