Diffstat (limited to 'src')
 src/core/lib/iomgr/resource_quota.cc          | 78
 src/core/lib/iomgr/resource_quota.h           | 16
 src/cpp/common/resource_quota_cc.cc           |  4
 src/cpp/server/server_builder.cc              |  2
 src/cpp/server/server_cc.cc                   | 31
 src/cpp/thread_manager/thread_manager.cc      | 53
 src/cpp/thread_manager/thread_manager.h       | 48
 src/ruby/ext/grpc/rb_grpc_imports.generated.c |  2
 src/ruby/ext/grpc/rb_grpc_imports.generated.h |  3
 9 files changed, 214 insertions(+), 23 deletions(-)
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index 539bc120ce..b6fc7579f7 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -96,6 +96,9 @@ struct grpc_resource_user {
list, false otherwise */
bool added_to_free_pool;
+ /* The number of threads currently allocated to this resource user */
+ gpr_atm num_threads_allocated;
+
/* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
*/
grpc_closure* reclaimers[2];
@@ -135,12 +138,33 @@ struct grpc_resource_quota {
gpr_atm last_size;
+ /* Mutex to protect max_threads and num_threads_allocated */
+ /* Note: We could have used gpr_atm for max_threads and num_threads_allocated
+ * and avoided this mutex; but in that case, each invocation of
+ * grpc_resource_user_allocate_threads() would have to do at least two atomic
+ * loads (for max_threads and num_threads_allocated) followed by a CAS (on
+ * num_threads_allocated).
+ * Moreover, we expect grpc_resource_user_allocate_threads() to often be
+ * called concurrently, which increases the chances of the CAS operation
+ * failing. This additional complexity is not worth the tiny perf gain we
+ * may (or may not) get by using atomics */
+ gpr_mu thread_count_mu;
+
+ /* Max number of threads allowed */
+ int max_threads;
+
+ /* Number of threads currently allocated via this resource_quota object */
+ int num_threads_allocated;
+
/* Has rq_step been scheduled to occur? */
bool step_scheduled;
+
/* Are we currently reclaiming memory */
bool reclaiming;
+
/* Closure around rq_step */
grpc_closure rq_step_closure;
+
/* Closure around rq_reclamation_done */
grpc_closure rq_reclamation_done_closure;
@@ -524,6 +548,11 @@ static void ru_shutdown(void* ru, grpc_error* error) {
static void ru_destroy(void* ru, grpc_error* error) {
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
+ // Free all the remaining thread quota
+ grpc_resource_user_free_threads(resource_user,
+ static_cast<int>(gpr_atm_no_barrier_load(
+ &resource_user->num_threads_allocated)));
+
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, static_cast<grpc_rulist>(i));
}
@@ -594,6 +623,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
+ gpr_mu_init(&resource_quota->thread_count_mu);
+ resource_quota->max_threads = INT_MAX;
+ resource_quota->num_threads_allocated = 0;
resource_quota->step_scheduled = false;
resource_quota->reclaiming = false;
gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -616,6 +648,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
if (gpr_unref(&resource_quota->refs)) {
+ // No outstanding thread quota
+ GPR_ASSERT(resource_quota->num_threads_allocated == 0);
GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
gpr_free(resource_quota->name);
gpr_free(resource_quota);
@@ -647,6 +681,15 @@ double grpc_resource_quota_get_memory_pressure(
}
/* Public API */
+void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota,
+ int new_max_threads) {
+ GPR_ASSERT(new_max_threads >= 0);
+ gpr_mu_lock(&resource_quota->thread_count_mu);
+ resource_quota->max_threads = new_max_threads;
+ gpr_mu_unlock(&resource_quota->thread_count_mu);
+}
+
+/* Public API */
void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
size_t size) {
grpc_core::ExecCtx exec_ctx;
@@ -731,6 +774,7 @@ grpc_resource_user* grpc_resource_user_create(
grpc_closure_list_init(&resource_user->on_allocated);
resource_user->allocating = false;
resource_user->added_to_free_pool = false;
+ gpr_atm_no_barrier_store(&resource_user->num_threads_allocated, 0);
resource_user->reclaimers[0] = nullptr;
resource_user->reclaimers[1] = nullptr;
resource_user->new_reclaimers[0] = nullptr;
@@ -785,6 +829,40 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
}
}
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+ int thread_count) {
+ GPR_ASSERT(thread_count >= 0);
+ bool is_success = false;
+ gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
+ grpc_resource_quota* rq = resource_user->resource_quota;
+ if (rq->num_threads_allocated + thread_count <= rq->max_threads) {
+ rq->num_threads_allocated += thread_count;
+ gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated,
+ thread_count);
+ is_success = true;
+ }
+ gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
+ return is_success;
+}
+
+void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
+ int thread_count) {
+ GPR_ASSERT(thread_count >= 0);
+ gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
+ grpc_resource_quota* rq = resource_user->resource_quota;
+ rq->num_threads_allocated -= thread_count;
+ int old_count = static_cast<int>(gpr_atm_no_barrier_fetch_add(
+ &resource_user->num_threads_allocated, -thread_count));
+ if (old_count < thread_count || rq->num_threads_allocated < 0) {
+ gpr_log(GPR_ERROR,
+ "Releasing more threads (%d) than currently allocated (rq threads: "
+ "%d, ru threads: %d)",
+ thread_count, rq->num_threads_allocated + thread_count, old_count);
+ abort();
+ }
+ gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
+}
+
void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done) {
gpr_mu_lock(&resource_user->mu);
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 937daf8728..1d5e95e04a 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -93,6 +93,22 @@ void grpc_resource_user_ref(grpc_resource_user* resource_user);
void grpc_resource_user_unref(grpc_resource_user* resource_user);
void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
+/* Attempts to get quota (from the resource_user) to create 'thd_count' number
+ * of threads. Returns true if successful (i.e. the caller is now free to
+ * create 'thd_count' threads) or false if the quota is not available */
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+ int thd_count);
+/* Releases 'thd_count' worth of quota back to the resource user. The quota
+ * should have been previously obtained successfully by calling
+ * grpc_resource_user_allocate_threads().
+ *
+ * Note: There need not be an exact one-to-one correspondence between
+ * grpc_resource_user_allocate_threads() and grpc_resource_user_free_threads()
+ * calls. The only requirement is that all the threads allocated must
+ * eventually be released */
+void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
+ int thd_count);
+
/* Allocate from the resource user (and its quota).
If optional_on_done is NULL, then allocate immediately. This may push the
quota over-limit, at which point reclamation will kick in.
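
Taken together, the two new functions define an allocate-before-create / free-after-exit protocol. A minimal sketch of the expected calling pattern (the caller and kWorkers constant are hypothetical; only the two quota calls come from the header above):

    #include "src/core/lib/iomgr/resource_quota.h"

    // Hypothetical caller illustrating the intended protocol.
    void maybe_do_threaded_work(grpc_resource_user* resource_user) {
      const int kWorkers = 4;
      // Reserve quota for all workers up front; the call is all-or-nothing.
      if (!grpc_resource_user_allocate_threads(resource_user, kWorkers)) {
        return;  // quota exhausted: the caller must not create the threads
      }
      // ... create and run kWorkers threads here ...
      // Return the quota once the threads have exited. The frees need not
      // mirror the allocates one-to-one; freeing 1 slot as each thread exits
      // is equally valid, as long as all kWorkers are eventually returned.
      grpc_resource_user_free_threads(resource_user, kWorkers);
    }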
diff --git a/src/cpp/common/resource_quota_cc.cc b/src/cpp/common/resource_quota_cc.cc
index daeb0ba171..276e5f7954 100644
--- a/src/cpp/common/resource_quota_cc.cc
+++ b/src/cpp/common/resource_quota_cc.cc
@@ -33,4 +33,8 @@ ResourceQuota& ResourceQuota::Resize(size_t new_size) {
return *this;
}
+ResourceQuota& ResourceQuota::SetMaxThreads(int new_max_threads) {
+ grpc_resource_quota_set_max_threads(impl_, new_max_threads);
+ return *this;
+}
} // namespace grpc
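
Because SetMaxThreads() returns *this (matching Resize()), the C++ API supports chaining. A small hedged sketch with illustrative values:

    #include <grpcpp/resource_quota.h>

    void ConfigureQuota() {
      grpc::ResourceQuota quota("my_server_quota");  // name is illustrative
      quota.Resize(256 * 1024 * 1024)  // optional: 256 MB memory cap
          .SetMaxThreads(200);         // at most 200 threads from this quota
    }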
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc
index e0b9b7a62b..8417c45e64 100644
--- a/src/cpp/server/server_builder.cc
+++ b/src/cpp/server/server_builder.cc
@@ -263,7 +263,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
std::unique_ptr<Server> server(new Server(
max_receive_message_size_, &args, sync_server_cqs,
sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
- sync_server_settings_.cq_timeout_msec));
+ sync_server_settings_.cq_timeout_msec, resource_quota_));
if (has_sync_methods) {
// This is a Sync server
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 0d77510e29..43e6b27de2 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -47,6 +47,12 @@
namespace grpc {
namespace {
+// The default maximum number of threads that can be created in the sync
+// server. The value of 500 was chosen empirically. To increase the maximum
+// number of threads in a sync server, pass a custom ResourceQuota object (with
+// the desired maximum thread count set) to the server builder
+#define DEFAULT_MAX_SYNC_SERVER_THREADS 500
+
class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
public:
~DefaultGlobalCallbacks() override {}
@@ -266,9 +272,9 @@ class Server::SyncRequestThreadManager : public ThreadManager {
public:
SyncRequestThreadManager(Server* server, CompletionQueue* server_cq,
std::shared_ptr<GlobalCallbacks> global_callbacks,
- int min_pollers, int max_pollers,
- int cq_timeout_msec)
- : ThreadManager(min_pollers, max_pollers),
+ grpc_resource_quota* rq, int min_pollers,
+ int max_pollers, int cq_timeout_msec)
+ : ThreadManager("SyncServer", rq, min_pollers, max_pollers),
server_(server),
server_cq_(server_cq),
cq_timeout_msec_(cq_timeout_msec),
@@ -376,7 +382,8 @@ Server::Server(
int max_receive_message_size, ChannelArguments* args,
std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
sync_server_cqs,
- int min_pollers, int max_pollers, int sync_cq_timeout_msec)
+ int min_pollers, int max_pollers, int sync_cq_timeout_msec,
+ grpc_resource_quota* server_rq = nullptr)
: max_receive_message_size_(max_receive_message_size),
sync_server_cqs_(std::move(sync_server_cqs)),
started_(false),
@@ -392,10 +399,22 @@ Server::Server(
global_callbacks_->UpdateArguments(args);
if (sync_server_cqs_ != nullptr) {
+ bool default_rq_created = false;
+ if (server_rq == nullptr) {
+ server_rq = grpc_resource_quota_create("SyncServer-default-rq");
+ grpc_resource_quota_set_max_threads(server_rq,
+ DEFAULT_MAX_SYNC_SERVER_THREADS);
+ default_rq_created = true;
+ }
+
for (const auto& it : *sync_server_cqs_) {
sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
- this, it.get(), global_callbacks_, min_pollers, max_pollers,
- sync_cq_timeout_msec));
+ this, it.get(), global_callbacks_, server_rq, min_pollers,
+ max_pollers, sync_cq_timeout_msec));
+ }
+
+ if (default_rq_created) {
+ grpc_resource_quota_unref(server_rq);
}
}
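
As the comment above notes, applications that need more than the 500-thread default pass their own ResourceQuota to the builder. A hedged sketch of such a server setup (the service pointer, the port, and the limit of 2000 are illustrative, not part of this change):

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/resource_quota.h>

    void RunServer(grpc::Service* service) {
      grpc::ResourceQuota quota("sync_server_quota");
      quota.SetMaxThreads(2000);  // raise the cap above the 500-thread default

      grpc::ServerBuilder builder;
      builder.SetResourceQuota(quota);
      builder.AddListeningPort("0.0.0.0:50051",
                               grpc::InsecureServerCredentials());
      builder.RegisterService(service);
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      server->Wait();
    }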
diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc
index 02ac56a3fd..fa9eec5f9b 100644
--- a/src/cpp/thread_manager/thread_manager.cc
+++ b/src/cpp/thread_manager/thread_manager.cc
@@ -22,8 +22,8 @@
#include <mutex>
#include <grpc/support/log.h>
-
#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
namespace grpc {
@@ -48,12 +48,17 @@ ThreadManager::WorkerThread::~WorkerThread() {
thd_.Join();
}
-ThreadManager::ThreadManager(int min_pollers, int max_pollers)
+ThreadManager::ThreadManager(const char* name,
+ grpc_resource_quota* resource_quota,
+ int min_pollers, int max_pollers)
: shutdown_(false),
num_pollers_(0),
min_pollers_(min_pollers),
max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers),
- num_threads_(0) {}
+ num_threads_(0),
+ max_active_threads_sofar_(0) {
+ resource_user_ = grpc_resource_user_create(resource_quota, name);
+}
ThreadManager::~ThreadManager() {
{
@@ -61,6 +66,8 @@ ThreadManager::~ThreadManager() {
GPR_ASSERT(num_threads_ == 0);
}
+ grpc_core::ExecCtx exec_ctx; // grpc_resource_user_unref needs an exec_ctx
+ grpc_resource_user_unref(resource_user_);
CleanupCompletedThreads();
}
@@ -81,17 +88,27 @@ bool ThreadManager::IsShutdown() {
return shutdown_;
}
+int ThreadManager::GetMaxActiveThreadsSoFar() {
+ std::lock_guard<std::mutex> list_lock(list_mu_);
+ return max_active_threads_sofar_;
+}
+
void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
{
std::lock_guard<std::mutex> list_lock(list_mu_);
completed_threads_.push_back(thd);
}
- std::lock_guard<std::mutex> lock(mu_);
- num_threads_--;
- if (num_threads_ == 0) {
- shutdown_cv_.notify_one();
+ {
+ std::lock_guard<std::mutex> lock(mu_);
+ num_threads_--;
+ if (num_threads_ == 0) {
+ shutdown_cv_.notify_one();
+ }
}
+
+ // Give a thread back to the resource quota
+ grpc_resource_user_free_threads(resource_user_, 1);
}
void ThreadManager::CleanupCompletedThreads() {
@@ -106,14 +123,22 @@ void ThreadManager::CleanupCompletedThreads() {
}
void ThreadManager::Initialize() {
+ if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) {
+ gpr_log(GPR_ERROR,
+ "No thread quota available to even create the minimum required "
+ "polling threads (i.e %d). Unable to start the thread manager",
+ min_pollers_);
+ abort();
+ }
+
{
std::unique_lock<std::mutex> lock(mu_);
num_pollers_ = min_pollers_;
num_threads_ = min_pollers_;
+ max_active_threads_sofar_ = min_pollers_;
}
for (int i = 0; i < min_pollers_; i++) {
- // Create a new thread (which ends up calling the MainWorkLoop() function
new WorkerThread(this);
}
}
@@ -139,11 +164,15 @@ void ThreadManager::MainWorkLoop() {
done = true;
break;
case WORK_FOUND:
- // If we got work and there are now insufficient pollers, start a new
- // one
- if (!shutdown_ && num_pollers_ < min_pollers_) {
+ // If we got work and there are now insufficient pollers and there is
+ // quota available to create a new thread, start a new poller thread
+ if (!shutdown_ && num_pollers_ < min_pollers_ &&
+ grpc_resource_user_allocate_threads(resource_user_, 1)) {
num_pollers_++;
num_threads_++;
+ if (num_threads_ > max_active_threads_sofar_) {
+ max_active_threads_sofar_ = num_threads_;
+ }
// Drop lock before spawning thread to avoid contention
lock.unlock();
new WorkerThread(this);
@@ -196,6 +225,8 @@ void ThreadManager::MainWorkLoop() {
}
};
+ // This thread is exiting. Do some cleanup work, i.e. delete the already
+ // completed worker threads
CleanupCompletedThreads();
// If we are here, either ThreadManager is shutting down or it already has
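
Condensed, the quota handling added in this file amounts to: reserve one slot before spawning an extra poller (and simply keep going with the current threads if the reservation is denied), and return exactly one slot as each worker exits. A minimal sketch with hypothetical helper names:

    #include "src/core/lib/iomgr/resource_quota.h"

    // Hypothetical helper mirroring the WORK_FOUND branch of MainWorkLoop().
    bool TryGrowPool(grpc_resource_user* resource_user) {
      // Ask the quota for one more thread; on denial, degrade gracefully.
      if (!grpc_resource_user_allocate_threads(resource_user, 1)) {
        return false;  // keep working with the threads we already have
      }
      // ... spawn one more worker thread here ...
      return true;
    }

    // Hypothetical helper mirroring MarkAsCompleted().
    void OnWorkerExit(grpc_resource_user* resource_user) {
      // Each exiting worker returns the single slot it was granted.
      grpc_resource_user_free_threads(resource_user, 1);
    }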
diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h
index 5a40f2de47..01043edb31 100644
--- a/src/cpp/thread_manager/thread_manager.h
+++ b/src/cpp/thread_manager/thread_manager.h
@@ -27,12 +27,14 @@
#include <grpcpp/support/config.h>
#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/resource_quota.h"
namespace grpc {
class ThreadManager {
public:
- explicit ThreadManager(int min_pollers, int max_pollers);
+ explicit ThreadManager(const char* name, grpc_resource_quota* resource_quota,
+ int min_pollers, int max_pollers);
virtual ~ThreadManager();
// Initializes and Starts the Rpc Manager threads
@@ -84,6 +86,11 @@ class ThreadManager {
// all the threads have drained all the outstanding work
virtual void Wait();
+ // Max number of concurrent threads that were ever active in this thread
+ // manager so far. This is useful for debugging purposes (and in unit tests)
+ // to check if resource_quota is properly being enforced.
+ int GetMaxActiveThreadsSoFar();
+
private:
// Helper wrapper class around grpc_core::Thread. Takes a ThreadManager object
// and starts a new grpc_core::Thread to call the Run() function.
@@ -91,6 +98,24 @@ class ThreadManager {
// The Run() function calls ThreadManager::MainWorkLoop() function and once
// that completes, it marks the WorkerThread completed by calling
// ThreadManager::MarkAsCompleted()
+ //
+ // WHY IS THIS NEEDED?:
+ // When a thread terminates, some other thread *must* call Join() on that
+ // thread so that the resources are released. Having a WorkerThread wrapper
+ // will make this easier. Once Run() completes, each thread calls the
+ // following two functions:
+ // ThreadManager::CleanupCompletedThreads()
+ // ThreadManager::MarkAsCompleted()
+ //
+ // - MarkAsCompleted() puts the WorkerThread object in the ThreadManager's
+ // completed_threads_ list
+ // - CleanupCompletedThreads() calls "Join()" on the threads that are already
+ // in the completed_threads_ list (since a thread cannot call Join() on
+ // itself, it calls CleanupCompletedThreads() *before* calling
+ // MarkAsCompleted())
+ //
+ // TODO(sreek): Consider creating the threads 'detached' so that Join() need
+ // not be called (and the need for this WorkerThread class is eliminated)
class WorkerThread {
public:
WorkerThread(ThreadManager* thd_mgr);
@@ -111,13 +136,21 @@ class ThreadManager {
void MarkAsCompleted(WorkerThread* thd);
void CleanupCompletedThreads();
- // Protects shutdown_, num_pollers_ and num_threads_
- // TODO: sreek - Change num_pollers and num_threads_ to atomics
+ // Protects shutdown_, num_pollers_, num_threads_ and
+ // max_active_threads_sofar_
std::mutex mu_;
bool shutdown_;
std::condition_variable shutdown_cv_;
+ // The resource user object to use when requesting quota to create threads
+ //
+ // Note: The user of this ThreadManager object must create a
+ // grpc_resource_quota object (that contains the actual max thread quota) and
+ // a grpc_resource_user object through which quota is requested whenever new
+ // threads need to be created
+ grpc_resource_user* resource_user_;
+
// Number of threads doing polling
int num_pollers_;
@@ -125,10 +158,15 @@ class ThreadManager {
int min_pollers_;
int max_pollers_;
- // The total number of threads (includes threads includes the threads that are
- // currently polling i.e num_pollers_)
+ // The total number of threads currently active (this includes the threads
+ // that are currently polling, i.e. num_pollers_)
int num_threads_;
+ // See GetMaxActiveThreadsSoFar()'s description.
+ // To be more specific, this variable tracks the maximum value that
+ // num_threads_ has ever reached so far
+ int max_active_threads_sofar_;
+
std::mutex list_mu_;
std::list<WorkerThread*> completed_threads_;
};
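
GetMaxActiveThreadsSoFar() is intended for debugging and tests. A hedged sketch of the kind of check a unit test might perform ('TestThreadManager' is a hypothetical subclass implementing PollForWork() and DoWork(); the limit of 10 is illustrative):

    #include <grpc/grpc.h>
    #include <grpc/support/log.h>
    #include "src/cpp/thread_manager/thread_manager.h"

    void CheckQuotaEnforcement() {
      grpc_resource_quota* rq = grpc_resource_quota_create("test_rq");
      grpc_resource_quota_set_max_threads(rq, 10);
      TestThreadManager mgr("TestThreadManager", rq, /*min_pollers=*/2,
                            /*max_pollers=*/10);
      mgr.Initialize();
      // ... drive work through the manager, then Shutdown() and Wait() ...
      GPR_ASSERT(mgr.GetMaxActiveThreadsSoFar() <= 10);  // quota was respected
      grpc_resource_quota_unref(rq);
    }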
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 2443532bb8..78090afd6c 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -91,6 +91,7 @@ grpc_resource_quota_create_type grpc_resource_quota_create_import;
grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
+grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_import;
grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
@@ -341,6 +342,7 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_resource_quota_ref_import = (grpc_resource_quota_ref_type) GetProcAddress(library, "grpc_resource_quota_ref");
grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref");
grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize");
+ grpc_resource_quota_set_max_threads_import = (grpc_resource_quota_set_max_threads_type) GetProcAddress(library, "grpc_resource_quota_set_max_threads");
grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
grpc_channelz_get_top_channels_import = (grpc_channelz_get_top_channels_type) GetProcAddress(library, "grpc_channelz_get_top_channels");
grpc_channelz_get_channel_import = (grpc_channelz_get_channel_type) GetProcAddress(library, "grpc_channelz_get_channel");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index b08a1f94f7..1807efa761 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -248,6 +248,9 @@ extern grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
typedef void(*grpc_resource_quota_resize_type)(grpc_resource_quota* resource_quota, size_t new_size);
extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
#define grpc_resource_quota_resize grpc_resource_quota_resize_import
+typedef void(*grpc_resource_quota_set_max_threads_type)(grpc_resource_quota* resource_quota, int new_max_threads);
+extern grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_import;
+#define grpc_resource_quota_set_max_threads grpc_resource_quota_set_max_threads_import
typedef const grpc_arg_pointer_vtable*(*grpc_resource_quota_arg_vtable_type)(void);
extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
#define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import