author     Craig Tiller <ctiller@google.com>   2016-11-08 08:46:46 -0800
committer  Craig Tiller <ctiller@google.com>   2016-11-08 08:46:46 -0800
commit     eba86e18531c653d47b10645bd77ae4930bce563 (patch)
tree       ed3045218d4563257742f13e6d750c7cafa92cab /src/core/lib
parent     3a41bbbe9bf3da96a7aead551b43f72bd470d1c0 (diff)
parent     03a58cf3873879a4e4470a437e2c0b42c75c6a7c (diff)
Merge github.com:grpc/grpc into securityfuzzit
Diffstat (limited to 'src/core/lib')
-rw-r--r-- | src/core/lib/channel/compress_filter.c | 10
-rw-r--r-- | src/core/lib/channel/http_server_filter.c | 1
-rw-r--r-- | src/core/lib/iomgr/endpoint_pair_uv.c | 5
-rw-r--r-- | src/core/lib/iomgr/network_status_tracker.c | 2
-rw-r--r-- | src/core/lib/iomgr/resource_quota.c | 204
-rw-r--r-- | src/core/lib/iomgr/resource_quota.h | 93
-rw-r--r-- | src/core/lib/iomgr/tcp_client_uv.c | 36
-rw-r--r-- | src/core/lib/iomgr/tcp_posix.c | 37
-rw-r--r-- | src/core/lib/iomgr/tcp_server_posix.c | 45
-rw-r--r-- | src/core/lib/iomgr/tcp_server_uv.c | 24
-rw-r--r-- | src/core/lib/iomgr/tcp_uv.c | 71
-rw-r--r-- | src/core/lib/iomgr/tcp_uv.h | 4
-rw-r--r-- | src/core/lib/iomgr/tcp_windows.c | 53
-rw-r--r-- | src/core/lib/security/transport/security_connector.c | 4
-rw-r--r-- | src/core/lib/surface/call.c | 6
-rw-r--r-- | src/core/lib/surface/version.c | 2
-rw-r--r-- | src/core/lib/transport/pid_controller.c | 57
-rw-r--r-- | src/core/lib/transport/pid_controller.h | 64
-rw-r--r-- | src/core/lib/tsi/ssl_transport_security.c | 16
19 files changed, 466 insertions, 268 deletions
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c index 0981d59f63..23b7dfb8fd 100644 --- a/src/core/lib/channel/compress_filter.c +++ b/src/core/lib/channel/compress_filter.c @@ -111,9 +111,13 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) { return md; } -static int skip_compression(grpc_call_element *elem) { +static int skip_compression(grpc_call_element *elem, uint32_t flags) { call_data *calld = elem->call_data; channel_data *channeld = elem->channel_data; + + if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) { + return 1; + } if (calld->has_compression_algorithm) { if (calld->compression_algorithm == GRPC_COMPRESS_NONE) { return 1; @@ -241,8 +245,8 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx, if (op->send_initial_metadata) { process_send_initial_metadata(elem, op->send_initial_metadata); } - if (op->send_message != NULL && !skip_compression(elem) && - 0 == (op->send_message->flags & GRPC_WRITE_NO_COMPRESS)) { + if (op->send_message != NULL && + !skip_compression(elem, op->send_message->flags)) { calld->send_op = op; calld->send_length = op->send_message->length; calld->send_flags = op->send_message->flags; diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c index 8340b5cd0f..04670ff233 100644 --- a/src/core/lib/channel/http_server_filter.c +++ b/src/core/lib/channel/http_server_filter.c @@ -162,7 +162,6 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) { /* Retrieve the payload from the value of the 'grpc-internal-payload-bin' header field */ calld->seen_payload_bin = 1; - gpr_slice_buffer_init(&calld->read_slice_buffer); gpr_slice_buffer_add(&calld->read_slice_buffer, gpr_slice_ref(md->value->slice)); grpc_slice_buffer_stream_init(&calld->read_stream, diff --git a/src/core/lib/iomgr/endpoint_pair_uv.c b/src/core/lib/iomgr/endpoint_pair_uv.c index 7941e20388..ff24894c6d 100644 --- a/src/core/lib/iomgr/endpoint_pair_uv.c +++ b/src/core/lib/iomgr/endpoint_pair_uv.c @@ -41,8 +41,9 @@ #include "src/core/lib/iomgr/endpoint_pair.h" -grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, - size_t read_slice_size) { +grpc_endpoint_pair grpc_iomgr_create_endpoint_pair( + const char *name, grpc_resource_quota *resource_quota, + size_t read_slice_size) { grpc_endpoint_pair endpoint_pair; // TODO(mlumish): implement this properly under libuv GPR_ASSERT(false && diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c index b4bb7e3cf7..a5ca9ed2c3 100644 --- a/src/core/lib/iomgr/network_status_tracker.c +++ b/src/core/lib/iomgr/network_status_tracker.c @@ -46,7 +46,7 @@ static gpr_mu g_endpoint_mutex; void grpc_network_status_shutdown(void) { if (head != NULL) { gpr_log(GPR_ERROR, - "Memory leaked as all network endpoints were not shut down"); + "Memory leaked as not all network endpoints were shut down"); } gpr_mu_destroy(&g_endpoint_mutex); } diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c index e39cf28e35..1770d72268 100644 --- a/src/core/lib/iomgr/resource_quota.c +++ b/src/core/lib/iomgr/resource_quota.c @@ -44,6 +44,81 @@ int grpc_resource_quota_trace = 0; +/* Internal linked list pointers for a resource user */ +typedef struct { + grpc_resource_user *next; + grpc_resource_user *prev; +} grpc_resource_user_link; + +/* Resource users are kept in (potentially) several intrusive linked lists + at once. 
These are the list names. */ +typedef enum { + /* Resource users that are waiting for an allocation */ + GRPC_RULIST_AWAITING_ALLOCATION, + /* Resource users that have free memory available for internal reclamation */ + GRPC_RULIST_NON_EMPTY_FREE_POOL, + /* Resource users that have published a benign reclamation is available */ + GRPC_RULIST_RECLAIMER_BENIGN, + /* Resource users that have published a destructive reclamation is + available */ + GRPC_RULIST_RECLAIMER_DESTRUCTIVE, + /* Number of lists: must be last */ + GRPC_RULIST_COUNT +} grpc_rulist; + +struct grpc_resource_user { + /* The quota this resource user consumes from */ + grpc_resource_quota *resource_quota; + + /* Closure to schedule an allocation under the resource quota combiner lock */ + grpc_closure allocate_closure; + /* Closure to publish a non empty free pool under the resource quota combiner + lock */ + grpc_closure add_to_free_pool_closure; + + /* one ref for each ref call (released by grpc_resource_user_unref), and one + ref for each byte allocated (released by grpc_resource_user_free) */ + gpr_atm refs; + /* is this resource user unlocked? starts at 0, increases for each shutdown + call */ + gpr_atm shutdown; + + gpr_mu mu; + /* The amount of memory (in bytes) this user has cached for its own use: to + avoid quota contention, each resource user can keep some memory in + addition to what it is immediately using (e.g., for caching), and the quota + can pull it back under memory pressure. + This value can become negative if more memory has been requested than + existed in the free pool, at which point the quota is consulted to bring + this value non-negative (asynchronously). */ + int64_t free_pool; + /* A list of closures to call once free_pool becomes non-negative - ie when + all outstanding allocations have been granted. 
*/ + grpc_closure_list on_allocated; + /* True if we are currently trying to allocate from the quota, false if not */ + bool allocating; + /* True if we are currently trying to add ourselves to the non-free quota + list, false otherwise */ + bool added_to_free_pool; + + /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer + */ + grpc_closure *reclaimers[2]; + /* Trampoline closures to finish reclamation and re-enter the quota combiner + lock */ + grpc_closure post_reclaimer_closure[2]; + + /* Closure to execute under the quota combiner to de-register and shutdown the + resource user */ + grpc_closure destroy_closure; + + /* Links in the various grpc_rulist lists */ + grpc_resource_user_link links[GRPC_RULIST_COUNT]; + + /* The name of this resource user, for debugging/tracing */ + char *name; +}; + struct grpc_resource_quota { /* refcount */ gpr_refcount refs; @@ -373,9 +448,19 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE); } +static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { + grpc_resource_user *resource_user = ru; + grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0], + GRPC_ERROR_CANCELLED, NULL); + grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1], + GRPC_ERROR_CANCELLED, NULL); + resource_user->reclaimers[0] = NULL; + resource_user->reclaimers[1] = NULL; +} + static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { grpc_resource_user *resource_user = ru; - GPR_ASSERT(resource_user->allocated == 0); + GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0); for (int i = 0; i < GRPC_RULIST_COUNT; i++) { rulist_remove(resource_user, (grpc_rulist)i); } @@ -383,13 +468,14 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { GRPC_ERROR_CANCELLED, NULL); grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1], GRPC_ERROR_CANCELLED, NULL); - grpc_exec_ctx_sched(exec_ctx, (grpc_closure *)gpr_atm_no_barrier_load( - &resource_user->on_done_destroy_closure), - GRPC_ERROR_NONE, NULL); if (resource_user->free_pool != 0) { resource_user->resource_quota->free_pool += resource_user->free_pool; rq_step_sched(exec_ctx, resource_user->resource_quota); } + grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota); + gpr_mu_destroy(&resource_user->mu); + gpr_free(resource_user->name); + gpr_free(resource_user); } static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg, @@ -539,9 +625,9 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) { * grpc_resource_user api */ -void grpc_resource_user_init(grpc_resource_user *resource_user, - grpc_resource_quota *resource_quota, - const char *name) { +grpc_resource_user *grpc_resource_user_create( + grpc_resource_quota *resource_quota, const char *name) { + grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user)); resource_user->resource_quota = grpc_resource_quota_internal_ref(resource_quota); grpc_closure_init(&resource_user->allocate_closure, &ru_allocate, @@ -555,12 +641,12 @@ void grpc_resource_user_init(grpc_resource_user *resource_user, grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user); gpr_mu_init(&resource_user->mu); - resource_user->allocated = 0; + gpr_atm_rel_store(&resource_user->refs, 1); + gpr_atm_rel_store(&resource_user->shutdown, 0); resource_user->free_pool = 0; grpc_closure_list_init(&resource_user->on_allocated); 
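/* Note: refs was seeded to 1 above for the creator. grpc_resource_user_alloc
   adds one ref per allocated byte, so ru_destroy only runs once the user has
   been unreffed and every byte has been returned via grpc_resource_user_free. */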
resource_user->allocating = false; resource_user->added_to_free_pool = false; - gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure, 0); resource_user->reclaimers[0] = NULL; resource_user->reclaimers[1] = NULL; for (int i = 0; i < GRPC_RULIST_COUNT; i++) { @@ -572,56 +658,54 @@ void grpc_resource_user_init(grpc_resource_user *resource_user, gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR, (intptr_t)resource_user); } + return resource_user; } -void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, - grpc_closure *on_done) { - gpr_mu_lock(&resource_user->mu); - GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->on_done_destroy_closure) == - 0); - gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure, - (gpr_atm)on_done); - if (resource_user->allocated == 0) { +static void ru_ref_by(grpc_resource_user *resource_user, gpr_atm amount) { + GPR_ASSERT(amount > 0); + GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0); +} + +static void ru_unref_by(grpc_exec_ctx *exec_ctx, + grpc_resource_user *resource_user, gpr_atm amount) { + GPR_ASSERT(amount > 0); + gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount); + GPR_ASSERT(old >= amount); + if (old == amount) { grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, &resource_user->destroy_closure, GRPC_ERROR_NONE, false); } - gpr_mu_unlock(&resource_user->mu); } -void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { - grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota); - gpr_mu_destroy(&resource_user->mu); - gpr_free(resource_user->name); +void grpc_resource_user_ref(grpc_resource_user *resource_user) { + ru_ref_by(resource_user, 1); +} + +void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, + grpc_resource_user *resource_user) { + ru_unref_by(exec_ctx, resource_user, 1); +} + +void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, + grpc_resource_user *resource_user) { + if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) { + grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, + grpc_closure_create(ru_shutdown, resource_user), + GRPC_ERROR_NONE, false); + } } void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, grpc_resource_user *resource_user, size_t size, grpc_closure *optional_on_done) { gpr_mu_lock(&resource_user->mu); - grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load( - &resource_user->on_done_destroy_closure); - if (on_done_destroy != NULL) { - /* already shutdown */ - if (grpc_resource_quota_trace) { - gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR " after shutdown", - resource_user->resource_quota->name, resource_user->name, size); - } - grpc_exec_ctx_sched( - exec_ctx, optional_on_done, - GRPC_ERROR_CREATE("Buffer pool user is already shutdown"), NULL); - gpr_mu_unlock(&resource_user->mu); - return; - } - resource_user->allocated += (int64_t)size; + ru_ref_by(resource_user, (gpr_atm)size); resource_user->free_pool -= (int64_t)size; if (grpc_resource_quota_trace) { - gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; allocated -> %" PRId64 - ", free_pool -> %" PRId64, + gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64, resource_user->resource_quota->name, resource_user->name, size, - resource_user->allocated, resource_user->free_pool); + resource_user->free_pool); } if (resource_user->free_pool < 0) { 
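/* The local free pool has gone negative: queue the caller's closure and,
   if a request is not already in flight, ask the quota (under its combiner)
   to bring the pool back to non-negative asynchronously. */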
grpc_closure_list_append(&resource_user->on_allocated, optional_on_done, @@ -641,15 +725,12 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, grpc_resource_user *resource_user, size_t size) { gpr_mu_lock(&resource_user->mu); - GPR_ASSERT(resource_user->allocated >= (int64_t)size); bool was_zero_or_negative = resource_user->free_pool <= 0; resource_user->free_pool += (int64_t)size; - resource_user->allocated -= (int64_t)size; if (grpc_resource_quota_trace) { - gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; allocated -> %" PRId64 - ", free_pool -> %" PRId64, + gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64, resource_user->resource_quota->name, resource_user->name, size, - resource_user->allocated, resource_user->free_pool); + resource_user->free_pool); } bool is_bigger_than_zero = resource_user->free_pool > 0; if (is_bigger_than_zero && was_zero_or_negative && @@ -659,29 +740,23 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, &resource_user->add_to_free_pool_closure, GRPC_ERROR_NONE, false); } - grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load( - &resource_user->on_done_destroy_closure); - if (on_done_destroy != NULL && resource_user->allocated == 0) { - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - &resource_user->destroy_closure, GRPC_ERROR_NONE, - false); - } gpr_mu_unlock(&resource_user->mu); + ru_unref_by(exec_ctx, resource_user, (gpr_atm)size); } void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx, grpc_resource_user *resource_user, bool destructive, grpc_closure *closure) { - if (gpr_atm_acq_load(&resource_user->on_done_destroy_closure) == 0) { - GPR_ASSERT(resource_user->reclaimers[destructive] == NULL); - resource_user->reclaimers[destructive] = closure; - grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, - &resource_user->post_reclaimer_closure[destructive], - GRPC_ERROR_NONE, false); - } else { + GPR_ASSERT(resource_user->reclaimers[destructive] == NULL); + if (gpr_atm_acq_load(&resource_user->shutdown) > 0) { grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL); + return; } + resource_user->reclaimers[destructive] = closure; + grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner, + &resource_user->post_reclaimer_closure[destructive], + GRPC_ERROR_NONE, false); } void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, @@ -715,3 +790,10 @@ void grpc_resource_user_alloc_slices( grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user, count * length, &slice_allocator->on_allocated); } + +gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx, + grpc_resource_user *resource_user, + size_t size) { + grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL); + return ru_slice_create(resource_user, size); +} diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index 6dfac55f88..bbe0af1a14 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -84,91 +84,15 @@ void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx, grpc_resource_quota *grpc_resource_quota_from_channel_args( const grpc_channel_args *channel_args); -/* Resource users are kept in (potentially) several intrusive linked lists - at once. These are the list names. 
*/ -typedef enum { - /* Resource users that are waiting for an allocation */ - GRPC_RULIST_AWAITING_ALLOCATION, - /* Resource users that have free memory available for internal reclamation */ - GRPC_RULIST_NON_EMPTY_FREE_POOL, - /* Resource users that have published a benign reclamation is available */ - GRPC_RULIST_RECLAIMER_BENIGN, - /* Resource users that have published a destructive reclamation is - available */ - GRPC_RULIST_RECLAIMER_DESTRUCTIVE, - /* Number of lists: must be last */ - GRPC_RULIST_COUNT -} grpc_rulist; - typedef struct grpc_resource_user grpc_resource_user; -/* Internal linked list pointers for a resource user */ -typedef struct { - grpc_resource_user *next; - grpc_resource_user *prev; -} grpc_resource_user_link; - -struct grpc_resource_user { - /* The quota this resource user consumes from */ - grpc_resource_quota *resource_quota; - - /* Closure to schedule an allocation under the resource quota combiner lock */ - grpc_closure allocate_closure; - /* Closure to publish a non empty free pool under the resource quota combiner - lock */ - grpc_closure add_to_free_pool_closure; - - gpr_mu mu; - /* Total allocated memory outstanding by this resource user in bytes; - always positive */ - int64_t allocated; - /* The amount of memory (in bytes) this user has cached for its own use: to - avoid quota contention, each resource user can keep some memory in - addition to what it is immediately using (e.g., for caching), and the quota - can pull it back under memory pressure. - This value can become negative if more memory has been requested than - existed in the free pool, at which point the quota is consulted to bring - this value non-negative (asynchronously). */ - int64_t free_pool; - /* A list of closures to call once free_pool becomes non-negative - ie when - all outstanding allocations have been granted. */ - grpc_closure_list on_allocated; - /* True if we are currently trying to allocate from the quota, false if not */ - bool allocating; - /* True if we are currently trying to add ourselves to the non-free quota - list, false otherwise */ - bool added_to_free_pool; - - /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer - */ - grpc_closure *reclaimers[2]; - /* Trampoline closures to finish reclamation and re-enter the quota combiner - lock */ - grpc_closure post_reclaimer_closure[2]; - - /* Closure to execute under the quota combiner to de-register and shutdown the - resource user */ - grpc_closure destroy_closure; - /* User supplied closure to call once the user has finished shutting down AND - all outstanding allocations have been freed. Real type is grpc_closure*, - but it's stored as an atomic to avoid a mutex on some fast paths. 
*/ - gpr_atm on_done_destroy_closure; - - /* Links in the various grpc_rulist lists */ - grpc_resource_user_link links[GRPC_RULIST_COUNT]; - - /* The name of this resource user, for debugging/tracing */ - char *name; -}; - -void grpc_resource_user_init(grpc_resource_user *resource_user, - grpc_resource_quota *resource_quota, - const char *name); +grpc_resource_user *grpc_resource_user_create( + grpc_resource_quota *resource_quota, const char *name); +void grpc_resource_user_ref(grpc_resource_user *resource_user); +void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, + grpc_resource_user *resource_user); void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, - grpc_closure *on_done); -void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); + grpc_resource_user *resource_user); /* Allocate from the resource user (and its quota). If optional_on_done is NULL, then allocate immediately. This may push the @@ -221,4 +145,9 @@ void grpc_resource_user_alloc_slices( grpc_resource_user_slice_allocator *slice_allocator, size_t length, size_t count, gpr_slice_buffer *dest); +/* Allocate one slice of length \a size synchronously. */ +gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx, + grpc_resource_user *resource_user, + size_t size); + #endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */ diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index 6274667042..b07f9ceffa 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -54,9 +54,12 @@ typedef struct grpc_uv_tcp_connect { grpc_endpoint **endpoint; int refs; char *addr_name; + grpc_resource_quota *resource_quota; } grpc_uv_tcp_connect; -static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect *connect) { +static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx, + grpc_uv_tcp_connect *connect) { + grpc_resource_quota_internal_unref(exec_ctx, connect->resource_quota); gpr_free(connect); } @@ -74,7 +77,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, } done = (--connect->refs == 0); if (done) { - uv_tcp_connect_cleanup(connect); + uv_tcp_connect_cleanup(exec_ctx, connect); } } @@ -86,8 +89,8 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) { grpc_closure *closure = connect->closure; grpc_timer_cancel(&exec_ctx, &connect->alarm); if (status == 0) { - *connect->endpoint = - grpc_tcp_create(connect->tcp_handle, connect->addr_name); + *connect->endpoint = grpc_tcp_create( + connect->tcp_handle, connect->resource_quota, connect->addr_name); } else { error = GRPC_ERROR_CREATE("Failed to connect to remote host"); error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status); @@ -105,7 +108,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) { } done = (--connect->refs == 0); if (done) { - uv_tcp_connect_cleanup(connect); + uv_tcp_connect_cleanup(&exec_ctx, connect); } grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL); grpc_exec_ctx_finish(&exec_ctx); @@ -114,16 +117,31 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) { static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, + const grpc_channel_args *channel_args, const grpc_resolved_address *resolved_addr, gpr_timespec deadline) { grpc_uv_tcp_connect *connect; + grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL); + (void)channel_args; (void)interested_parties; + + if 
(channel_args != NULL) { + for (size_t i = 0; i < channel_args->num_args; i++) { + if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { + grpc_resource_quota_internal_unref(exec_ctx, resource_quota); + resource_quota = grpc_resource_quota_internal_ref( + channel_args->args[i].value.pointer.p); + } + } + } + connect = gpr_malloc(sizeof(grpc_uv_tcp_connect)); memset(connect, 0, sizeof(grpc_uv_tcp_connect)); connect->closure = closure; connect->endpoint = ep; connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t)); connect->addr_name = grpc_sockaddr_to_uri(resolved_addr); + connect->resource_quota = resource_quota; uv_tcp_init(uv_default_loop(), connect->tcp_handle); connect->connect_req.data = connect; // TODO(murgatroid99): figure out what the return value here means @@ -138,16 +156,18 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, // overridden by api_fuzzer.c void (*grpc_tcp_client_connect_impl)( grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, - grpc_pollset_set *interested_parties, const grpc_resolved_address *addr, + grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, + const grpc_resolved_address *addr, gpr_timespec deadline) = tcp_client_connect_impl; void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, + const grpc_channel_args *channel_args, const grpc_resolved_address *addr, gpr_timespec deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr, - deadline); + grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, + channel_args, addr, deadline); } #endif /* GRPC_UV */ diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 880af93ee1..70416c6eef 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -102,7 +102,7 @@ typedef struct { char *peer_string; - grpc_resource_user resource_user; + grpc_resource_user *resource_user; grpc_resource_user_slice_allocator slice_allocator; } grpc_tcp; @@ -110,28 +110,18 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, grpc_error *error); static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, grpc_error *error); -static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); - -static void tcp_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx, - grpc_tcp *tcp) { - if (gpr_atm_full_fetch_add(&tcp->shutdown_count, 1) == 0) { - grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user, - grpc_closure_create(tcp_unref_closure, tcp)); - } -} static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - tcp_maybe_shutdown_resource_user(exec_ctx, tcp); grpc_fd_shutdown(exec_ctx, tcp->em_fd); + grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); } static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd, "tcp_unref_orphan"); gpr_slice_buffer_destroy(&tcp->last_read_buffer); - grpc_resource_user_destroy(exec_ctx, &tcp->resource_user); + grpc_resource_user_unref(exec_ctx, tcp->resource_user); gpr_free(tcp->peer_string); gpr_free(tcp); } @@ -168,15 +158,9 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } #endif -static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error 
*error) { - TCP_UNREF(exec_ctx, arg, "resource_user"); -} - static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; - tcp_maybe_shutdown_resource_user(exec_ctx, tcp); gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer); TCP_UNREF(exec_ctx, tcp, "destroy"); } @@ -515,7 +499,7 @@ static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) { static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - return &tcp->resource_user; + return tcp->resource_user; } static const grpc_endpoint_vtable vtable = {tcp_read, @@ -543,9 +527,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, tcp->slice_size = slice_size; tcp->iov_size = 1; tcp->finished_edge = true; - /* paired with unref in grpc_tcp_destroy, and with the shutdown for our - * resource_user */ - gpr_ref_init(&tcp->refcount, 2); + /* paired with unref in grpc_tcp_destroy */ + gpr_ref_init(&tcp->refcount, 1); gpr_atm_no_barrier_store(&tcp->shutdown_count, 0); tcp->em_fd = em_fd; tcp->read_closure.cb = tcp_handle_read; @@ -553,10 +536,9 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, tcp->write_closure.cb = tcp_handle_write; tcp->write_closure.cb_arg = tcp; gpr_slice_buffer_init(&tcp->last_read_buffer); - grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string); - grpc_resource_user_slice_allocator_init(&tcp->slice_allocator, - &tcp->resource_user, - tcp_read_allocation_done, tcp); + tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); + grpc_resource_user_slice_allocator_init( + &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp); /* Tell network status tracker about new endpoint */ grpc_network_status_register_endpoint(&tcp->base); @@ -576,7 +558,6 @@ void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, GPR_ASSERT(ep->vtable == &vtable); tcp->release_fd = fd; tcp->release_fd_cb = done; - tcp_maybe_shutdown_resource_user(exec_ctx, tcp); gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer); TCP_UNREF(exec_ctx, tcp, "destroy"); } diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index b6fc1e4ca2..7e2fb0f1f9 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -657,41 +657,46 @@ done: } } +/* Return listener at port_index or NULL. Should only be called with s->mu + locked. 
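Only non-sibling listeners advance the port index; the extra fds serving
   the same port (e.g. dual-stack siblings) are reached through sp->sibling.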
*/ +static grpc_tcp_listener *get_port_index(grpc_tcp_server *s, + unsigned port_index) { + unsigned num_ports = 0; + grpc_tcp_listener *sp; + for (sp = s->head; sp; sp = sp->next) { + if (!sp->is_sibling) { + if (++num_ports > port_index) { + return sp; + } + } + } + return NULL; +} + unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s, unsigned port_index) { unsigned num_fds = 0; - grpc_tcp_listener *sp; gpr_mu_lock(&s->mu); - for (sp = s->head; sp && port_index != 0; sp = sp->next) { - if (!sp->is_sibling) { - --port_index; - } + grpc_tcp_listener *sp = get_port_index(s, port_index); + for (; sp; sp = sp->sibling) { + ++num_fds; } - for (; sp; sp = sp->sibling, ++num_fds) - ; gpr_mu_unlock(&s->mu); return num_fds; } int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index, unsigned fd_index) { - grpc_tcp_listener *sp; - int fd; gpr_mu_lock(&s->mu); - for (sp = s->head; sp && port_index != 0; sp = sp->next) { - if (!sp->is_sibling) { - --port_index; + grpc_tcp_listener *sp = get_port_index(s, port_index); + for (; sp; sp = sp->sibling, --fd_index) { + if (fd_index == 0) { + gpr_mu_unlock(&s->mu); + return sp->fd; } } - for (; sp && fd_index != 0; sp = sp->sibling, --fd_index) - ; - if (sp) { - fd = sp->fd; - } else { - fd = -1; - } gpr_mu_unlock(&s->mu); - return fd; + return -1; } void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index 73e4db3d65..b5b9b92a20 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -76,13 +76,30 @@ struct grpc_tcp_server { /* shutdown callback */ grpc_closure *shutdown_complete; + + grpc_resource_quota *resource_quota; }; -grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, +grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, + grpc_closure *shutdown_complete, const grpc_channel_args *args, grpc_tcp_server **server) { grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server)); - (void)args; + s->resource_quota = grpc_resource_quota_create(NULL); + for (size_t i = 0; i < (args == NULL ? 
0 : args->num_args); i++) { + if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) { + if (args->args[i].type == GRPC_ARG_POINTER) { + grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota); + s->resource_quota = + grpc_resource_quota_internal_ref(args->args[i].value.pointer.p); + } else { + grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota); + gpr_free(s); + return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA + " must be a pointer to a buffer pool"); + } + } + } gpr_ref_init(&s->refs, 1); s->on_accept_cb = NULL; s->on_accept_cb_arg = NULL; @@ -119,6 +136,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { gpr_free(sp->handle); gpr_free(sp); } + grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota); gpr_free(s); } @@ -201,7 +219,7 @@ static void on_connect(uv_stream_t *server, int status) { } else { gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status)); } - ep = grpc_tcp_create(client, peer_name_string); + ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string); sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, &acceptor); grpc_exec_ctx_finish(&exec_ctx); diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c index 3860fe3e9b..8e74c9e863 100644 --- a/src/core/lib/iomgr/tcp_uv.c +++ b/src/core/lib/iomgr/tcp_uv.c @@ -54,6 +54,9 @@ typedef struct { grpc_endpoint base; gpr_refcount refcount; + uv_write_t write_req; + uv_shutdown_t shutdown_req; + uv_tcp_t *handle; grpc_closure *read_cb; @@ -64,14 +67,23 @@ typedef struct { gpr_slice_buffer *write_slices; uv_buf_t *write_buffers; + grpc_resource_user resource_user; + bool shutting_down; + bool resource_user_shutting_down; + char *peer_string; grpc_pollset *pollset; } grpc_tcp; static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); } -static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); } +static void tcp_free(grpc_tcp *tcp) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_resource_user_destroy(&exec_ctx, &tcp->resource_user); + gpr_free(tcp); + grpc_exec_ctx_finish(&exec_ctx); +} /*#define GRPC_TCP_REFCOUNT_DEBUG*/ #ifdef GRPC_TCP_REFCOUNT_DEBUG @@ -106,11 +118,14 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_tcp *tcp = handle->data; (void)suggested_size; - tcp->read_slice = gpr_slice_malloc(GRPC_TCP_DEFAULT_READ_SLICE_SIZE); + tcp->read_slice = grpc_resource_user_slice_malloc( + &exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE); buf->base = (char *)GPR_SLICE_START_PTR(tcp->read_slice); buf->len = GPR_SLICE_LENGTH(tcp->read_slice); + grpc_exec_ctx_finish(&exec_ctx); } static void read_callback(uv_stream_t *stream, ssize_t nread, @@ -198,7 +213,8 @@ static void write_callback(uv_write_t *req, int status) { gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str); } gpr_free(tcp->write_buffers); - gpr_free(req); + grpc_resource_user_free(&exec_ctx, &tcp->resource_user, + sizeof(uv_buf_t) * tcp->write_slices->count); grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL); grpc_exec_ctx_finish(&exec_ctx); } @@ -243,12 +259,15 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, tcp->write_cb = cb; buffer_count = (unsigned int)tcp->write_slices->count; buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count); + grpc_resource_user_alloc(exec_ctx, &tcp->resource_user, + sizeof(uv_buf_t) * 
buffer_count, NULL); for (i = 0; i < buffer_count; i++) { slice = &tcp->write_slices->slices[i]; buffers[i].base = (char *)GPR_SLICE_START_PTR(*slice); buffers[i].len = GPR_SLICE_LENGTH(*slice); } - write_req = gpr_malloc(sizeof(uv_write_t)); + tcp->write_buffers = buffers; + write_req = &tcp->write_req; write_req->data = tcp; TCP_REF(tcp, "write"); // TODO(murgatroid99): figure out what the return value here means @@ -274,13 +293,29 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, (void)pollset; } -static void shutdown_callback(uv_shutdown_t *req, int status) { gpr_free(req); } +static void shutdown_callback(uv_shutdown_t *req, int status) {} + +static void resource_user_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + TCP_UNREF(arg, "resource_user"); +} + +static void uv_resource_user_maybe_shutdown(grpc_exec_ctx *exec_ctx, + grpc_tcp *tcp) { + if (!tcp->resource_user_shutting_down) { + tcp->resource_user_shutting_down = true; + TCP_REF(tcp, "resource_user"); + grpc_resource_user_shutdown( + exec_ctx, &tcp->resource_user, + grpc_closure_create(resource_user_shutdown_done, tcp)); + } +} static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; if (!tcp->shutting_down) { tcp->shutting_down = true; - uv_shutdown_t *req = gpr_malloc(sizeof(uv_shutdown_t)); + uv_shutdown_t *req = &tcp->shutdown_req; uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback); } } @@ -289,6 +324,7 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; uv_close((uv_handle_t *)tcp->handle, uv_close_callback); + uv_resource_user_maybe_shutdown(exec_ctx, tcp); TCP_UNREF(tcp, "destroy"); } @@ -297,18 +333,21 @@ static char *uv_get_peer(grpc_endpoint *ep) { return gpr_strdup(tcp->peer_string); } +static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) { + grpc_tcp *tcp = (grpc_tcp *)ep; + return &tcp->resource_user; +} + static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; } -static grpc_endpoint_vtable vtable = {uv_endpoint_read, - uv_endpoint_write, - uv_get_workqueue, - uv_add_to_pollset, - uv_add_to_pollset_set, - uv_endpoint_shutdown, - uv_destroy, - uv_get_peer}; +static grpc_endpoint_vtable vtable = { + uv_endpoint_read, uv_endpoint_write, uv_get_workqueue, + uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown, + uv_destroy, uv_get_resource_user, uv_get_peer}; -grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) { +grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, + grpc_resource_quota *resource_quota, + char *peer_string) { grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); if (grpc_tcp_trace) { @@ -325,6 +364,8 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) { gpr_ref_init(&tcp->refcount, 1); tcp->peer_string = gpr_strdup(peer_string); tcp->shutting_down = false; + tcp->resource_user_shutting_down = false; + grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string); /* Tell network status tracking code about the new endpoint */ grpc_network_status_register_endpoint(&tcp->base); diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h index eed41151ea..970fcafe4a 100644 --- a/src/core/lib/iomgr/tcp_uv.h +++ b/src/core/lib/iomgr/tcp_uv.h @@ -52,6 +52,8 @@ extern int grpc_tcp_trace; #define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192 -grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char 
*peer_string); +grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, + grpc_resource_quota *resource_quota, + char *peer_string); #endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */ diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c index 46f0491d10..e7abf2971f 100644 --- a/src/core/lib/iomgr/tcp_windows.c +++ b/src/core/lib/iomgr/tcp_windows.c @@ -109,46 +109,35 @@ typedef struct grpc_tcp { gpr_slice_buffer *write_slices; gpr_slice_buffer *read_slices; - grpc_resource_user resource_user; + grpc_resource_user *resource_user; /* The IO Completion Port runs from another thread. We need some mechanism to protect ourselves when requesting a shutdown. */ gpr_mu mu; int shutting_down; - gpr_atm resource_user_shutdown_count; - char *peer_string; } grpc_tcp; -static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); - -static void win_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx, - grpc_tcp *tcp) { - if (gpr_atm_full_fetch_add(&tcp->resource_user_shutdown_count, 1) == 0) { - grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user, - grpc_closure_create(win_unref_closure, tcp)); - } -} - -static void tcp_free(grpc_tcp *tcp) { +static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { grpc_winsocket_destroy(tcp->socket); gpr_mu_destroy(&tcp->mu); gpr_free(tcp->peer_string); + grpc_resource_user_unref(exec_ctx, tcp->resource_user); gpr_free(tcp); } /*#define GRPC_TCP_REFCOUNT_DEBUG*/ #ifdef GRPC_TCP_REFCOUNT_DEBUG -#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) +#define TCP_UNREF(exec_ctx, tcp, reason) \ + tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { +static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, + const char *reason, const char *file, int line) { gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp, reason, tcp->refcount.count, tcp->refcount.count - 1); if (gpr_unref(&tcp->refcount)) { - tcp_free(tcp); + tcp_free(exec_ctx, tcp); } } @@ -159,22 +148,17 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, gpr_ref(&tcp->refcount); } #else -#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) +#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp)) #define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_tcp *tcp) { +static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { if (gpr_unref(&tcp->refcount)) { - tcp_free(tcp); + tcp_free(exec_ctx, tcp); } } static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } #endif -static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - TCP_UNREF(arg, "resource_user"); -} - /* Asynchronous callback from the IOCP, or the background thread. 
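The exec_ctx is now threaded through TCP_UNREF so that the final unref can
   release the resource user inside tcp_free.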
*/ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { grpc_tcp *tcp = tcpp; @@ -203,7 +187,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } tcp->read_cb = NULL; - TCP_UNREF(tcp, "read"); + TCP_UNREF(exec_ctx, tcp, "read"); grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); } @@ -287,7 +271,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } } - TCP_UNREF(tcp, "write"); + TCP_UNREF(exec_ctx, tcp, "write"); grpc_exec_ctx_sched(exec_ctx, cb, error, NULL); } @@ -355,7 +339,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { - TCP_UNREF(tcp, "write"); + TCP_UNREF(exec_ctx, tcp, "write"); grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"), NULL); return; @@ -396,15 +380,14 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { callback. See the comments in on_read and on_write. */ tcp->shutting_down = 1; grpc_winsocket_shutdown(tcp->socket); - win_maybe_shutdown_resource_user(exec_ctx, tcp); gpr_mu_unlock(&tcp->mu); + grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); } static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; - win_maybe_shutdown_resource_user(exec_ctx, tcp); - TCP_UNREF(tcp, "destroy"); + TCP_UNREF(exec_ctx, tcp, "destroy"); } static char *win_get_peer(grpc_endpoint *ep) { @@ -416,7 +399,7 @@ static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; } static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - return &tcp->resource_user; + return tcp->resource_user; } static grpc_endpoint_vtable vtable = {win_read, @@ -441,7 +424,7 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, grpc_closure_init(&tcp->on_read, on_read, tcp); grpc_closure_init(&tcp->on_write, on_write, tcp); tcp->peer_string = gpr_strdup(peer_string); - grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string); + tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); /* Tell network status tracking code about the new endpoint */ grpc_network_status_register_endpoint(&tcp->base); diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c index 0eca46eb52..ebf72a3abb 100644 --- a/src/core/lib/security/transport/security_connector.c +++ b/src/core/lib/security/transport/security_connector.c @@ -210,11 +210,11 @@ void grpc_security_connector_unref(grpc_security_connector *sc) { } static void connector_pointer_arg_destroy(void *p) { - GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg"); + GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg_destroy"); } static void *connector_pointer_arg_copy(void *p) { - return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg"); + return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy"); } static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); } diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 6c25952c0a..e3b088f663 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -1461,6 +1461,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, grpc_slice_buffer_stream_init( &call->sending_stream, &op->data.send_message->data.raw.slice_buffer, op->flags); + /* If the outgoing 
buffer is already compressed, mark it as so in the + flags. These will be picked up by the compression filter and further + (wasteful) attempts at compression skipped. */ + if (op->data.send_message->data.raw.compression > GRPC_COMPRESS_NONE) { + call->sending_stream.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS; + } stream_op->send_message = &call->sending_stream.base; break; case GRPC_OP_SEND_CLOSE_FROM_CLIENT: diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c index 41242684da..0db8b41aa9 100644 --- a/src/core/lib/surface/version.c +++ b/src/core/lib/surface/version.c @@ -36,6 +36,6 @@ #include <grpc/grpc.h> -const char *grpc_version_string(void) { return "1.1.0-dev"; } +const char *grpc_version_string(void) { return "2.0.0-dev"; } const char *grpc_g_stands_for(void) { return "good"; } diff --git a/src/core/lib/transport/pid_controller.c b/src/core/lib/transport/pid_controller.c new file mode 100644 index 0000000000..3cef225d4b --- /dev/null +++ b/src/core/lib/transport/pid_controller.c @@ -0,0 +1,57 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "src/core/lib/transport/pid_controller.h" + +void grpc_pid_controller_init(grpc_pid_controller *pid_controller, + double gain_p, double gain_i, double gain_d) { + pid_controller->gain_p = gain_p; + pid_controller->gain_i = gain_i; + pid_controller->gain_d = gain_d; + grpc_pid_controller_reset(pid_controller); +} + +void grpc_pid_controller_reset(grpc_pid_controller *pid_controller) { + pid_controller->last_error = 0.0; + pid_controller->error_integral = 0.0; +} + +double grpc_pid_controller_update(grpc_pid_controller *pid_controller, + double error, double dt) { + pid_controller->error_integral += error * dt; + double diff_error = (error - pid_controller->last_error) / dt; + pid_controller->last_error = error; + return dt * (pid_controller->gain_p * error + + pid_controller->gain_i * pid_controller->error_integral + + pid_controller->gain_d * diff_error); +} diff --git a/src/core/lib/transport/pid_controller.h b/src/core/lib/transport/pid_controller.h new file mode 100644 index 0000000000..059b5b0834 --- /dev/null +++ b/src/core/lib/transport/pid_controller.h @@ -0,0 +1,64 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H +#define GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H + +/* \file Simple PID controller. + Implements a proportional-integral-derivative controller. + Used when we want to iteratively control a variable to converge some other + observed value to a 'set-point'. + Gains can be set to adjust sensitivity to current error (p), the integral + of error (i), and the derivative of error (d). 
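Each update returns a delta to the control value, computed with the
   standard discrete control law
     output = dt * (gain_p * error + gain_i * integral(error) + gain_d * d(error)/dt)
   as implemented in pid_controller.c above.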
*/ + +typedef struct { + double gain_p; + double gain_i; + double gain_d; + double last_error; + double error_integral; +} grpc_pid_controller; + +/** Initialize the controller */ +void grpc_pid_controller_init(grpc_pid_controller *pid_controller, + double gain_p, double gain_i, double gain_d); + +/** Reset the controller: useful when things have changed significantly */ +void grpc_pid_controller_reset(grpc_pid_controller *pid_controller); + +/** Update the controller: given a current error estimate, and the time since + the last update, returns a delta to the control value */ +double grpc_pid_controller_update(grpc_pid_controller *pid_controller, + double error, double dt); + +#endif diff --git a/src/core/lib/tsi/ssl_transport_security.c b/src/core/lib/tsi/ssl_transport_security.c index 749b46e19f..366dca9507 100644 --- a/src/core/lib/tsi/ssl_transport_security.c +++ b/src/core/lib/tsi/ssl_transport_security.c @@ -31,9 +31,6 @@ * */ -#include "src/core/lib/iomgr/sockaddr.h" - -#include "src/core/lib/iomgr/socket_utils.h" #include "src/core/lib/tsi/ssl_transport_security.h" #include <grpc/support/port_platform.h> @@ -41,6 +38,15 @@ #include <limits.h> #include <string.h> +/* TODO(jboeuf): refactor inet_ntop into a portability header. */ +/* Note: for whomever reads this and tries to refactor this, this + can't be in grpc, it has to be in gpr. */ +#ifdef GPR_WINDOWS +#include <ws2tcpip.h> +#else +#include <arpa/inet.h> +#endif + #include <grpc/support/alloc.h> #include <grpc/support/log.h> #include <grpc/support/sync.h> @@ -349,8 +355,8 @@ static tsi_result add_subject_alt_names_properties_to_peer( result = TSI_INTERNAL_ERROR; break; } - const char *name = grpc_inet_ntop(af, subject_alt_name->d.iPAddress->data, - ntop_buf, INET6_ADDRSTRLEN); + const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data, + ntop_buf, INET6_ADDRSTRLEN); if (name == NULL) { gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet."); result = TSI_INTERNAL_ERROR; |
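For reference, a minimal sketch (not part of the commit) of how the new grpc_pid_controller API composes, here steering a value toward a set-point; the gains, time step, and loop are illustrative assumptions:

    #include <stdio.h>

    #include "src/core/lib/transport/pid_controller.h"

    int main(void) {
      grpc_pid_controller pid;
      /* Illustrative gains; the commit does not prescribe values. */
      grpc_pid_controller_init(&pid, 0.8 /* p */, 0.2 /* i */, 0.1 /* d */);

      double control = 0.0;           /* the variable being steered */
      const double set_point = 100.0; /* target we want control to reach */
      const double dt = 0.1;          /* seconds between updates */

      for (int step = 0; step < 50; step++) {
        double error = set_point - control;
        /* grpc_pid_controller_update returns a delta to apply to the
           control value, not an absolute value. */
        control += grpc_pid_controller_update(&pid, error, dt);
        printf("step %d: control = %f\n", step, control);
      }
      return 0;
    }

Because the update yields a delta, the caller integrates it itself; grpc_pid_controller_reset() clears the accumulated state when conditions change abruptly, per the header comment.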