diff options
author | Yash Tibrewal <yashkt@google.com> | 2017-10-13 16:07:13 -0700 |
---|---|---|
committer | Yash Tibrewal <yashkt@google.com> | 2017-10-18 17:12:19 -0700 |
commit | 0ee7574732a06e8cace4e099a678f4bd5dbff679 (patch) | |
tree | e43d5de442fdcc3d39cd5af687f319fa39612d3f /src/core/lib | |
parent | 6bf5f833efe2cb9e2ecc14358dd9699cd5d05263 (diff) |
Removing instances of exec_ctx being passed around in functions in
src/core. exec_ctx is now a thread_local pointer of type ExecCtx (instead
of grpc_exec_ctx) that is initialized whenever an ExecCtx object is
instantiated. ExecCtx also keeps track of the previous exec_ctx, so that
nesting of exec_ctx objects is allowed. This means that only one exec_ctx
is in use at any time. Also, grpc_exec_ctx_finish is called in the
destructor of the object, and the previous exec_ctx is restored to avoid
breaking current functionality. The code still explicitly calls
grpc_exec_ctx_finish because removing all such instances causes the code
to break.
Diffstat (limited to 'src/core/lib')
163 files changed, 3383 insertions, 4302 deletions
diff --git a/src/core/lib/backoff/backoff.cc b/src/core/lib/backoff/backoff.cc index fe0a751817..bb9388e3b4 100644 --- a/src/core/lib/backoff/backoff.cc +++ b/src/core/lib/backoff/backoff.cc @@ -32,11 +32,11 @@ void grpc_backoff_init(grpc_backoff *backoff, backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec; } -grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) { +grpc_millis grpc_backoff_begin(grpc_backoff *backoff) { backoff->current_timeout_millis = backoff->initial_connect_timeout; const grpc_millis first_timeout = GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis); - return grpc_exec_ctx_now(exec_ctx) + first_timeout; + return grpc_exec_ctx_now() + first_timeout; } /* Generate a random number between 0 and 1. */ @@ -45,7 +45,7 @@ static double generate_uniform_random_number(uint32_t *rng_state) { return *rng_state / (double)((uint32_t)1 << 31); } -grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) { +grpc_millis grpc_backoff_step(grpc_backoff *backoff) { const double new_timeout_millis = backoff->multiplier * (double)backoff->current_timeout_millis; backoff->current_timeout_millis = @@ -60,10 +60,10 @@ grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) { (grpc_millis)((double)(backoff->current_timeout_millis) + jitter); const grpc_millis current_deadline = - grpc_exec_ctx_now(exec_ctx) + backoff->current_timeout_millis; + grpc_exec_ctx_now() + backoff->current_timeout_millis; const grpc_millis min_deadline = - grpc_exec_ctx_now(exec_ctx) + backoff->min_timeout_millis; + grpc_exec_ctx_now() + backoff->min_timeout_millis; return GPR_MAX(current_deadline, min_deadline); } diff --git a/src/core/lib/backoff/backoff.h b/src/core/lib/backoff/backoff.h index 80e49ea52a..c48483758e 100644 --- a/src/core/lib/backoff/backoff.h +++ b/src/core/lib/backoff/backoff.h @@ -51,9 +51,9 @@ void grpc_backoff_init(grpc_backoff *backoff, grpc_millis 
max_timeout_millis); /// Begin retry loop: returns a timespec for the NEXT retry -grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff); +grpc_millis grpc_backoff_begin(grpc_backoff *backoff); /// Step a retry loop: returns a timespec for the NEXT retry -grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff); +grpc_millis grpc_backoff_step(grpc_backoff *backoff); /// Reset the backoff, so the next grpc_backoff_step will be a /// grpc_backoff_begin /// instead diff --git a/src/core/lib/channel/channel_args.cc b/src/core/lib/channel/channel_args.cc index 30248b3c60..5aa4ac2d3a 100644 --- a/src/core/lib/channel/channel_args.cc +++ b/src/core/lib/channel/channel_args.cc @@ -188,7 +188,7 @@ grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) { return b; } -void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) { +void grpc_channel_args_destroy(grpc_channel_args *a) { size_t i; if (!a) return; for (i = 0; i < a->num_args; i++) { @@ -199,8 +199,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) { case GRPC_ARG_INTEGER: break; case GRPC_ARG_POINTER: - a->args[i].value.pointer.vtable->destroy(exec_ctx, - a->args[i].value.pointer.p); + a->args[i].value.pointer.vtable->destroy(a->args[i].value.pointer.p); break; } gpr_free(a->args[i].key); @@ -299,8 +298,7 @@ static int find_stream_compression_algorithm_states_bitset( } grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_compression_algorithm algorithm, int state) { + grpc_channel_args **a, grpc_compression_algorithm algorithm, int state) { int *states_arg = NULL; grpc_channel_args *result = *a; const int states_arg_found = @@ -333,15 +331,15 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm); } result = grpc_channel_args_copy_and_add(*a, &tmp, 1); - 
grpc_channel_args_destroy(exec_ctx, *a); + grpc_channel_args_destroy(*a); *a = result; } return result; } grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_stream_compression_algorithm algorithm, int state) { + grpc_channel_args **a, grpc_stream_compression_algorithm algorithm, + int state) { int *states_arg = NULL; grpc_channel_args *result = *a; const int states_arg_found = @@ -375,7 +373,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm); } result = grpc_channel_args_copy_and_add(*a, &tmp, 1); - grpc_channel_args_destroy(exec_ctx, *a); + grpc_channel_args_destroy(*a); *a = result; } return result; diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h index 1896d35cf4..6ef3562142 100644 --- a/src/core/lib/channel/channel_args.h +++ b/src/core/lib/channel/channel_args.h @@ -57,7 +57,7 @@ grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a, const grpc_channel_args *b); /** Destroy arguments created by \a grpc_channel_args_copy */ -void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a); +void grpc_channel_args_destroy(grpc_channel_args *a); /** Returns the compression algorithm set in \a a. */ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm( @@ -89,8 +89,7 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm( * modified to point to the returned instance (which may be different from the * input value of \a a). */ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_compression_algorithm algorithm, int enabled); + grpc_channel_args **a, grpc_compression_algorithm algorithm, int enabled); /** Sets the support for the given stream compression algorithm. By default, all * stream compression algorithms are enabled. 
It's an error to disable an @@ -100,8 +99,8 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( * modified to point to the returned instance (which may be different from the * input value of \a a). */ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_stream_compression_algorithm algorithm, int enabled); + grpc_channel_args **a, grpc_stream_compression_algorithm algorithm, + int enabled); /** Returns the bitset representing the support state (true for enabled, false * for disabled) for compression algorithms. diff --git a/src/core/lib/channel/channel_stack.cc b/src/core/lib/channel/channel_stack.cc index 775c8bc667..22372aa7d7 100644 --- a/src/core/lib/channel/channel_stack.cc +++ b/src/core/lib/channel/channel_stack.cc @@ -88,8 +88,8 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack, } grpc_error *grpc_channel_stack_init( - grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, + int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg, + const grpc_channel_filter **filters, size_t filter_count, const grpc_channel_args *channel_args, grpc_transport *optional_transport, const char *name, grpc_channel_stack *stack) { size_t call_size = @@ -118,8 +118,7 @@ grpc_error *grpc_channel_stack_init( args.is_last = i == (filter_count - 1); elems[i].filter = filters[i]; elems[i].channel_data = user_data; - grpc_error *error = - elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args); + grpc_error *error = elems[i].filter->init_channel_elem(&elems[i], &args); if (error != GRPC_ERROR_NONE) { if (first_error == GRPC_ERROR_NONE) { first_error = error; @@ -139,20 +138,18 @@ grpc_error *grpc_channel_stack_init( return first_error; } -void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *stack) { +void 
grpc_channel_stack_destroy(grpc_channel_stack *stack) { grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack); size_t count = stack->count; size_t i; /* destroy per-filter data */ for (i = 0; i < count; i++) { - channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]); + channel_elems[i].filter->destroy_channel_elem(&channel_elems[i]); } } -grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *channel_stack, +grpc_error *grpc_call_stack_init(grpc_channel_stack *channel_stack, int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg, const grpc_call_element_args *elem_args) { @@ -175,8 +172,8 @@ grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, call_elems[i].filter = channel_elems[i].filter; call_elems[i].channel_data = channel_elems[i].channel_data; call_elems[i].call_data = user_data; - grpc_error *error = call_elems[i].filter->init_call_elem( - exec_ctx, &call_elems[i], elem_args); + grpc_error *error = + call_elems[i].filter->init_call_elem(&call_elems[i], elem_args); if (error != GRPC_ERROR_NONE) { if (first_error == GRPC_ERROR_NONE) { first_error = error; @@ -190,8 +187,7 @@ grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, return first_error; } -void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_stack *call_stack, +void grpc_call_stack_set_pollset_or_pollset_set(grpc_call_stack *call_stack, grpc_polling_entity *pollent) { size_t count = call_stack->count; grpc_call_element *call_elems; @@ -204,18 +200,16 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, /* init per-filter data */ for (i = 0; i < count; i++) { - call_elems[i].filter->set_pollset_or_pollset_set(exec_ctx, &call_elems[i], - pollent); + call_elems[i].filter->set_pollset_or_pollset_set(&call_elems[i], pollent); user_data += ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data); } } void grpc_call_stack_ignore_set_pollset_or_pollset_set( - 
grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_polling_entity *pollent) {} + grpc_call_element *elem, grpc_polling_entity *pollent) {} -void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, +void grpc_call_stack_destroy(grpc_call_stack *stack, const grpc_call_final_info *final_info, grpc_closure *then_schedule_closure) { grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack); @@ -225,29 +219,26 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, /* destroy per-filter data */ for (i = 0; i < count; i++) { elems[i].filter->destroy_call_elem( - exec_ctx, &elems[i], final_info, - i == count - 1 ? then_schedule_closure : NULL); + &elems[i], final_info, i == count - 1 ? then_schedule_closure : NULL); } } -void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op_batch *op) { grpc_call_element *next_elem = elem + 1; GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op); - next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op); + next_elem->filter->start_transport_stream_op_batch(next_elem, op); } -void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +void grpc_channel_next_get_info(grpc_channel_element *elem, const grpc_channel_info *channel_info) { grpc_channel_element *next_elem = elem + 1; - next_elem->filter->get_channel_info(exec_ctx, next_elem, channel_info); + next_elem->filter->get_channel_info(next_elem, channel_info); } -void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_transport_op *op) { +void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op) { grpc_channel_element *next_elem = elem + 1; - next_elem->filter->start_transport_op(exec_ctx, next_elem, op); + next_elem->filter->start_transport_op(next_elem, op); } grpc_channel_stack *grpc_channel_stack_from_top_element( diff --git 
a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index 5c00c09889..6ff6394e03 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -99,14 +99,12 @@ typedef struct { typedef struct { /* Called to eg. send/receive data on a call. See grpc_call_next_op on how to call the next element in the stack */ - void (*start_transport_stream_op_batch)(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, + void (*start_transport_stream_op_batch)(grpc_call_element *elem, grpc_transport_stream_op_batch *op); /* Called to handle channel level operations - e.g. new calls, or transport closure. See grpc_channel_next_op on how to call the next element in the stack */ - void (*start_transport_op)(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, grpc_transport_op *op); + void (*start_transport_op)(grpc_channel_element *elem, grpc_transport_op *op); /* sizeof(per call data) */ size_t sizeof_call_data; @@ -119,11 +117,9 @@ typedef struct { transport and is on the server. Most filters want to ignore this argument. Implementations may assume that elem->call_data is all zeros. */ - grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, + grpc_error *(*init_call_elem)(grpc_call_element *elem, const grpc_call_element_args *args); - void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, + void (*set_pollset_or_pollset_set)(grpc_call_element *elem, grpc_polling_entity *pollent); /* Destroy per call data. The filter does not need to do any chaining. @@ -131,7 +127,7 @@ typedef struct { \a then_schedule_closure that should be passed to GRPC_CLOSURE_SCHED when destruction is complete. \a final_info contains data about the completed call, mainly for reporting purposes. 
*/ - void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + void (*destroy_call_elem)(grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *then_schedule_closure); @@ -144,16 +140,14 @@ typedef struct { useful for asserting correct configuration by upper layer code. The filter does not need to do any chaining. Implementations may assume that elem->call_data is all zeros. */ - grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, + grpc_error *(*init_channel_elem)(grpc_channel_element *elem, grpc_channel_element_args *args); /* Destroy per channel data. The filter does not need to do any chaining */ - void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem); + void (*destroy_channel_elem)(grpc_channel_element *elem); /* Implement grpc_channel_get_info() */ - void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, + void (*get_channel_info)(grpc_channel_element *elem, const grpc_channel_info *channel_info); /* The name of this filter */ @@ -211,68 +205,62 @@ size_t grpc_channel_stack_size(const grpc_channel_filter **filters, size_t filter_count); /* Initialize a channel stack given some filters */ grpc_error *grpc_channel_stack_init( - grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, + int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg, + const grpc_channel_filter **filters, size_t filter_count, const grpc_channel_args *args, grpc_transport *optional_transport, const char *name, grpc_channel_stack *stack); /* Destroy a channel stack */ -void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *stack); +void grpc_channel_stack_destroy(grpc_channel_stack *stack); /* Initialize a call stack given a channel stack. 
transport_server_data is expected to be NULL on a client, or an opaque transport owned pointer on the server. */ -grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *channel_stack, +grpc_error *grpc_call_stack_init(grpc_channel_stack *channel_stack, int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg, const grpc_call_element_args *elem_args); /* Set a pollset or a pollset_set for a call stack: must occur before the first * op is started */ -void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_stack *call_stack, +void grpc_call_stack_set_pollset_or_pollset_set(grpc_call_stack *call_stack, grpc_polling_entity *pollent); #ifndef NDEBUG #define GRPC_CALL_STACK_REF(call_stack, reason) \ grpc_stream_ref(&(call_stack)->refcount, reason) -#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \ - grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason) +#define GRPC_CALL_STACK_UNREF(call_stack, reason) \ + grpc_stream_unref(&(call_stack)->refcount, reason) #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \ grpc_stream_ref(&(channel_stack)->refcount, reason) -#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \ - grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason) +#define GRPC_CHANNEL_STACK_UNREF(channel_stack, reason) \ + grpc_stream_unref(&(channel_stack)->refcount, reason) #else #define GRPC_CALL_STACK_REF(call_stack, reason) \ grpc_stream_ref(&(call_stack)->refcount) -#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \ - grpc_stream_unref(exec_ctx, &(call_stack)->refcount) +#define GRPC_CALL_STACK_UNREF(call_stack, reason) \ + grpc_stream_unref(&(call_stack)->refcount) #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \ grpc_stream_ref(&(channel_stack)->refcount) -#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \ - grpc_stream_unref(exec_ctx, &(channel_stack)->refcount) +#define GRPC_CHANNEL_STACK_UNREF(channel_stack, reason) 
\ + grpc_stream_unref(&(channel_stack)->refcount) #endif /* Destroy a call stack */ -void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, +void grpc_call_stack_destroy(grpc_call_stack *stack, const grpc_call_final_info *final_info, grpc_closure *then_schedule_closure); /* Ignore set pollset{_set} - used by filters if they don't care about pollsets * at all. Does nothing. */ void grpc_call_stack_ignore_set_pollset_or_pollset_set( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_polling_entity *pollent); + grpc_call_element *elem, grpc_polling_entity *pollent); /* Call the next operation in a call stack */ -void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op_batch *op); /* Call the next operation (depending on call directionality) in a channel stack */ -void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_transport_op *op); +void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op); /* Pass through a request to get_channel_info() to the next child element */ -void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +void grpc_channel_next_get_info(grpc_channel_element *elem, const grpc_channel_info *channel_info); /* Given the top element of a channel stack, get the channel stack itself */ diff --git a/src/core/lib/channel/channel_stack_builder.cc b/src/core/lib/channel/channel_stack_builder.cc index b663ebfb52..4de606a277 100644 --- a/src/core/lib/channel/channel_stack_builder.cc +++ b/src/core/lib/channel/channel_stack_builder.cc @@ -150,10 +150,9 @@ void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder, } void grpc_channel_stack_builder_set_channel_arguments( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - const grpc_channel_args *args) { + grpc_channel_stack_builder *builder, const grpc_channel_args *args) { if 
(builder->args != NULL) { - grpc_channel_args_destroy(exec_ctx, builder->args); + grpc_channel_args_destroy(builder->args); } builder->args = grpc_channel_args_copy(args); } @@ -241,8 +240,7 @@ bool grpc_channel_stack_builder_add_filter_after( return true; } -void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder) { +void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder) { filter_node *p = builder->begin.next; while (p != &builder->end) { filter_node *next = p->next; @@ -250,16 +248,15 @@ void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx, p = next; } if (builder->args != NULL) { - grpc_channel_args_destroy(exec_ctx, builder->args); + grpc_channel_args_destroy(builder->args); } gpr_free(builder->target); gpr_free(builder); } grpc_error *grpc_channel_stack_builder_finish( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, void **result) { + grpc_channel_stack_builder *builder, size_t prefix_bytes, int initial_refs, + grpc_iomgr_cb_func destroy, void *destroy_arg, void **result) { // count the number of filters size_t num_filters = 0; for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { @@ -284,12 +281,12 @@ grpc_error *grpc_channel_stack_builder_finish( (grpc_channel_stack *)((char *)(*result) + prefix_bytes); // and initialize it grpc_error *error = grpc_channel_stack_init( - exec_ctx, initial_refs, destroy, - destroy_arg == NULL ? *result : destroy_arg, filters, num_filters, - builder->args, builder->transport, builder->name, channel_stack); + initial_refs, destroy, destroy_arg == NULL ? 
*result : destroy_arg, + filters, num_filters, builder->args, builder->transport, builder->name, + channel_stack); if (error != GRPC_ERROR_NONE) { - grpc_channel_stack_destroy(exec_ctx, channel_stack); + grpc_channel_stack_destroy(channel_stack); gpr_free(*result); *result = NULL; } else { @@ -305,7 +302,7 @@ grpc_error *grpc_channel_stack_builder_finish( } } - grpc_channel_stack_builder_destroy(exec_ctx, builder); + grpc_channel_stack_builder_destroy(builder); gpr_free((grpc_channel_filter **)filters); return error; diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h index fdff2a2b6d..683b5817f3 100644 --- a/src/core/lib/channel/channel_stack_builder.h +++ b/src/core/lib/channel/channel_stack_builder.h @@ -58,8 +58,7 @@ grpc_transport *grpc_channel_stack_builder_get_transport( /// Set channel arguments: copies args void grpc_channel_stack_builder_set_channel_arguments( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - const grpc_channel_args *args); + grpc_channel_stack_builder *builder, const grpc_channel_args *args); /// Return a borrowed pointer to the channel arguments const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments( @@ -152,13 +151,11 @@ void grpc_channel_stack_builder_iterator_destroy( /// \a initial_refs, \a destroy, \a destroy_arg are as per /// grpc_channel_stack_init grpc_error *grpc_channel_stack_builder_finish( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, void **result); + grpc_channel_stack_builder *builder, size_t prefix_bytes, int initial_refs, + grpc_iomgr_cb_func destroy, void *destroy_arg, void **result); /// Destroy the builder without creating a channel stack -void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder); +void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder); 
extern grpc_tracer_flag grpc_trace_channel_stack_builder; diff --git a/src/core/lib/channel/connected_channel.cc b/src/core/lib/channel/connected_channel.cc index 4f37908958..460e8b4c65 100644 --- a/src/core/lib/channel/connected_channel.cc +++ b/src/core/lib/channel/connected_channel.cc @@ -51,17 +51,14 @@ typedef struct connected_channel_call_data { callback_state recv_message_ready; } call_data; -static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void run_in_call_combiner(void *arg, grpc_error *error) { callback_state *state = (callback_state *)arg; - GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner, - state->original_closure, GRPC_ERROR_REF(error), - state->reason); + GRPC_CALL_COMBINER_START(state->call_combiner, state->original_closure, + GRPC_ERROR_REF(error), state->reason); } -static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - run_in_call_combiner(exec_ctx, arg, error); +static void run_cancel_in_call_combiner(void *arg, grpc_error *error) { + run_in_call_combiner(arg, error); gpr_free(arg); } @@ -98,8 +95,7 @@ static callback_state *get_state_for_batch( /* Intercept a call operation and either push it directly up or translate it into transport stream operations */ static void con_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { + grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; if (batch->recv_initial_metadata) { @@ -126,58 +122,52 @@ static void con_start_transport_stream_op_batch( callback_state *state = get_state_for_batch(calld, batch); intercept_callback(calld, state, false, "on_complete", &batch->on_complete); } - grpc_transport_perform_stream_op(exec_ctx, chand->transport, - TRANSPORT_STREAM_FROM_CALL_DATA(calld), - batch); - 
GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, - "passed batch to transport"); + grpc_transport_perform_stream_op( + chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), batch); + GRPC_CALL_COMBINER_STOP(calld->call_combiner, "passed batch to transport"); } -static void con_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static void con_start_transport_op(grpc_channel_element *elem, grpc_transport_op *op) { channel_data *chand = (channel_data *)elem->channel_data; - grpc_transport_perform_op(exec_ctx, chand->transport, op); + grpc_transport_perform_op(chand->transport, op); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static grpc_error *init_call_elem(grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; calld->call_combiner = args->call_combiner; int r = grpc_transport_init_stream( - exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), + chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), &args->call_stack->refcount, args->server_transport_data, args->arena); return r == 0 ? 
GRPC_ERROR_NONE : GRPC_ERROR_CREATE_FROM_STATIC_STRING( "transport stream initialization failed"); } -static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static void set_pollset_or_pollset_set(grpc_call_element *elem, grpc_polling_entity *pollent) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; - grpc_transport_set_pops(exec_ctx, chand->transport, + grpc_transport_set_pops(chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent); } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +static void destroy_call_elem(grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *then_schedule_closure) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; - grpc_transport_destroy_stream(exec_ctx, chand->transport, + grpc_transport_destroy_stream(chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), then_schedule_closure); } /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static grpc_error *init_channel_elem(grpc_channel_element *elem, grpc_channel_element_args *args) { channel_data *cd = (channel_data *)elem->channel_data; GPR_ASSERT(args->is_last); @@ -186,17 +176,15 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for channel_data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { +static void destroy_channel_elem(grpc_channel_element *elem) { channel_data *cd = (channel_data *)elem->channel_data; if (cd->transport) { - grpc_transport_destroy(exec_ctx, cd->transport); + grpc_transport_destroy(cd->transport); } } /* No-op. 
*/ -static void con_get_channel_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static void con_get_channel_info(grpc_channel_element *elem, const grpc_channel_info *channel_info) {} const grpc_channel_filter grpc_connected_filter = { @@ -230,8 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack, grpc_transport_stream_size((grpc_transport *)t); } -bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +bool grpc_add_connected_filter(grpc_channel_stack_builder *builder, void *arg_must_be_null) { GPR_ASSERT(arg_must_be_null == NULL); grpc_transport *t = grpc_channel_stack_builder_get_transport(builder); diff --git a/src/core/lib/channel/connected_channel.h b/src/core/lib/channel/connected_channel.h index 4615727baa..4697006197 100644 --- a/src/core/lib/channel/connected_channel.h +++ b/src/core/lib/channel/connected_channel.h @@ -27,8 +27,7 @@ extern "C" { extern const grpc_channel_filter grpc_connected_filter; -bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +bool grpc_add_connected_filter(grpc_channel_stack_builder *builder, void *arg_must_be_null); /* Debug helper to dig the transport stream out of a call element */ diff --git a/src/core/lib/channel/handshaker.cc b/src/core/lib/channel/handshaker.cc index b27ee37e5b..ff7e10a5b9 100644 --- a/src/core/lib/channel/handshaker.cc +++ b/src/core/lib/channel/handshaker.cc @@ -34,23 +34,20 @@ void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, handshaker->vtable = vtable; } -void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker) { - handshaker->vtable->destroy(exec_ctx, handshaker); +void grpc_handshaker_destroy(grpc_handshaker* handshaker) { + handshaker->vtable->destroy(handshaker); } -void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, grpc_error* why) { - handshaker->vtable->shutdown(exec_ctx, handshaker, why); +void 
grpc_handshaker_shutdown(grpc_handshaker* handshaker, grpc_error* why) { + handshaker->vtable->shutdown(handshaker, why); } -void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, +void grpc_handshaker_do_handshake(grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args) { - handshaker->vtable->do_handshake(exec_ctx, handshaker, acceptor, - on_handshake_done, args); + handshaker->vtable->do_handshake(handshaker, acceptor, on_handshake_done, + args); } // @@ -116,9 +113,9 @@ void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head, } void grpc_handshake_manager_pending_list_shutdown_all( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why) { + grpc_handshake_manager* head, grpc_error* why) { while (head != NULL) { - grpc_handshake_manager_shutdown(exec_ctx, head, GRPC_ERROR_REF(why)); + grpc_handshake_manager_shutdown(head, GRPC_ERROR_REF(why)); head = head->next; } GRPC_ERROR_UNREF(why); @@ -145,11 +142,10 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr, gpr_mu_unlock(&mgr->mu); } -static void grpc_handshake_manager_unref(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr) { +static void grpc_handshake_manager_unref(grpc_handshake_manager* mgr) { if (gpr_unref(&mgr->refs)) { for (size_t i = 0; i < mgr->count; ++i) { - grpc_handshaker_destroy(exec_ctx, mgr->handshakers[i]); + grpc_handshaker_destroy(mgr->handshakers[i]); } gpr_free(mgr->handshakers); gpr_mu_destroy(&mgr->mu); @@ -157,19 +153,17 @@ static void grpc_handshake_manager_unref(grpc_exec_ctx* exec_ctx, } } -void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr) { - grpc_handshake_manager_unref(exec_ctx, mgr); +void grpc_handshake_manager_destroy(grpc_handshake_manager* mgr) { + grpc_handshake_manager_unref(mgr); } -void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr, 
+void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr, grpc_error* why) { gpr_mu_lock(&mgr->mu); // Shutdown the handshaker that's currently in progress, if any. if (!mgr->shutdown && mgr->index > 0) { mgr->shutdown = true; - grpc_handshaker_shutdown(exec_ctx, mgr->handshakers[mgr->index - 1], + grpc_handshaker_shutdown(mgr->handshakers[mgr->index - 1], GRPC_ERROR_REF(why)); } gpr_mu_unlock(&mgr->mu); @@ -179,8 +173,7 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, // Helper function to call either the next handshaker or the // on_handshake_done callback. // Returns true if we've scheduled the on_handshake_done callback. -static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr, +static bool call_next_handshaker_locked(grpc_handshake_manager* mgr, grpc_error* error) { GPR_ASSERT(mgr->index <= mgr->count); // If we got an error or we've been shut down or we're exiting early or @@ -190,13 +183,12 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx, mgr->index == mgr->count) { // Cancel deadline timer, since we're invoking the on_handshake_done // callback now. - grpc_timer_cancel(exec_ctx, &mgr->deadline_timer); - GRPC_CLOSURE_SCHED(exec_ctx, &mgr->on_handshake_done, error); + grpc_timer_cancel(&mgr->deadline_timer); + GRPC_CLOSURE_SCHED(&mgr->on_handshake_done, error); mgr->shutdown = true; } else { - grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index], - mgr->acceptor, &mgr->call_next_handshaker, - &mgr->args); + grpc_handshaker_do_handshake(mgr->handshakers[mgr->index], mgr->acceptor, + &mgr->call_next_handshaker, &mgr->args); } ++mgr->index; return mgr->shutdown; @@ -204,36 +196,36 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx, // A function used as the handshaker-done callback when chaining // handshakers together. 
-static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { +static void call_next_handshaker(void* arg, grpc_error* error) { grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg; gpr_mu_lock(&mgr->mu); - bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error)); + bool done = call_next_handshaker_locked(mgr, GRPC_ERROR_REF(error)); gpr_mu_unlock(&mgr->mu); // If we're invoked the final callback, we won't be coming back // to this function, so we can release our reference to the // handshake manager. if (done) { - grpc_handshake_manager_unref(exec_ctx, mgr); + grpc_handshake_manager_unref(mgr); } } // Callback invoked when deadline is exceeded. -static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { +static void on_timeout(void* arg, grpc_error* error) { grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg; if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled. grpc_handshake_manager_shutdown( - exec_ctx, mgr, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out")); + mgr, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out")); } - grpc_handshake_manager_unref(exec_ctx, mgr); + grpc_handshake_manager_unref(mgr); } -void grpc_handshake_manager_do_handshake( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, - grpc_endpoint* endpoint, const grpc_channel_args* channel_args, - grpc_millis deadline, grpc_tcp_server_acceptor* acceptor, - grpc_iomgr_cb_func on_handshake_done, void* user_data) { +void grpc_handshake_manager_do_handshake(grpc_handshake_manager* mgr, + grpc_endpoint* endpoint, + const grpc_channel_args* channel_args, + grpc_millis deadline, + grpc_tcp_server_acceptor* acceptor, + grpc_iomgr_cb_func on_handshake_done, + void* user_data) { gpr_mu_lock(&mgr->mu); GPR_ASSERT(mgr->index == 0); GPR_ASSERT(!mgr->shutdown); @@ -255,12 +247,12 @@ void grpc_handshake_manager_do_handshake( gpr_ref(&mgr->refs); 
GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &mgr->deadline_timer, deadline, &mgr->on_timeout); + grpc_timer_init(&mgr->deadline_timer, deadline, &mgr->on_timeout); // Start first handshaker, which also owns a ref. gpr_ref(&mgr->refs); - bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE); + bool done = call_next_handshaker_locked(mgr, GRPC_ERROR_NONE); gpr_mu_unlock(&mgr->mu); if (done) { - grpc_handshake_manager_unref(exec_ctx, mgr); + grpc_handshake_manager_unref(mgr); } } diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h index 8ed38c15ba..a7d3e5700f 100644 --- a/src/core/lib/channel/handshaker.h +++ b/src/core/lib/channel/handshaker.h @@ -71,18 +71,17 @@ typedef struct { typedef struct { /// Destroys the handshaker. - void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker); + void (*destroy)(grpc_handshaker* handshaker); /// Shuts down the handshaker (e.g., to clean up when the operation is /// aborted in the middle). - void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker, - grpc_error* why); + void (*shutdown)(grpc_handshaker* handshaker, grpc_error* why); /// Performs handshaking, modifying \a args as needed (e.g., to /// replace \a endpoint with a wrapped endpoint). /// When finished, invokes \a on_handshake_done. /// \a acceptor will be NULL for client-side handshakers. 
- void (*do_handshake)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker, + void (*do_handshake)(grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args); @@ -98,12 +97,9 @@ struct grpc_handshaker { void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, grpc_handshaker* handshaker); -void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker); -void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, grpc_error* why); -void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, +void grpc_handshaker_destroy(grpc_handshaker* handshaker); +void grpc_handshaker_shutdown(grpc_handshaker* handshaker, grpc_error* why); +void grpc_handshaker_do_handshake(grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args); @@ -123,15 +119,13 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr, grpc_handshaker* handshaker); /// Destroys the handshake manager. -void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr); +void grpc_handshake_manager_destroy(grpc_handshake_manager* mgr); /// Shuts down the handshake manager (e.g., to clean up when the operation is /// aborted in the middle). /// The caller must still call grpc_handshake_manager_destroy() after /// calling this function. -void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr, +void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr, grpc_error* why); /// Invokes handshakers in the order they were added. @@ -146,11 +140,13 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, /// GRPC_ERROR_NONE, then handshaking failed and the handshaker has done /// the necessary clean-up. Otherwise, the callback takes ownership of /// the arguments. 
-void grpc_handshake_manager_do_handshake( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, - grpc_endpoint* endpoint, const grpc_channel_args* channel_args, - grpc_millis deadline, grpc_tcp_server_acceptor* acceptor, - grpc_iomgr_cb_func on_handshake_done, void* user_data); +void grpc_handshake_manager_do_handshake(grpc_handshake_manager* mgr, + grpc_endpoint* endpoint, + const grpc_channel_args* channel_args, + grpc_millis deadline, + grpc_tcp_server_acceptor* acceptor, + grpc_iomgr_cb_func on_handshake_done, + void* user_data); /// Add \a mgr to the server side list of all pending handshake managers, the /// list starts with \a *head. @@ -166,7 +162,7 @@ void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head, /// Shutdown all pending handshake managers on the server side. // Not thread-safe. Caller needs to synchronize. void grpc_handshake_manager_pending_list_shutdown_all( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why); + grpc_handshake_manager* head, grpc_error* why); #ifdef __cplusplus } diff --git a/src/core/lib/channel/handshaker_factory.cc b/src/core/lib/channel/handshaker_factory.cc index 4deb280c60..663cdf4a69 100644 --- a/src/core/lib/channel/handshaker_factory.cc +++ b/src/core/lib/channel/handshaker_factory.cc @@ -21,19 +21,19 @@ #include <grpc/support/log.h> void grpc_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) { + grpc_handshaker_factory *handshaker_factory, const grpc_channel_args *args, + grpc_handshake_manager *handshake_mgr) { if (handshaker_factory != NULL) { GPR_ASSERT(handshaker_factory->vtable != NULL); - handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory, - args, handshake_mgr); + handshaker_factory->vtable->add_handshakers(handshaker_factory, args, + handshake_mgr); } } void grpc_handshaker_factory_destroy( - grpc_exec_ctx 
*exec_ctx, grpc_handshaker_factory *handshaker_factory) { + grpc_handshaker_factory *handshaker_factory) { if (handshaker_factory != NULL) { GPR_ASSERT(handshaker_factory->vtable != NULL); - handshaker_factory->vtable->destroy(exec_ctx, handshaker_factory); + handshaker_factory->vtable->destroy(handshaker_factory); } } diff --git a/src/core/lib/channel/handshaker_factory.h b/src/core/lib/channel/handshaker_factory.h index 59008adf05..ac07491d44 100644 --- a/src/core/lib/channel/handshaker_factory.h +++ b/src/core/lib/channel/handshaker_factory.h @@ -33,12 +33,10 @@ extern "C" { typedef struct grpc_handshaker_factory grpc_handshaker_factory; typedef struct { - void (*add_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_handshaker_factory *handshaker_factory, + void (*add_handshakers)(grpc_handshaker_factory *handshaker_factory, const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr); - void (*destroy)(grpc_exec_ctx *exec_ctx, - grpc_handshaker_factory *handshaker_factory); + void (*destroy)(grpc_handshaker_factory *handshaker_factory); } grpc_handshaker_factory_vtable; struct grpc_handshaker_factory { @@ -46,11 +44,11 @@ struct grpc_handshaker_factory { }; void grpc_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr); + grpc_handshaker_factory *handshaker_factory, const grpc_channel_args *args, + grpc_handshake_manager *handshake_mgr); void grpc_handshaker_factory_destroy( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory); + grpc_handshaker_factory *handshaker_factory); #ifdef __cplusplus } diff --git a/src/core/lib/channel/handshaker_registry.cc b/src/core/lib/channel/handshaker_registry.cc index c6bc87d704..098eabf084 100644 --- a/src/core/lib/channel/handshaker_registry.cc +++ b/src/core/lib/channel/handshaker_registry.cc @@ -47,18 +47,17 @@ static void grpc_handshaker_factory_list_register( } static void 
grpc_handshaker_factory_list_add_handshakers( - grpc_exec_ctx* exec_ctx, grpc_handshaker_factory_list* list, - const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) { + grpc_handshaker_factory_list* list, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr) { for (size_t i = 0; i < list->num_factories; ++i) { - grpc_handshaker_factory_add_handshakers(exec_ctx, list->list[i], args, - handshake_mgr); + grpc_handshaker_factory_add_handshakers(list->list[i], args, handshake_mgr); } } static void grpc_handshaker_factory_list_destroy( - grpc_exec_ctx* exec_ctx, grpc_handshaker_factory_list* list) { + grpc_handshaker_factory_list* list) { for (size_t i = 0; i < list->num_factories; ++i) { - grpc_handshaker_factory_destroy(exec_ctx, list->list[i]); + grpc_handshaker_factory_destroy(list->list[i]); } gpr_free(list->list); } @@ -74,10 +73,9 @@ void grpc_handshaker_factory_registry_init() { memset(g_handshaker_factory_lists, 0, sizeof(g_handshaker_factory_lists)); } -void grpc_handshaker_factory_registry_shutdown(grpc_exec_ctx* exec_ctx) { +void grpc_handshaker_factory_registry_shutdown() { for (size_t i = 0; i < NUM_HANDSHAKER_TYPES; ++i) { - grpc_handshaker_factory_list_destroy(exec_ctx, - &g_handshaker_factory_lists[i]); + grpc_handshaker_factory_list_destroy(&g_handshaker_factory_lists[i]); } } @@ -88,11 +86,9 @@ void grpc_handshaker_factory_register(bool at_start, &g_handshaker_factory_lists[handshaker_type], at_start, factory); } -void grpc_handshakers_add(grpc_exec_ctx* exec_ctx, - grpc_handshaker_type handshaker_type, +void grpc_handshakers_add(grpc_handshaker_type handshaker_type, const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) { grpc_handshaker_factory_list_add_handshakers( - exec_ctx, &g_handshaker_factory_lists[handshaker_type], args, - handshake_mgr); + &g_handshaker_factory_lists[handshaker_type], args, handshake_mgr); } diff --git a/src/core/lib/channel/handshaker_registry.h 
b/src/core/lib/channel/handshaker_registry.h index ddd280bea8..edcf205fee 100644 --- a/src/core/lib/channel/handshaker_registry.h +++ b/src/core/lib/channel/handshaker_registry.h @@ -35,7 +35,7 @@ typedef enum { } grpc_handshaker_type; void grpc_handshaker_factory_registry_init(); -void grpc_handshaker_factory_registry_shutdown(grpc_exec_ctx* exec_ctx); +void grpc_handshaker_factory_registry_shutdown(); /// Registers a new handshaker factory. Takes ownership. /// If \a at_start is true, the new handshaker will be at the beginning of @@ -44,8 +44,7 @@ void grpc_handshaker_factory_register(bool at_start, grpc_handshaker_type handshaker_type, grpc_handshaker_factory* factory); -void grpc_handshakers_add(grpc_exec_ctx* exec_ctx, - grpc_handshaker_type handshaker_type, +void grpc_handshakers_add(grpc_handshaker_type handshaker_type, const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr); diff --git a/src/core/lib/compression/message_compress.cc b/src/core/lib/compression/message_compress.cc index c051e28864..aa43a53f2b 100644 --- a/src/core/lib/compression/message_compress.cc +++ b/src/core/lib/compression/message_compress.cc @@ -29,8 +29,8 @@ #define OUTPUT_BLOCK_SIZE 1024 -static int zlib_body(grpc_exec_ctx* exec_ctx, z_stream* zs, - grpc_slice_buffer* input, grpc_slice_buffer* output, +static int zlib_body(z_stream* zs, grpc_slice_buffer* input, + grpc_slice_buffer* output, int (*flate)(z_stream* zs, int flush)) { int r; int flush; @@ -74,7 +74,7 @@ static int zlib_body(grpc_exec_ctx* exec_ctx, z_stream* zs, return 1; error: - grpc_slice_unref_internal(exec_ctx, outbuf); + grpc_slice_unref_internal(outbuf); return 0; } @@ -84,8 +84,8 @@ static void* zalloc_gpr(void* opaque, unsigned int items, unsigned int size) { static void zfree_gpr(void* opaque, void* address) { gpr_free(address); } -static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, - grpc_slice_buffer* output, int gzip) { +static int zlib_compress(grpc_slice_buffer* 
input, grpc_slice_buffer* output, + int gzip) { z_stream zs; int r; size_t i; @@ -97,11 +97,10 @@ static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 16 : 0), 8, Z_DEFAULT_STRATEGY); GPR_ASSERT(r == Z_OK); - r = zlib_body(exec_ctx, &zs, input, output, deflate) && - output->length < input->length; + r = zlib_body(&zs, input, output, deflate) && output->length < input->length; if (!r) { for (i = count_before; i < output->count; i++) { - grpc_slice_unref_internal(exec_ctx, output->slices[i]); + grpc_slice_unref_internal(output->slices[i]); } output->count = count_before; output->length = length_before; @@ -110,8 +109,8 @@ static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, return r; } -static int zlib_decompress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, - grpc_slice_buffer* output, int gzip) { +static int zlib_decompress(grpc_slice_buffer* input, grpc_slice_buffer* output, + int gzip) { z_stream zs; int r; size_t i; @@ -122,10 +121,10 @@ static int zlib_decompress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, zs.zfree = zfree_gpr; r = inflateInit2(&zs, 15 | (gzip ? 
16 : 0)); GPR_ASSERT(r == Z_OK); - r = zlib_body(exec_ctx, &zs, input, output, inflate); + r = zlib_body(&zs, input, output, inflate); if (!r) { for (i = count_before; i < output->count; i++) { - grpc_slice_unref_internal(exec_ctx, output->slices[i]); + grpc_slice_unref_internal(output->slices[i]); } output->count = count_before; output->length = length_before; @@ -142,8 +141,7 @@ static int copy(grpc_slice_buffer* input, grpc_slice_buffer* output) { return 1; } -static int compress_inner(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +static int compress_inner(grpc_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output) { switch (algorithm) { case GRPC_COMPRESS_NONE: @@ -151,9 +149,9 @@ static int compress_inner(grpc_exec_ctx* exec_ctx, rely on that here */ return 0; case GRPC_COMPRESS_DEFLATE: - return zlib_compress(exec_ctx, input, output, 0); + return zlib_compress(input, output, 0); case GRPC_COMPRESS_GZIP: - return zlib_compress(exec_ctx, input, output, 1); + return zlib_compress(input, output, 1); case GRPC_COMPRESS_ALGORITHMS_COUNT: break; } @@ -161,26 +159,24 @@ static int compress_inner(grpc_exec_ctx* exec_ctx, return 0; } -int grpc_msg_compress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_compress(grpc_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output) { - if (!compress_inner(exec_ctx, algorithm, input, output)) { + if (!compress_inner(algorithm, input, output)) { copy(input, output); return 0; } return 1; } -int grpc_msg_decompress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_decompress(grpc_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output) { switch (algorithm) { case GRPC_COMPRESS_NONE: return copy(input, output); case GRPC_COMPRESS_DEFLATE: - return zlib_decompress(exec_ctx, input, output, 0); + return zlib_decompress(input, output, 0); case GRPC_COMPRESS_GZIP: - 
return zlib_decompress(exec_ctx, input, output, 1); + return zlib_decompress(input, output, 1); case GRPC_COMPRESS_ALGORITHMS_COUNT: break; } diff --git a/src/core/lib/compression/message_compress.h b/src/core/lib/compression/message_compress.h index fffe175fd2..9e109127eb 100644 --- a/src/core/lib/compression/message_compress.h +++ b/src/core/lib/compression/message_compress.h @@ -29,15 +29,13 @@ extern "C" { /* compress 'input' to 'output' using 'algorithm'. On success, appends compressed slices to output and returns 1. On failure, appends uncompressed slices to output and returns 0. */ -int grpc_msg_compress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_compress(grpc_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output); /* decompress 'input' to 'output' using 'algorithm'. On success, appends slices to output and returns 1. On failure, output is unchanged, and returns 0. */ -int grpc_msg_decompress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_decompress(grpc_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output); #ifdef __cplusplus diff --git a/src/core/lib/compression/stream_compression_gzip.cc b/src/core/lib/compression/stream_compression_gzip.cc index 087b018be5..d1d5564559 100644 --- a/src/core/lib/compression/stream_compression_gzip.cc +++ b/src/core/lib/compression/stream_compression_gzip.cc @@ -40,7 +40,7 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, /* Full flush is not allowed when inflating. 
*/ GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH))); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; int r; bool eoc = false; size_t original_max_output_size = max_output_size; @@ -57,8 +57,8 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, r = ctx->flate(&ctx->zs, Z_NO_FLUSH); if (r < 0 && r != Z_BUF_ERROR) { gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); + grpc_slice_unref_internal(slice_out); + grpc_exec_ctx_finish(); return false; } else if (r == Z_STREAM_END && ctx->flate == inflate) { eoc = true; @@ -69,7 +69,7 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in, GRPC_SLICE_LENGTH(slice))); } - grpc_slice_unref_internal(&exec_ctx, slice); + grpc_slice_unref_internal(slice); } if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) { GPR_ASSERT(in->length == 0); @@ -88,8 +88,8 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, break; default: gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); + grpc_slice_unref_internal(slice_out); + grpc_exec_ctx_finish(); return false; } } else if (flush == Z_FINISH) { @@ -104,8 +104,8 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, break; default: gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); + grpc_slice_unref_internal(slice_out); + grpc_exec_ctx_finish(); return false; } } @@ -117,11 +117,11 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, slice_out.data.refcounted.length -= ctx->zs.avail_out; grpc_slice_buffer_add(out, slice_out); } else { - grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_slice_unref_internal(slice_out); } max_output_size -= (slice_size - ctx->zs.avail_out); } - 
grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); if (end_of_context) { *end_of_context = eoc; } diff --git a/src/core/lib/debug/stats.cc b/src/core/lib/debug/stats.cc index 4096384dd9..3117de920e 100644 --- a/src/core/lib/debug/stats.cc +++ b/src/core/lib/debug/stats.cc @@ -62,9 +62,9 @@ void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, } } -int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, - const int *table, int table_size) { - GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx); +int grpc_stats_histo_find_bucket_slow(int value, const int *table, + int table_size) { + GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(); const int *const start = table; while (table_size > 0) { int step = table_size / 2; diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h index fec1d651e6..69d1a9e6bf 100644 --- a/src/core/lib/debug/stats.h +++ b/src/core/lib/debug/stats.h @@ -34,17 +34,15 @@ typedef struct grpc_stats_data { extern grpc_stats_data *grpc_stats_per_cpu_storage; -#define GRPC_THREAD_STATS_DATA(exec_ctx) \ +#define GRPC_THREAD_STATS_DATA() \ (&grpc_stats_per_cpu_storage[(exec_ctx)->starting_cpu]) -#define GRPC_STATS_INC_COUNTER(exec_ctx, ctr) \ - (gpr_atm_no_barrier_fetch_add( \ - &GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1)) +#define GRPC_STATS_INC_COUNTER(ctr) \ + (gpr_atm_no_barrier_fetch_add(&GRPC_THREAD_STATS_DATA()->counters[(ctr)], 1)) -#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \ - (gpr_atm_no_barrier_fetch_add( \ - &GRPC_THREAD_STATS_DATA((exec_ctx)) \ - ->histograms[histogram##_FIRST_SLOT + (index)], \ +#define GRPC_STATS_INC_HISTOGRAM(histogram, index) \ + (gpr_atm_no_barrier_fetch_add( \ + &GRPC_THREAD_STATS_DATA()->histograms[histogram##_FIRST_SLOT + (index)], \ 1)) void grpc_stats_init(void); @@ -54,8 +52,8 @@ void grpc_stats_collect(grpc_stats_data *output); void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, grpc_stats_data *c); char 
*grpc_stats_data_as_json(const grpc_stats_data *data); -int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, - const int *table, int table_size); +int grpc_stats_histo_find_bucket_slow(int value, const int *table, + int table_size); double grpc_stats_histo_percentile(const grpc_stats_data *data, grpc_stats_histograms histogram, double percentile); diff --git a/src/core/lib/debug/stats_data.cc b/src/core/lib/debug/stats_data.cc index 5d737c56cb..9b087a5ed1 100644 --- a/src/core/lib/debug/stats_data.cc +++ b/src/core/lib/debug/stats_data.cc @@ -339,11 +339,10 @@ const uint8_t grpc_stats_table_7[102] = { 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51}; const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64}; const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5}; -void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_call_initial_size(int value) { value = GPR_CLAMP(value, 0, 262144); if (value < 6) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, value); return; } union { @@ -356,19 +355,17 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6; _bkt.dbl = grpc_stats_table_0[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_0, 64)); } -void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { 
+void grpc_stats_inc_poll_events_returned(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 29) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value); return; } union { @@ -381,20 +378,17 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29; _bkt.dbl = grpc_stats_table_2[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_2, 128)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 128)); } -void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_write_size(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, value); return; } union { @@ -407,19 +401,17 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + 
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_write_iov_size(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value); return; } union { @@ -432,19 +424,17 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_read_size(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, value); return; } union { @@ -457,19 +447,17 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), 
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_read_offer(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, value); return; } union { @@ -482,20 +470,18 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_tcp_read_offer_iov_size(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, + value); return; } union { @@ -508,21 +494,19 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), 
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_http2_send_message_size(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + value); return; } union { @@ -535,22 +519,19 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_http2_send_initial_metadata_per_write( - grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_http2_send_initial_metadata_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, - value); + 
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, value); return; } union { @@ -564,21 +545,18 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write( _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, - bucket); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, bucket); return; } GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, - grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6, - 64)); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_http2_send_message_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, + value); return; } union { @@ -591,22 +569,19 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_trailing_metadata_per_write( - 
grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_http2_send_trailing_metadata_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, - value); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, value); return; } union { @@ -620,21 +595,18 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write( _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, - bucket); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, bucket); return; } GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, - grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6, - 64)); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_http2_send_flowctl_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, + value); return; } union { @@ -647,20 +619,18 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, - 
grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_server_cqs_checked(int value) { value = GPR_CLAMP(value, 0, 64); if (value < 3) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value); return; } union { @@ -673,13 +643,12 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3; _bkt.dbl = grpc_stats_table_8[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_8, 8)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_8, 8)); } const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 8}; @@ -691,7 +660,7 @@ const int *const grpc_stats_histo_bucket_boundaries[13] = { grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_8}; -void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = { +void (*const grpc_stats_inc_histogram[13])(int x) = { grpc_stats_inc_call_initial_size, grpc_stats_inc_poll_events_returned, grpc_stats_inc_tcp_write_size, diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 031942df5c..a92153ee89 100644 --- 
a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -176,331 +176,263 @@ typedef enum { GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8, GRPC_STATS_HISTOGRAM_BUCKETS = 840 } grpc_stats_histogram_constants; -#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) -#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) -#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED) -#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED) -#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED) -#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED) -#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) -#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) -#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK) -#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER) -#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN) -#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD) -#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV) -#define 
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD) -#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) -#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) -#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) -#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED) -#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS) -#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES) -#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL) -#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA) -#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE) -#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA) -#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA) -#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE) -#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA) -#define 
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES) -#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT) -#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN) -#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED) -#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED) -#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_CLIENT_CALLS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) +#define GRPC_STATS_INC_SERVER_CALLS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) +#define GRPC_STATS_INC_CQS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQS_CREATED) +#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED) +#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED) +#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED) +#define GRPC_STATS_INC_SYSCALL_POLL() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_POLL) +#define GRPC_STATS_INC_SYSCALL_WAIT() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WAIT) +#define GRPC_STATS_INC_POLLSET_KICK() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK) +#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER() \ + 
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER) +#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN) +#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD) +#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV) +#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD) +#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) +#define GRPC_STATS_INC_SYSCALL_WRITE() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WRITE) +#define GRPC_STATS_INC_SYSCALL_READ() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_READ) +#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED) +#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS) +#define GRPC_STATS_INC_HTTP2_OP_BATCHES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_BATCHES) +#define GRPC_STATS_INC_HTTP2_OP_CANCEL() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_CANCEL) +#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE) +#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE) +#define 
GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA) +#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES) +#define GRPC_STATS_INC_HTTP2_PINGS_SENT() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PINGS_SENT) +#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN) +#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED) +#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED) +#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \ - exec_ctx) \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA() \ 
GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define 
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING() \ + GRPC_STATS_INC_COUNTER( \ 
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM) -#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN) -#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ 
- GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V) -#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED) -#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN) -#define GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY) -#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64) -#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V) -#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \ - 
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED) -#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN) -#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY) -#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64) -#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED) -#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS) -#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM() \ GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS) -#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED) -#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED) -#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS) -#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL) -#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED) -#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS) -#define 
GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS) -#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF) -#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED) -#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) -#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES) -#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS) -#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED) -#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES) -#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES) -#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES) -#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \ - grpc_stats_inc_call_initial_size((exec_ctx), (int)(value)) -void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \ - grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value)) -void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ - 
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \ - grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_read_offer_iov_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \ - grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_initial_metadata_per_write( - grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, - int x); -#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \ - (int)(value)) -void grpc_stats_inc_http2_send_trailing_metadata_per_write( - grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), 
(int)(value)) -void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, - int x); -#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \ - grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value)) -void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x); + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM) +#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN) +#define GRPC_STATS_INC_HPACK_RECV_INDEXED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_INDEXED) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V) +#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED) +#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN) +#define GRPC_STATS_INC_HPACK_RECV_BINARY() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY) +#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64) +#define GRPC_STATS_INC_HPACK_SEND_INDEXED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_INDEXED) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX() \ + 
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V) +#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED) +#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN) +#define GRPC_STATS_INC_HPACK_SEND_BINARY() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY) +#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64) +#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED) +#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS) +#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS) +#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED) +#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED) +#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS) +#define 
GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL) +#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF) +#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED) +#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) +#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES) +#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS) +#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED) +#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES) +#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES) +#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES) +#define GRPC_STATS_INC_CALL_INITIAL_SIZE(value) \ + grpc_stats_inc_call_initial_size((int)(value)) +void grpc_stats_inc_call_initial_size(int x); +#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(value) \ + grpc_stats_inc_poll_events_returned((int)(value)) +void grpc_stats_inc_poll_events_returned(int x); +#define 
GRPC_STATS_INC_TCP_WRITE_SIZE(value) \ + grpc_stats_inc_tcp_write_size((int)(value)) +void grpc_stats_inc_tcp_write_size(int x); +#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(value) \ + grpc_stats_inc_tcp_write_iov_size((int)(value)) +void grpc_stats_inc_tcp_write_iov_size(int x); +#define GRPC_STATS_INC_TCP_READ_SIZE(value) \ + grpc_stats_inc_tcp_read_size((int)(value)) +void grpc_stats_inc_tcp_read_size(int x); +#define GRPC_STATS_INC_TCP_READ_OFFER(value) \ + grpc_stats_inc_tcp_read_offer((int)(value)) +void grpc_stats_inc_tcp_read_offer(int x); +#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(value) \ + grpc_stats_inc_tcp_read_offer_iov_size((int)(value)) +void grpc_stats_inc_tcp_read_offer_iov_size(int x); +#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(value) \ + grpc_stats_inc_http2_send_message_size((int)(value)) +void grpc_stats_inc_http2_send_message_size(int x); +#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(value) \ + grpc_stats_inc_http2_send_initial_metadata_per_write((int)(value)) +void grpc_stats_inc_http2_send_initial_metadata_per_write(int x); +#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(value) \ + grpc_stats_inc_http2_send_message_per_write((int)(value)) +void grpc_stats_inc_http2_send_message_per_write(int x); +#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(value) \ + grpc_stats_inc_http2_send_trailing_metadata_per_write((int)(value)) +void grpc_stats_inc_http2_send_trailing_metadata_per_write(int x); +#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(value) \ + grpc_stats_inc_http2_send_flowctl_per_write((int)(value)) +void grpc_stats_inc_http2_send_flowctl_per_write(int x); +#define GRPC_STATS_INC_SERVER_CQS_CHECKED(value) \ + grpc_stats_inc_server_cqs_checked((int)(value)) +void grpc_stats_inc_server_cqs_checked(int x); extern const int grpc_stats_histo_buckets[13]; extern const int grpc_stats_histo_start[13]; extern const int *const grpc_stats_histo_bucket_boundaries[13]; -extern void (*const 
grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, - int x); +extern void (*const grpc_stats_inc_histogram[13])(int x); #ifdef __cplusplus } diff --git a/src/core/lib/http/httpcli.cc b/src/core/lib/http/httpcli.cc index c96800b85c..02da35ea99 100644 --- a/src/core/lib/http/httpcli.cc +++ b/src/core/lib/http/httpcli.cc @@ -63,13 +63,11 @@ typedef struct { static grpc_httpcli_get_override g_get_override = NULL; static grpc_httpcli_post_override g_post_override = NULL; -static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *endpoint, const char *host, - grpc_millis deadline, - void (*on_done)(grpc_exec_ctx *exec_ctx, - void *arg, +static void plaintext_handshake(void *arg, grpc_endpoint *endpoint, + const char *host, grpc_millis deadline, + void (*on_done)(void *arg, grpc_endpoint *endpoint)) { - on_done(exec_ctx, arg, endpoint); + on_done(arg, endpoint); } const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http", @@ -79,34 +77,31 @@ void grpc_httpcli_context_init(grpc_httpcli_context *context) { context->pollset_set = grpc_pollset_set_create(); } -void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, - grpc_httpcli_context *context) { - grpc_pollset_set_destroy(exec_ctx, context->pollset_set); +void grpc_httpcli_context_destroy(grpc_httpcli_context *context) { + grpc_pollset_set_destroy(context->pollset_set); } -static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, - grpc_error *due_to_error); +static void next_address(internal_request *req, grpc_error *due_to_error); -static void finish(grpc_exec_ctx *exec_ctx, internal_request *req, - grpc_error *error) { - grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent, +static void finish(internal_request *req, grpc_error *error) { + grpc_polling_entity_del_from_pollset_set(req->pollent, req->context->pollset_set); - GRPC_CLOSURE_SCHED(exec_ctx, req->on_done, error); + GRPC_CLOSURE_SCHED(req->on_done, error); grpc_http_parser_destroy(&req->parser); 
if (req->addresses != NULL) { grpc_resolved_addresses_destroy(req->addresses); } if (req->ep != NULL) { - grpc_endpoint_destroy(exec_ctx, req->ep); + grpc_endpoint_destroy(req->ep); } - grpc_slice_unref_internal(exec_ctx, req->request_text); + grpc_slice_unref_internal(req->request_text); gpr_free(req->host); gpr_free(req->ssl_host_override); grpc_iomgr_unregister_object(&req->iomgr_obj); - grpc_slice_buffer_destroy_internal(exec_ctx, &req->incoming); - grpc_slice_buffer_destroy_internal(exec_ctx, &req->outgoing); + grpc_slice_buffer_destroy_internal(&req->incoming); + grpc_slice_buffer_destroy_internal(&req->outgoing); GRPC_ERROR_UNREF(req->overall_error); - grpc_resource_quota_unref_internal(exec_ctx, req->resource_quota); + grpc_resource_quota_unref_internal(req->resource_quota); gpr_free(req); } @@ -124,12 +119,11 @@ static void append_error(internal_request *req, grpc_error *error) { gpr_free(addr_text); } -static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) { - grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read); +static void do_read(internal_request *req) { + grpc_endpoint_read(req->ep, &req->incoming, &req->on_read); } -static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { +static void on_read(void *user_data, grpc_error *error) { internal_request *req = (internal_request *)user_data; size_t i; @@ -139,76 +133,70 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, grpc_error *err = grpc_http_parser_parse(&req->parser, req->incoming.slices[i], NULL); if (err != GRPC_ERROR_NONE) { - finish(exec_ctx, req, err); + finish(req, err); return; } } } if (error == GRPC_ERROR_NONE) { - do_read(exec_ctx, req); + do_read(req); } else if (!req->have_read_byte) { - next_address(exec_ctx, req, GRPC_ERROR_REF(error)); + next_address(req, GRPC_ERROR_REF(error)); } else { - finish(exec_ctx, req, grpc_http_parser_eof(&req->parser)); + finish(req, grpc_http_parser_eof(&req->parser)); } } -static void 
on_written(grpc_exec_ctx *exec_ctx, internal_request *req) { - do_read(exec_ctx, req); -} +static void on_written(internal_request *req) { do_read(req); } -static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void done_write(void *arg, grpc_error *error) { internal_request *req = (internal_request *)arg; if (error == GRPC_ERROR_NONE) { - on_written(exec_ctx, req); + on_written(req); } else { - next_address(exec_ctx, req, GRPC_ERROR_REF(error)); + next_address(req, GRPC_ERROR_REF(error)); } } -static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) { +static void start_write(internal_request *req) { grpc_slice_ref_internal(req->request_text); grpc_slice_buffer_add(&req->outgoing, req->request_text); - grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write); + grpc_endpoint_write(req->ep, &req->outgoing, &req->done_write); } -static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *ep) { +static void on_handshake_done(void *arg, grpc_endpoint *ep) { internal_request *req = (internal_request *)arg; if (!ep) { - next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Unexplained handshake failure")); + next_address(req, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Unexplained handshake failure")); return; } req->ep = ep; - start_write(exec_ctx, req); + start_write(req); } -static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_connected(void *arg, grpc_error *error) { internal_request *req = (internal_request *)arg; if (!req->ep) { - next_address(exec_ctx, req, GRPC_ERROR_REF(error)); + next_address(req, GRPC_ERROR_REF(error)); return; } req->handshaker->handshake( - exec_ctx, req, req->ep, - req->ssl_host_override ? req->ssl_host_override : req->host, + req, req->ep, req->ssl_host_override ? 
req->ssl_host_override : req->host, req->deadline, on_handshake_done); } -static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, - grpc_error *error) { +static void next_address(internal_request *req, grpc_error *error) { grpc_resolved_address *addr; if (error != GRPC_ERROR_NONE) { append_error(req, error); } if (req->next_address == req->addresses->naddrs) { - finish(exec_ctx, req, + finish(req, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failed HTTP requests to all targets", &req->overall_error, 1)); return; @@ -220,23 +208,21 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota, grpc_resource_quota_arg_vtable()); grpc_channel_args args = {1, &arg}; - grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep, - req->context->pollset_set, &args, addr, - req->deadline); + grpc_tcp_client_connect(&req->connected, &req->ep, req->context->pollset_set, + &args, addr, req->deadline); } -static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void on_resolved(void *arg, grpc_error *error) { internal_request *req = (internal_request *)arg; if (error != GRPC_ERROR_NONE) { - finish(exec_ctx, req, GRPC_ERROR_REF(error)); + finish(req, GRPC_ERROR_REF(error)); return; } req->next_address = 0; - next_address(exec_ctx, req, GRPC_ERROR_NONE); + next_address(req, GRPC_ERROR_NONE); } -static void internal_request_begin(grpc_exec_ctx *exec_ctx, - grpc_httpcli_context *context, +static void internal_request_begin(grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, @@ -266,33 +252,31 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx, req->ssl_host_override = gpr_strdup(request->ssl_host_override); GPR_ASSERT(pollent); - grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent, + grpc_polling_entity_add_to_pollset_set(req->pollent, 
req->context->pollset_set); grpc_resolve_address( - exec_ctx, request->host, req->handshaker->default_port, - req->context->pollset_set, + request->host, req->handshaker->default_port, req->context->pollset_set, GRPC_CLOSURE_CREATE(on_resolved, req, grpc_schedule_on_exec_ctx), &req->addresses); } -void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, +void grpc_httpcli_get(grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { char *name; - if (g_get_override && - g_get_override(exec_ctx, request, deadline, on_done, response)) { + if (g_get_override && g_get_override(request, deadline, on_done, response)) { return; } gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path); - internal_request_begin(exec_ctx, context, pollent, resource_quota, request, - deadline, on_done, response, name, + internal_request_begin(context, pollent, resource_quota, request, deadline, + on_done, response, name, grpc_httpcli_format_get_request(request)); gpr_free(name); } -void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, +void grpc_httpcli_post(grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, @@ -300,16 +284,14 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, grpc_millis deadline, grpc_closure *on_done, grpc_httpcli_response *response) { char *name; - if (g_post_override && - g_post_override(exec_ctx, request, body_bytes, body_size, deadline, - on_done, response)) { + if (g_post_override && g_post_override(request, body_bytes, body_size, + deadline, on_done, response)) { return; } gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path); internal_request_begin( - exec_ctx, context, pollent, resource_quota, request, deadline, 
on_done, - response, name, - grpc_httpcli_format_post_request(request, body_bytes, body_size)); + context, pollent, resource_quota, request, deadline, on_done, response, + name, grpc_httpcli_format_post_request(request, body_bytes, body_size)); gpr_free(name); } diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h index 76b790fa8a..dd372775ec 100644 --- a/src/core/lib/http/httpcli.h +++ b/src/core/lib/http/httpcli.h @@ -45,10 +45,9 @@ typedef struct grpc_httpcli_context { typedef struct { const char *default_port; - void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint, - const char *host, grpc_millis deadline, - void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *endpoint)); + void (*handshake)(void *arg, grpc_endpoint *endpoint, const char *host, + grpc_millis deadline, + void (*on_done)(void *arg, grpc_endpoint *endpoint)); } grpc_httpcli_handshaker; extern const grpc_httpcli_handshaker grpc_httpcli_plaintext; @@ -72,8 +71,7 @@ typedef struct grpc_httpcli_request { typedef struct grpc_http_response grpc_httpcli_response; void grpc_httpcli_context_init(grpc_httpcli_context *context); -void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, - grpc_httpcli_context *context); +void grpc_httpcli_context_destroy(grpc_httpcli_context *context); /* Asynchronously perform a HTTP GET. 
'context' specifies the http context under which to do the get @@ -84,7 +82,7 @@ void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, destroyed once the call returns 'deadline' contains a deadline for the request (or gpr_inf_future) 'on_response' is a callback to report results to */ -void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, +void grpc_httpcli_get(grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, grpc_millis deadline, @@ -105,7 +103,7 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, lifetime of the request 'on_response' is a callback to report results to Does not support ?var1=val1&var2=val2 in the path. */ -void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, +void grpc_httpcli_post(grpc_httpcli_context *context, grpc_polling_entity *pollent, grpc_resource_quota *resource_quota, const grpc_httpcli_request *request, @@ -114,15 +112,16 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, grpc_httpcli_response *response); /* override functions return 1 if they handled the request, 0 otherwise */ -typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx, - const grpc_httpcli_request *request, +typedef int (*grpc_httpcli_get_override)(const grpc_httpcli_request *request, grpc_millis deadline, grpc_closure *on_complete, grpc_httpcli_response *response); -typedef int (*grpc_httpcli_post_override)( - grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, grpc_millis deadline, - grpc_closure *on_complete, grpc_httpcli_response *response); +typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request *request, + const char *body_bytes, + size_t body_size, + grpc_millis deadline, + grpc_closure *on_complete, + grpc_httpcli_response *response); void 
grpc_httpcli_set_override(grpc_httpcli_get_override get, grpc_httpcli_post_override post); diff --git a/src/core/lib/http/httpcli_security_connector.cc b/src/core/lib/http/httpcli_security_connector.cc index d832dacb69..814f75cbfa 100644 --- a/src/core/lib/http/httpcli_security_connector.cc +++ b/src/core/lib/http/httpcli_security_connector.cc @@ -38,8 +38,7 @@ typedef struct { char *secure_peer_name; } grpc_httpcli_ssl_channel_security_connector; -static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { +static void httpcli_ssl_destroy(grpc_security_connector *sc) { grpc_httpcli_ssl_channel_security_connector *c = (grpc_httpcli_ssl_channel_security_connector *)sc; if (c->handshaker_factory != NULL) { @@ -50,8 +49,7 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx, gpr_free(sc); } -static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, +static void httpcli_ssl_add_handshakers(grpc_channel_security_connector *sc, grpc_handshake_manager *handshake_mgr) { grpc_httpcli_ssl_channel_security_connector *c = (grpc_httpcli_ssl_channel_security_connector *)sc; @@ -65,13 +63,11 @@ static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx, } } grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_adapter_handshaker(handshaker), &sc->base)); + handshake_mgr, grpc_security_handshaker_create( + tsi_create_adapter_handshaker(handshaker), &sc->base)); } -static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, +static void httpcli_ssl_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { grpc_httpcli_ssl_channel_security_connector *c = @@ -87,7 +83,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); } - GRPC_CLOSURE_SCHED(exec_ctx, 
on_peer_checked, error); + GRPC_CLOSURE_SCHED(on_peer_checked, error); tsi_peer_destruct(&peer); } @@ -104,8 +100,8 @@ static grpc_security_connector_vtable httpcli_ssl_vtable = { httpcli_ssl_destroy, httpcli_ssl_check_peer, httpcli_ssl_cmp}; static grpc_security_status httpcli_ssl_channel_security_connector_create( - grpc_exec_ctx *exec_ctx, const char *pem_root_certs, - const char *secure_peer_name, grpc_channel_security_connector **sc) { + const char *pem_root_certs, const char *secure_peer_name, + grpc_channel_security_connector **sc) { tsi_result result = TSI_OK; grpc_httpcli_ssl_channel_security_connector *c; @@ -128,7 +124,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create( if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", tsi_result_to_string(result)); - httpcli_ssl_destroy(exec_ctx, &c->base.base); + httpcli_ssl_destroy(&c->base.base); *sc = NULL; return GRPC_SECURITY_ERROR; } @@ -144,40 +140,37 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create( /* handshaker */ typedef struct { - void (*func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint); + void (*func)(void *arg, grpc_endpoint *endpoint); void *arg; grpc_handshake_manager *handshake_mgr; } on_done_closure; -static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_handshake_done(void *arg, grpc_error *error) { grpc_handshaker_args *args = (grpc_handshaker_args *)arg; on_done_closure *c = (on_done_closure *)args->user_data; if (error != GRPC_ERROR_NONE) { const char *msg = grpc_error_string(error); gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg); - c->func(exec_ctx, c->arg, NULL); + c->func(c->arg, NULL); } else { - grpc_channel_args_destroy(exec_ctx, args->args); - grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer); + grpc_channel_args_destroy(args->args); + grpc_slice_buffer_destroy_internal(args->read_buffer); 
gpr_free(args->read_buffer); - c->func(exec_ctx, c->arg, args->endpoint); + c->func(c->arg, args->endpoint); } - grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); + grpc_handshake_manager_destroy(c->handshake_mgr); gpr_free(c); } -static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *tcp, const char *host, +static void ssl_handshake(void *arg, grpc_endpoint *tcp, const char *host, grpc_millis deadline, - void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *endpoint)) { + void (*on_done)(void *arg, grpc_endpoint *endpoint)) { on_done_closure *c = (on_done_closure *)gpr_malloc(sizeof(*c)); const char *pem_root_certs = grpc_get_default_ssl_roots(); if (pem_root_certs == NULL) { gpr_log(GPR_ERROR, "Could not get default pem root certs."); - on_done(exec_ctx, arg, NULL); + on_done(arg, NULL); gpr_free(c); return; } @@ -185,15 +178,15 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg, c->arg = arg; grpc_channel_security_connector *sc = NULL; GPR_ASSERT(httpcli_ssl_channel_security_connector_create( - exec_ctx, pem_root_certs, host, &sc) == GRPC_SECURITY_OK); + pem_root_certs, host, &sc) == GRPC_SECURITY_OK); grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base); grpc_channel_args args = {1, &channel_arg}; c->handshake_mgr = grpc_handshake_manager_create(); - grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, &args, c->handshake_mgr); + grpc_handshakers_add(HANDSHAKER_CLIENT, &args, c->handshake_mgr); grpc_handshake_manager_do_handshake( - exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline, + c->handshake_mgr, tcp, NULL /* channel_args */, deadline, NULL /* acceptor */, on_handshake_done, c /* user_data */); - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "httpcli"); + GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli"); } const grpc_httpcli_handshaker grpc_httpcli_ssl = {"https", ssl_handshake}; diff --git a/src/core/lib/iomgr/block_annotate.h 
b/src/core/lib/iomgr/block_annotate.h index fcbfe9eb1a..9db3cf0199 100644 --- a/src/core/lib/iomgr/block_annotate.h +++ b/src/core/lib/iomgr/block_annotate.h @@ -43,10 +43,10 @@ void gpr_thd_end_blocking_region(); do { \ gpr_thd_end_blocking_region(); \ } while (0) -#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \ - do { \ - gpr_thd_end_blocking_region(); \ - grpc_exec_ctx_invalidate_now((ec)); \ +#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX() \ + do { \ + gpr_thd_end_blocking_region(); \ + grpc_exec_ctx_invalidate_now(); \ } while (0) #else #define GRPC_SCHEDULING_START_BLOCKING_REGION \ @@ -55,9 +55,9 @@ void gpr_thd_end_blocking_region(); #define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \ do { \ } while (0) -#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \ - do { \ - grpc_exec_ctx_invalidate_now((ec)); \ +#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX() \ + do { \ + grpc_exec_ctx_invalidate_now(); \ } while (0) #endif diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc index d45719608b..c2f5b4bb84 100644 --- a/src/core/lib/iomgr/call_combiner.cc +++ b/src/core/lib/iomgr/call_combiner.cc @@ -57,8 +57,7 @@ void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) { #define DEBUG_FMT_ARGS #endif -void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_start(grpc_call_combiner* call_combiner, grpc_closure* closure, grpc_error* error DEBUG_ARGS, const char* reason) { @@ -76,15 +75,16 @@ void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, prev_size + 1); } - GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx); + GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(); if (prev_size == 0) { - GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx); + GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(); + 
GPR_TIMER_MARK("call_combiner_initiate", 0); if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY"); } // Queue was empty, so execute this closure immediately. - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + GRPC_CLOSURE_SCHED(closure, error); } else { if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { gpr_log(GPR_INFO, " QUEUING"); @@ -96,8 +96,7 @@ void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, GPR_TIMER_END("call_combiner_start", 0); } -void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner DEBUG_ARGS, +void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS, const char* reason) { GPR_TIMER_BEGIN("call_combiner_stop", 0); if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { @@ -132,7 +131,7 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s", closure, grpc_error_string(closure->error_data.error)); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error); + GRPC_CLOSURE_SCHED(closure, closure->error_data.error); break; } } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { @@ -141,10 +140,9 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, GPR_TIMER_END("call_combiner_stop", 0); } -void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner, grpc_closure* closure) { - GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx); + GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(); while (true) { // Decode original state. 
gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state); @@ -158,7 +156,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, "for pre-existing cancellation", call_combiner, closure); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error)); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(original_error)); break; } else { if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state, @@ -177,7 +175,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, "call_combiner=%p: scheduling old cancel callback=%p", call_combiner, closure); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } break; } @@ -186,10 +184,9 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, } } -void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner, grpc_error* error) { - GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx); + GRPC_STATS_INC_CALL_COMBINER_CANCELLED(); while (true) { gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state); grpc_error* original_error = decode_cancel_state_error(original_state); @@ -206,7 +203,7 @@ void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, "call_combiner=%p: scheduling notify_on_cancel callback=%p", call_combiner, notify_on_cancel); } - GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(notify_on_cancel, GRPC_ERROR_REF(error)); } break; } diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h index 527f84fce0..364f77a167 100644 --- a/src/core/lib/iomgr/call_combiner.h +++ b/src/core/lib/iomgr/call_combiner.h @@ -57,37 +57,29 @@ void grpc_call_combiner_init(grpc_call_combiner* call_combiner); void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner); #ifndef NDEBUG -#define GRPC_CALL_COMBINER_START(exec_ctx, 
call_combiner, closure, error, \ - reason) \ - grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \ - __FILE__, __LINE__, (reason)) -#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \ - grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \ - (reason)) +#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \ + grpc_call_combiner_start((call_combiner), (closure), (error), __FILE__, \ + __LINE__, (reason)) +#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \ + grpc_call_combiner_stop((call_combiner), __FILE__, __LINE__, (reason)) /// Starts processing \a closure on \a call_combiner. -void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_start(grpc_call_combiner* call_combiner, grpc_closure* closure, grpc_error* error, const char* file, int line, const char* reason); /// Yields the call combiner to the next closure in the queue, if any. -void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_stop(grpc_call_combiner* call_combiner, const char* file, int line, const char* reason); #else -#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \ - reason) \ - grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \ - (reason)) -#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \ - grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason)) +#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \ + grpc_call_combiner_start((call_combiner), (closure), (error), (reason)) +#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \ + grpc_call_combiner_stop((call_combiner), (reason)) /// Starts processing \a closure on \a call_combiner. 
-void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_start(grpc_call_combiner* call_combiner, grpc_closure* closure, grpc_error* error, const char* reason); /// Yields the call combiner to the next closure in the queue, if any. -void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_stop(grpc_call_combiner* call_combiner, const char* reason); #endif @@ -113,13 +105,11 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, /// cancellation; this effectively unregisters the previously set closure. /// However, most filters will not need to explicitly unregister their /// callbacks, as this is done automatically when the call is destroyed. -void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner, grpc_closure* closure); /// Indicates that the call has been cancelled. 
-void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner, grpc_error* error); #ifdef __cplusplus diff --git a/src/core/lib/iomgr/closure.cc b/src/core/lib/iomgr/closure.cc index 00edefc6ae..71521daeb6 100644 --- a/src/core/lib/iomgr/closure.cc +++ b/src/core/lib/iomgr/closure.cc @@ -107,13 +107,12 @@ typedef struct { grpc_closure wrapper; } wrapped_closure; -static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void closure_wrapper(void *arg, grpc_error *error) { wrapped_closure *wc = (wrapped_closure *)arg; grpc_iomgr_cb_func cb = wc->cb; void *cb_arg = wc->cb_arg; gpr_free(wc); - cb(exec_ctx, cb_arg, error); + cb(cb_arg, error); } #ifndef NDEBUG @@ -139,8 +138,7 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg, void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx, grpc_closure *c, grpc_error *error) { #else -void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c, - grpc_error *error) { +void grpc_closure_run(grpc_closure *c, grpc_error *error) { #endif GPR_TIMER_BEGIN("grpc_closure_run", 0); if (c != NULL) { @@ -150,7 +148,7 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c, c->run = true; #endif assert(c->cb); - c->scheduler->vtable->run(exec_ctx, c, error); + c->scheduler->vtable->run(c, error); } else { GRPC_ERROR_UNREF(error); } @@ -161,8 +159,7 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c, void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx, grpc_closure *c, grpc_error *error) { #else -void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c, - grpc_error *error) { +void grpc_closure_sched(grpc_closure *c, grpc_error *error) { #endif GPR_TIMER_BEGIN("grpc_closure_sched", 0); if (c != NULL) { @@ -181,7 +178,7 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c, c->run = false; 
#endif assert(c->cb); - c->scheduler->vtable->sched(exec_ctx, c, error); + c->scheduler->vtable->sched(c, error); } else { GRPC_ERROR_UNREF(error); } @@ -192,7 +189,7 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c, void grpc_closure_list_sched(const char *file, int line, grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { #else -void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { +void grpc_closure_list_sched(grpc_closure_list *list) { #endif grpc_closure *c = list->head; while (c != NULL) { @@ -212,7 +209,7 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { c->run = false; #endif assert(c->cb); - c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error); + c->scheduler->vtable->sched(c, c->error_data.error); c = next; } list->head = list->tail = NULL; diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h index cd32a4ba38..8b59b17dfb 100644 --- a/src/core/lib/iomgr/closure.h +++ b/src/core/lib/iomgr/closure.h @@ -49,18 +49,15 @@ typedef struct grpc_closure_list { * describing what went wrong. 
* Error contract: it is not the cb's job to unref this error; * the closure scheduler will do that after the cb returns */ -typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); +typedef void (*grpc_iomgr_cb_func)(void *arg, grpc_error *error); typedef struct grpc_closure_scheduler grpc_closure_scheduler; typedef struct grpc_closure_scheduler_vtable { /* NOTE: for all these functions, closure->scheduler == the scheduler that was used to find this vtable */ - void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); - void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); + void (*run)(grpc_closure *closure, grpc_error *error); + void (*sched)(grpc_closure *closure, grpc_error *error); const char *name; } grpc_closure_scheduler_vtable; @@ -164,26 +161,22 @@ bool grpc_closure_list_empty(grpc_closure_list list); #ifndef NDEBUG void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error); -#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \ +#define GRPC_CLOSURE_RUN(closure, error) \ grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error) #else -void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); -#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \ - grpc_closure_run(exec_ctx, closure, error) +void grpc_closure_run(grpc_closure *closure, grpc_error *error); +#define GRPC_CLOSURE_RUN(closure, error) grpc_closure_run(closure, error) #endif /** Schedule a closure to be run. Does not need to be run from a safe point. 
*/ #ifndef NDEBUG void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error); -#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \ +#define GRPC_CLOSURE_SCHED(closure, error) \ grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error) #else -void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); -#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \ - grpc_closure_sched(exec_ctx, closure, error) +void grpc_closure_sched(grpc_closure *closure, grpc_error *error); +#define GRPC_CLOSURE_SCHED(closure, error) grpc_closure_sched(closure, error) #endif /** Schedule all closures in a list to be run. Does not need to be run from a @@ -192,13 +185,12 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, void grpc_closure_list_sched(const char *file, int line, grpc_exec_ctx *exec_ctx, grpc_closure_list *closure_list); -#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \ +#define GRPC_CLOSURE_LIST_SCHED(closure_list) \ grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list) #else -void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, - grpc_closure_list *closure_list); -#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \ - grpc_closure_list_sched(exec_ctx, closure_list) +void grpc_closure_list_sched(grpc_closure_list *closure_list); +#define GRPC_CLOSURE_LIST_SCHED(closure_list) \ + grpc_closure_list_sched(closure_list) #endif #ifdef __cplusplus diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc index 53f4b7eaa7..c6d6d0fc43 100644 --- a/src/core/lib/iomgr/combiner.cc +++ b/src/core/lib/iomgr/combiner.cc @@ -62,17 +62,15 @@ struct grpc_combiner { gpr_refcount refs; }; -static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); -static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_error *error); +static void 
combiner_exec(grpc_closure *closure, grpc_error *error); +static void combiner_finally_exec(grpc_closure *closure, grpc_error *error); static const grpc_closure_scheduler_vtable scheduler = { combiner_exec, combiner_exec, "combiner:immediately"}; static const grpc_closure_scheduler_vtable finally_scheduler = { combiner_finally_exec, combiner_finally_exec, "combiner:finally"}; -static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); +static void offload(void *arg, grpc_error *error); grpc_combiner *grpc_combiner_create(void) { grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock)); @@ -88,19 +86,19 @@ grpc_combiner *grpc_combiner_create(void) { return lock; } -static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { +static void really_destroy(grpc_combiner *lock) { GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock)); GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0); gpr_mpscq_destroy(&lock->queue); gpr_free(lock); } -static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { +static void start_destroy(grpc_combiner *lock) { gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED); GRPC_COMBINER_TRACE(gpr_log( GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state)); if (old_state == 1) { - really_destroy(exec_ctx, lock); + really_destroy(lock); } } @@ -116,11 +114,10 @@ static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { #define GRPC_COMBINER_DEBUG_SPAM(op, delta) #endif -void grpc_combiner_unref(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) { +void grpc_combiner_unref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) { GRPC_COMBINER_DEBUG_SPAM("UNREF", -1); if (gpr_unref(&lock->refs)) { - start_destroy(exec_ctx, lock); + start_destroy(lock); } } @@ -130,8 +127,7 @@ grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) { return lock; } -static void 
push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock) { +static void push_last_on_exec_ctx(grpc_combiner *lock) { lock->next_combiner_on_this_exec_ctx = NULL; if (exec_ctx->active_combiner == NULL) { exec_ctx->active_combiner = exec_ctx->last_combiner = lock; @@ -141,8 +137,7 @@ static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx, } } -static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock) { +static void push_first_on_exec_ctx(grpc_combiner *lock) { lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner; exec_ctx->active_combiner = lock; if (lock->next_combiner_on_this_exec_ctx == NULL) { @@ -154,9 +149,8 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx, ((grpc_combiner *)(((char *)((closure)->scheduler)) - \ offsetof(grpc_combiner, scheduler_name))) -static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, - grpc_error *error) { - GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx); +static void combiner_exec(grpc_closure *cl, grpc_error *error) { + GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(); GPR_TIMER_BEGIN("combiner.execute", 0); grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler); gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); @@ -164,19 +158,19 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR, lock, cl, last)); if (last == 1) { - GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx); + GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(); GPR_TIMER_MARK("combiner.initiated", 0); gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, - (gpr_atm)exec_ctx); + (gpr_atm)&exec_ctx); // first element on this list: add it to the list of combiner locks // executing within this exec_ctx - push_last_on_exec_ctx(exec_ctx, lock); + push_last_on_exec_ctx(lock); } else { // there may be a race with setting here: if that happens, we may delay // offload for one or two 
actions, and that's fine gpr_atm initiator = gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null); - if (initiator != 0 && initiator != (gpr_atm)exec_ctx) { + if (initiator != 0 && initiator != (gpr_atm)&exec_ctx) { gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0); } } @@ -187,7 +181,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, GPR_TIMER_END("combiner.execute", 0); } -static void move_next(grpc_exec_ctx *exec_ctx) { +static void move_next() { exec_ctx->active_combiner = exec_ctx->active_combiner->next_combiner_on_this_exec_ctx; if (exec_ctx->active_combiner == NULL) { @@ -195,19 +189,19 @@ static void move_next(grpc_exec_ctx *exec_ctx) { } } -static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void offload(void *arg, grpc_error *error) { grpc_combiner *lock = (grpc_combiner *)arg; - push_last_on_exec_ctx(exec_ctx, lock); + push_last_on_exec_ctx(lock); } -static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { - GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx); - move_next(exec_ctx); +static void queue_offload(grpc_combiner *lock) { + GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(); + move_next(); GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock)); - GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE); } -bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { +bool grpc_combiner_continue_exec_ctx() { GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0); grpc_combiner *lock = exec_ctx->active_combiner; if (lock == NULL) { @@ -223,16 +217,15 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { "contended=%d " "exec_ctx_ready_to_finish=%d " "time_to_execute_final_list=%d", - lock, contended, - grpc_exec_ctx_ready_to_finish(exec_ctx), + lock, contended, grpc_exec_ctx_ready_to_finish(), lock->time_to_execute_final_list)); - if (contended && 
grpc_exec_ctx_ready_to_finish(exec_ctx) && + if (contended && grpc_exec_ctx_ready_to_finish() && grpc_executor_is_threaded()) { GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0); // this execution context wants to move on: schedule remaining work to be // picked up on the executor - queue_offload(exec_ctx, lock); + queue_offload(lock); GPR_TIMER_END("combiner.continue_exec_ctx", 0); return true; } @@ -248,7 +241,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { // queue is in an inconsistent state: use this as a cue that we should // go off and do something else for a while (and come back later) GPR_TIMER_MARK("delay_busy", 0); - queue_offload(exec_ctx, lock); + queue_offload(lock); GPR_TIMER_END("combiner.continue_exec_ctx", 0); return true; } @@ -258,7 +251,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { #ifndef NDEBUG cl->scheduled = false; #endif - cl->cb(exec_ctx, cl->cb_arg, cl_err); + cl->cb(cl->cb_arg, cl_err); GRPC_ERROR_UNREF(cl_err); GPR_TIMER_END("combiner.exec1", 0); } else { @@ -275,7 +268,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { #ifndef NDEBUG c->scheduled = false; #endif - c->cb(exec_ctx, c->cb_arg, error); + c->cb(c->cb_arg, error); GRPC_ERROR_UNREF(error); c = next; GPR_TIMER_END("combiner.exec_1final", 0); @@ -283,7 +276,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { } GPR_TIMER_MARK("unref", 0); - move_next(exec_ctx); + move_next(); lock->time_to_execute_final_list = false; gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT); @@ -312,7 +305,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { return true; case OLD_STATE_WAS(true, 1): // and one count, one orphaned --> unlocked and orphaned - really_destroy(exec_ctx, lock); + really_destroy(lock); GPR_TIMER_END("combiner.continue_exec_ctx", 0); return true; case OLD_STATE_WAS(false, 0): @@ -322,17 +315,15 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx 
*exec_ctx) { GPR_TIMER_END("combiner.continue_exec_ctx", 0); GPR_UNREACHABLE_CODE(return true); } - push_first_on_exec_ctx(exec_ctx, lock); + push_first_on_exec_ctx(lock); GPR_TIMER_END("combiner.continue_exec_ctx", 0); return true; } -static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, - grpc_error *error); +static void enqueue_finally(void *closure, grpc_error *error); -static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_error *error) { - GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx); +static void combiner_finally_exec(grpc_closure *closure, grpc_error *error) { + GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(); grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler); GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, @@ -341,8 +332,7 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, GPR_TIMER_BEGIN("combiner.execute_finally", 0); if (exec_ctx->active_combiner != lock) { GPR_TIMER_MARK("slowpath", 0); - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_CREATE(enqueue_finally, closure, + GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure, grpc_combiner_scheduler(lock)), error); GPR_TIMER_END("combiner.execute_finally", 0); @@ -356,10 +346,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, GPR_TIMER_END("combiner.execute_finally", 0); } -static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, - grpc_error *error) { - combiner_finally_exec(exec_ctx, (grpc_closure *)closure, - GRPC_ERROR_REF(error)); +static void enqueue_finally(void *closure, grpc_error *error) { + combiner_finally_exec((grpc_closure *)closure, GRPC_ERROR_REF(error)); } grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) { diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h index 10e5fb480d..4e30bc284f 100644 --- a/src/core/lib/iomgr/combiner.h +++ b/src/core/lib/iomgr/combiner.h @@ -44,26 +44,24 @@ grpc_combiner 
*grpc_combiner_create(void); , const char *file, int line, const char *reason #define GRPC_COMBINER_REF(combiner, reason) \ grpc_combiner_ref((combiner), __FILE__, __LINE__, (reason)) -#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \ - grpc_combiner_unref((exec_ctx), (combiner), __FILE__, __LINE__, (reason)) +#define GRPC_COMBINER_UNREF(combiner, reason) \ + grpc_combiner_unref((combiner), __FILE__, __LINE__, (reason)) #else #define GRPC_COMBINER_DEBUG_ARGS #define GRPC_COMBINER_REF(combiner, reason) grpc_combiner_ref((combiner)) -#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \ - grpc_combiner_unref((exec_ctx), (combiner)) +#define GRPC_COMBINER_UNREF(combiner, reason) grpc_combiner_unref((combiner)) #endif // Ref/unref the lock, for when we're sharing the lock ownership // Prefer to use the macros above grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS); -void grpc_combiner_unref(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS); +void grpc_combiner_unref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS); // Fetch a scheduler to schedule closures against grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock); // Scheduler to execute \a action within the lock just prior to unlocking. 
grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock); -bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx); +bool grpc_combiner_continue_exec_ctx(); extern grpc_tracer_flag grpc_combiner_trace; diff --git a/src/core/lib/iomgr/endpoint.cc b/src/core/lib/iomgr/endpoint.cc index 37cce335ca..824991f2ed 100644 --- a/src/core/lib/iomgr/endpoint.cc +++ b/src/core/lib/iomgr/endpoint.cc @@ -18,35 +18,30 @@ #include "src/core/lib/iomgr/endpoint.h" -void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_slice_buffer* slices, grpc_closure* cb) { - ep->vtable->read(exec_ctx, ep, slices, cb); +void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + ep->vtable->read(ep, slices, cb); } -void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_slice_buffer* slices, grpc_closure* cb) { - ep->vtable->write(exec_ctx, ep, slices, cb); +void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + ep->vtable->write(ep, slices, cb); } -void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_pollset* pollset) { - ep->vtable->add_to_pollset(exec_ctx, ep, pollset); +void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) { + ep->vtable->add_to_pollset(ep, pollset); } -void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_endpoint* ep, +void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pollset_set) { - ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set); + ep->vtable->add_to_pollset_set(ep, pollset_set); } -void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_error* why) { - ep->vtable->shutdown(exec_ctx, ep, why); +void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) { + ep->vtable->shutdown(ep, why); } -void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) { - ep->vtable->destroy(exec_ctx, ep); -} 
+void grpc_endpoint_destroy(grpc_endpoint* ep) { ep->vtable->destroy(ep); } char* grpc_endpoint_get_peer(grpc_endpoint* ep) { return ep->vtable->get_peer(ep); diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h index 21347d9023..f3f5efbb97 100644 --- a/src/core/lib/iomgr/endpoint.h +++ b/src/core/lib/iomgr/endpoint.h @@ -37,16 +37,12 @@ typedef struct grpc_endpoint grpc_endpoint; typedef struct grpc_endpoint_vtable grpc_endpoint_vtable; struct grpc_endpoint_vtable { - void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); - void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); - void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset); - void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pollset); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *why); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep); + void (*read)(grpc_endpoint *ep, grpc_slice_buffer *slices, grpc_closure *cb); + void (*write)(grpc_endpoint *ep, grpc_slice_buffer *slices, grpc_closure *cb); + void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset); + void (*add_to_pollset_set)(grpc_endpoint *ep, grpc_pollset_set *pollset); + void (*shutdown)(grpc_endpoint *ep, grpc_error *why); + void (*destroy)(grpc_endpoint *ep); grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep); char *(*get_peer)(grpc_endpoint *ep); int (*get_fd)(grpc_endpoint *ep); @@ -57,8 +53,8 @@ struct grpc_endpoint_vtable { indicates the endpoint is closed. Valid slices may be placed into \a slices even when the callback is invoked with error != GRPC_ERROR_NONE. 
*/ -void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); +void grpc_endpoint_read(grpc_endpoint *ep, grpc_slice_buffer *slices, + grpc_closure *cb); char *grpc_endpoint_get_peer(grpc_endpoint *ep); @@ -76,21 +72,18 @@ int grpc_endpoint_get_fd(grpc_endpoint *ep); No guarantee is made to the content of slices after a write EXCEPT that it is a valid slice buffer. */ -void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); +void grpc_endpoint_write(grpc_endpoint *ep, grpc_slice_buffer *slices, + grpc_closure *cb); /* Causes any pending and future read/write callbacks to run immediately with success==0 */ -void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why); -void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep); +void grpc_endpoint_shutdown(grpc_endpoint *ep, grpc_error *why); +void grpc_endpoint_destroy(grpc_endpoint *ep); /* Add an endpoint to a pollset, so that when the pollset is polled, events from this endpoint are considered */ -void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset); -void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_endpoint *ep, +void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset); +void grpc_endpoint_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set); grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint); diff --git a/src/core/lib/iomgr/endpoint_pair_posix.cc b/src/core/lib/iomgr/endpoint_pair_posix.cc index 3ade2148ba..9be636e074 100644 --- a/src/core/lib/iomgr/endpoint_pair_posix.cc +++ b/src/core/lib/iomgr/endpoint_pair_posix.cc @@ -54,18 +54,18 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, char *final_name; create_sockets(sv); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; 
gpr_asprintf(&final_name, "%s:client", name); - p.client = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], final_name), args, + p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), args, "socketpair-server"); gpr_free(final_name); gpr_asprintf(&final_name, "%s:server", name); - p.server = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[0], final_name), args, + p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), args, "socketpair-client"); gpr_free(final_name); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return p; } diff --git a/src/core/lib/iomgr/endpoint_pair_windows.cc b/src/core/lib/iomgr/endpoint_pair_windows.cc index 782fa2fd69..3355cb97b0 100644 --- a/src/core/lib/iomgr/endpoint_pair_windows.cc +++ b/src/core/lib/iomgr/endpoint_pair_windows.cc @@ -72,14 +72,12 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair( SOCKET sv[2]; grpc_endpoint_pair p; create_sockets(sv); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - p.client = grpc_tcp_create(&exec_ctx, - grpc_winsocket_create(sv[1], "endpoint:client"), + ExecCtx _local_exec_ctx; + p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"), channel_args, "endpoint:server"); - p.server = grpc_tcp_create(&exec_ctx, - grpc_winsocket_create(sv[0], "endpoint:server"), + p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"), channel_args, "endpoint:client"); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return p; } diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc index 2ea6cf1301..4b7fb62f79 100644 --- a/src/core/lib/iomgr/error.cc +++ b/src/core/lib/iomgr/error.cc @@ -158,9 +158,9 @@ static void unref_errs(grpc_error *err) { } static void unref_slice(grpc_slice slice) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, slice); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_slice_unref_internal(slice); + grpc_exec_ctx_finish(); } static void 
unref_strs(grpc_error *err) { diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc index 6126e2771c..301b729eb5 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.cc +++ b/src/core/lib/iomgr/ev_epoll1_linux.cc @@ -295,32 +295,29 @@ static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; } /* if 'releasing_fd' is true, it means that we are going to detach the internal * fd from grpc_fd structure (i.e which means we should not be calling * shutdown() syscall on that fd) */ -static void fd_shutdown_internal(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_error *why, bool releasing_fd) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { +static void fd_shutdown_internal(grpc_fd *fd, grpc_error *why, + bool releasing_fd) { + if (grpc_lfev_set_shutdown(&fd->read_closure, GRPC_ERROR_REF(why))) { if (!releasing_fd) { shutdown(fd->fd, SHUT_RDWR); } - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); + grpc_lfev_set_shutdown(&fd->write_closure, GRPC_ERROR_REF(why)); } GRPC_ERROR_UNREF(why); } /* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - fd_shutdown_internal(exec_ctx, fd, why, false); +static void fd_shutdown(grpc_fd *fd, grpc_error *why) { + fd_shutdown_internal(fd, why, false); } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, +static void fd_orphan(grpc_fd *fd, grpc_closure *on_done, int *release_fd, bool already_closed, const char *reason) { grpc_error *error = GRPC_ERROR_NONE; bool is_release_fd = (release_fd != NULL); if (!grpc_lfev_is_shutdown(&fd->read_closure)) { - fd_shutdown_internal(exec_ctx, fd, - GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason), + fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason), is_release_fd); } @@ -332,7 +329,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, close(fd->fd); } - 
GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error)); grpc_iomgr_unregister_object(&fd->iomgr_object); grpc_lfev_destroy(&fd->read_closure); @@ -344,8 +341,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_mu_unlock(&fd_freelist_mu); } -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static grpc_pollset *fd_get_read_notifier_pollset(grpc_fd *fd) { gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); return (grpc_pollset *)notifier; } @@ -354,25 +350,22 @@ static bool fd_is_shutdown(grpc_fd *fd) { return grpc_lfev_is_shutdown(&fd->read_closure); } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); +static void fd_notify_on_read(grpc_fd *fd, grpc_closure *closure) { + grpc_lfev_notify_on(&fd->read_closure, closure, "read"); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); +static void fd_notify_on_write(grpc_fd *fd, grpc_closure *closure) { + grpc_lfev_notify_on(&fd->write_closure, closure, "write"); } -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); +static void fd_become_readable(grpc_fd *fd, grpc_pollset *notifier) { + grpc_lfev_set_ready(&fd->read_closure, "read"); /* Use release store to match with acquire load in fd_get_read_notifier */ gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); +static void fd_become_writable(grpc_fd *fd) { + grpc_lfev_set_ready(&fd->write_closure, "write"); } 
/******************************************************************************* @@ -476,7 +469,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->next = pollset->prev = NULL; } -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset *pollset) { gpr_mu_lock(&pollset->mu); if (!pollset->seen_inactive) { pollset_neighborhood *neighborhood = pollset->neighborhood; @@ -504,27 +497,26 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->mu); } -static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static grpc_error *pollset_kick_all(grpc_pollset *pollset) { GPR_TIMER_BEGIN("pollset_kick_all", 0); grpc_error *error = GRPC_ERROR_NONE; if (pollset->root_worker != NULL) { grpc_pollset_worker *worker = pollset->root_worker; do { - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); switch (worker->state) { case KICKED: - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); break; case UNKICKED: SET_KICK_STATE(worker, KICKED); if (worker->initialized_cv) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&worker->cv); } break; case DESIGNATED_POLLER: - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); SET_KICK_STATE(worker, KICKED); append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd), "pollset_kick_all"); @@ -540,32 +532,29 @@ static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx, return error; } -static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static void pollset_maybe_finish_shutdown(grpc_pollset *pollset) { if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL && pollset->begin_refs == 0) { GPR_TIMER_MARK("pollset_finish_shutdown", 0); - GRPC_CLOSURE_SCHED(exec_ctx, 
pollset->shutdown_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE); pollset->shutdown_closure = NULL; } } -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +static void pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { GPR_TIMER_BEGIN("pollset_shutdown", 0); GPR_ASSERT(pollset->shutdown_closure == NULL); GPR_ASSERT(!pollset->shutting_down); pollset->shutdown_closure = closure; pollset->shutting_down = true; - GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset)); - pollset_maybe_finish_shutdown(exec_ctx, pollset); + GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset)); + pollset_maybe_finish_shutdown(pollset); GPR_TIMER_END("pollset_shutdown", 0); } -static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, - grpc_millis millis) { +static int poll_deadline_to_millis_timeout(grpc_millis millis) { if (millis == GRPC_MILLIS_INF_FUTURE) return -1; - grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx); + grpc_millis delta = millis - grpc_exec_ctx_now(); if (delta > INT_MAX) { return INT_MAX; } else if (delta < 0) { @@ -583,8 +572,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, NOTE ON SYNCRHONIZATION: Similar to do_epoll_wait(), this function is only called by g_active_poller thread. 
So there is no need for synchronization when accessing fields in g_epoll_set */ -static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static grpc_error *process_epoll_events(grpc_pollset *pollset) { static const char *err_desc = "process_events"; grpc_error *error = GRPC_ERROR_NONE; @@ -608,11 +596,11 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx, bool write_ev = (ev->events & EPOLLOUT) != 0; if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); + fd_become_readable(fd, pollset); } if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); + fd_become_writable(fd); } } } @@ -628,27 +616,26 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx, NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller (i.e the designated poller thread) will be calling this function. So there is no need for any synchronization when accesing fields in g_epoll_set */ -static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, - grpc_millis deadline) { +static grpc_error *do_epoll_wait(grpc_pollset *ps, grpc_millis deadline) { GPR_TIMER_BEGIN("do_epoll_wait", 0); int r; - int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline); + int timeout = poll_deadline_to_millis_timeout(deadline); if (timeout != 0) { GRPC_SCHEDULING_START_BLOCKING_REGION; } do { - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS, timeout); } while (r < 0 && errno == EINTR); if (timeout != 0) { - GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(); } if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); - GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r); + GRPC_STATS_INC_POLL_EVENTS_RETURNED(r); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r); @@ -661,8 +648,7 @@ static grpc_error 
*do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, return GRPC_ERROR_NONE; } -static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *worker, +static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { GPR_TIMER_BEGIN("begin_worker", 0); @@ -757,7 +743,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, SET_KICK_STATE(worker, KICKED); } } - grpc_exec_ctx_invalidate_now(exec_ctx); + grpc_exec_ctx_invalidate_now(); } if (GRPC_TRACER_ON(grpc_polling_trace)) { @@ -788,7 +774,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } static bool check_neighborhood_for_available_poller( - grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) { + pollset_neighborhood *neighborhood) { GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0); bool found_worker = false; do { @@ -812,7 +798,7 @@ static bool check_neighborhood_for_available_poller( SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER); if (inspect_worker->initialized_cv) { GPR_TIMER_MARK("signal worker", 0); - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&inspect_worker->cv); } } else { @@ -852,8 +838,7 @@ static bool check_neighborhood_for_available_poller( return found_worker; } -static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *worker, +static void end_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, grpc_pollset_worker **worker_hdl) { GPR_TIMER_BEGIN("end_worker", 0); if (GRPC_TRACER_ON(grpc_polling_trace)) { @@ -872,11 +857,11 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, GPR_ASSERT(worker->next->initialized_cv); gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next); SET_KICK_STATE(worker->next, DESIGNATED_POLLER); - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + 
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&worker->next->cv); - if (grpc_exec_ctx_has_work(exec_ctx)) { + if (grpc_exec_ctx_has_work()) { gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->mu); } } else { @@ -891,8 +876,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, &g_neighborhoods[(poller_neighborhood_idx + i) % g_num_neighborhoods]; if (gpr_mu_trylock(&neighborhood->mu)) { - found_worker = - check_neighborhood_for_available_poller(exec_ctx, neighborhood); + found_worker = check_neighborhood_for_available_poller(neighborhood); gpr_mu_unlock(&neighborhood->mu); scan_state[i] = true; } else { @@ -905,16 +889,15 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, &g_neighborhoods[(poller_neighborhood_idx + i) % g_num_neighborhoods]; gpr_mu_lock(&neighborhood->mu); - found_worker = - check_neighborhood_for_available_poller(exec_ctx, neighborhood); + found_worker = check_neighborhood_for_available_poller(neighborhood); gpr_mu_unlock(&neighborhood->mu); } - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->mu); } - } else if (grpc_exec_ctx_has_work(exec_ctx)) { + } else if (grpc_exec_ctx_has_work()) { gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->mu); } if (worker->initialized_cv) { @@ -924,7 +907,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_log(GPR_DEBUG, " .. 
remove worker"); } if (EMPTIED == worker_remove(pollset, worker)) { - pollset_maybe_finish_shutdown(exec_ctx, pollset); + pollset_maybe_finish_shutdown(pollset); } GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker); GPR_TIMER_END("end_worker", 0); @@ -934,7 +917,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, +static grpc_error *pollset_work(grpc_pollset *ps, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { grpc_pollset_worker worker; @@ -947,7 +930,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, return GRPC_ERROR_NONE; } - if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) { + if (begin_worker(ps, &worker, worker_hdl, deadline)) { gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); GPR_ASSERT(!ps->shutting_down); @@ -970,9 +953,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, designated poller */ if (gpr_atm_acq_load(&g_epoll_set.cursor) == gpr_atm_acq_load(&g_epoll_set.num_events)) { - append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc); + append_error(&error, do_epoll_wait(ps, deadline), err_desc); } - append_error(&error, process_epoll_events(exec_ctx, ps), err_desc); + append_error(&error, process_epoll_events(ps), err_desc); gpr_mu_lock(&ps->mu); /* lock */ @@ -980,17 +963,17 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, } else { gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps); } - end_worker(exec_ctx, ps, &worker, worker_hdl); + end_worker(ps, &worker, worker_hdl); gpr_tls_set(&g_current_thread_pollset, 0); 
GPR_TIMER_END("pollset_work", 0); return error; } -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +static grpc_error *pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { GPR_TIMER_BEGIN("pollset_kick", 0); - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); grpc_error *ret_err = GRPC_ERROR_NONE; if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_strvec log; @@ -1023,7 +1006,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { grpc_pollset_worker *root_worker = pollset->root_worker; if (root_worker == NULL) { - GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(); pollset->kicked_without_poller = true; if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. kicked_without_poller"); @@ -1032,14 +1015,14 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } grpc_pollset_worker *next_worker = root_worker->next; if (root_worker->state == KICKED) { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. already kicked %p", root_worker); } SET_KICK_STATE(root_worker, KICKED); goto done; } else if (next_worker->state == KICKED) { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. already kicked %p", next_worker); } @@ -1050,7 +1033,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, // there is no next worker root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( &g_active_poller)) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. 
kicked %p", root_worker); } @@ -1058,7 +1041,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); goto done; } else if (next_worker->state == UNKICKED) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. kicked %p", next_worker); } @@ -1076,12 +1059,12 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } SET_KICK_STATE(root_worker, KICKED); if (root_worker->initialized_cv) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&root_worker->cv); } goto done; } else { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker, root_worker); @@ -1091,13 +1074,13 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, goto done; } } else { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); GPR_ASSERT(next_worker->state == KICKED); SET_KICK_STATE(next_worker, KICKED); goto done; } } else { - GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. kicked while waking up"); } @@ -1114,7 +1097,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, goto done; } else if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { - GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. 
mark %p kicked", specific_worker); } @@ -1122,7 +1105,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, goto done; } else if (specific_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. kick active poller"); } @@ -1130,7 +1113,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); goto done; } else if (specific_worker->initialized_cv) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. kick waiting worker"); } @@ -1138,7 +1121,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_cv_signal(&specific_worker->cv); goto done; } else { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. 
kick non-waiting worker"); } @@ -1150,8 +1133,7 @@ done: return ret_err; } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) {} +static void pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {} /******************************************************************************* * Pollset-set Definitions @@ -1161,27 +1143,20 @@ static grpc_pollset_set *pollset_set_create(void) { return (grpc_pollset_set *)((intptr_t)0xdeafbeef); } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) {} +static void pollset_set_destroy(grpc_pollset_set *pss) {} -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) {} +static void pollset_set_add_fd(grpc_pollset_set *pss, grpc_fd *fd) {} -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) {} +static void pollset_set_del_fd(grpc_pollset_set *pss, grpc_fd *fd) {} -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) {} +static void pollset_set_add_pollset(grpc_pollset_set *pss, grpc_pollset *ps) {} -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) {} +static void pollset_set_del_pollset(grpc_pollset_set *pss, grpc_pollset *ps) {} -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_add_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) {} -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_del_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) {} /******************************************************************************* diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc index 59dd8fd2fe..28fa48afa9 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.cc +++ b/src/core/lib/iomgr/ev_epollex_linux.cc @@ 
-81,17 +81,14 @@ struct polling_group { static void po_init(polling_obj *po, polling_obj_type type); static void po_destroy(polling_obj *po); -static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b); +static void po_join(polling_obj *a, polling_obj *b); static int po_cmp(polling_obj *a, polling_obj *b); -static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po, - size_t initial_po_count); +static void pg_create(polling_obj **initial_po, size_t initial_po_count); static polling_group *pg_ref(polling_group *pg); static void pg_unref(polling_group *pg); -static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a, - polling_group *b); -static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, - polling_obj *po); +static void pg_merge(polling_group *a, polling_group *b); +static void pg_join(polling_group *pg, polling_obj *po); /******************************************************************************* * pollable Declarations @@ -260,8 +257,7 @@ static gpr_mu fd_freelist_mu; #ifndef NDEBUG #define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) -#define UNREF_BY(ec, fd, n, reason) \ - unref_by(ec, fd, n, reason, __FILE__, __LINE__) +#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) { if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { @@ -272,13 +268,13 @@ static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, } #else #define REF_BY(fd, n, reason) ref_by(fd, n) -#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n) +#define UNREF_BY(fd, n, reason) unref_by(fd, n) static void ref_by(grpc_fd *fd, int n) { #endif GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); } -static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void fd_destroy(void *arg, grpc_error *error) { grpc_fd *fd = (grpc_fd *)arg; /* Add the fd to the freelist */ 
grpc_iomgr_unregister_object(&fd->iomgr_object); @@ -295,8 +291,8 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } #ifndef NDEBUG -static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n, - const char *reason, const char *file, int line) { +static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { gpr_log(GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", @@ -304,13 +300,13 @@ static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n, gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); } #else -static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) { +static void unref_by(grpc_fd *fd, int n) { #endif gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); if (old == n) { - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } else { GPR_ASSERT(old > n); } @@ -379,8 +375,7 @@ static int fd_wrapped_fd(grpc_fd *fd) { return ret_fd; } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, +static void fd_orphan(grpc_fd *fd, grpc_closure *on_done, int *release_fd, bool already_closed, const char *reason) { bool is_fd_closed = already_closed; grpc_error *error = GRPC_ERROR_NONE; @@ -408,17 +403,16 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, to be alive (and not added to freelist) until the end of this function */ REF_BY(fd, 1, reason); - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_REF(error)); gpr_mu_unlock(&fd->orphaned_mu); gpr_mu_unlock(&fd->pollable_obj.po.mu); - UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */ + UNREF_BY(fd, 2, reason); /* Drop the reference */ 
GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); GRPC_ERROR_UNREF(error); } -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static grpc_pollset *fd_get_read_notifier_pollset(grpc_fd *fd) { gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); return (grpc_pollset *)notifier; } @@ -428,23 +422,20 @@ static bool fd_is_shutdown(grpc_fd *fd) { } /* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { +static void fd_shutdown(grpc_fd *fd, grpc_error *why) { + if (grpc_lfev_set_shutdown(&fd->read_closure, GRPC_ERROR_REF(why))) { shutdown(fd->fd, SHUT_RDWR); - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); + grpc_lfev_set_shutdown(&fd->write_closure, GRPC_ERROR_REF(why)); } GRPC_ERROR_UNREF(why); } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); +static void fd_notify_on_read(grpc_fd *fd, grpc_closure *closure) { + grpc_lfev_notify_on(&fd->read_closure, closure, "read"); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); +static void fd_notify_on_write(grpc_fd *fd, grpc_closure *closure) { + grpc_lfev_notify_on(&fd->write_closure, closure, "write"); } /******************************************************************************* @@ -545,24 +536,22 @@ static void pollset_global_shutdown(void) { gpr_tls_destroy(&g_current_thread_worker); } -static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static void pollset_maybe_finish_shutdown(grpc_pollset *pollset) { if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL && pollset->kick_alls_pending == 0) { - 
GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE); pollset->shutdown_closure = NULL; } } -static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_unused) { +static void do_kick_all(void *arg, grpc_error *error_unused) { grpc_error *error = GRPC_ERROR_NONE; grpc_pollset *pollset = (grpc_pollset *)arg; gpr_mu_lock(&pollset->pollable_obj.po.mu); if (pollset->root_worker != NULL) { grpc_pollset_worker *worker = pollset->root_worker; do { - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); if (worker->pollable_obj != &pollset->pollable_obj) { gpr_mu_lock(&worker->pollable_obj->po.mu); } @@ -592,16 +581,16 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg, } while (worker != pollset->root_worker); } pollset->kick_alls_pending--; - pollset_maybe_finish_shutdown(exec_ctx, pollset); + pollset_maybe_finish_shutdown(pollset); gpr_mu_unlock(&pollset->pollable_obj.po.mu); GRPC_LOG_IF_ERROR("kick_all", error); } -static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_kick_all(grpc_pollset *pollset) { pollset->kick_alls_pending++; - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(do_kick_all, pollset, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(do_kick_all, pollset, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p, @@ -667,10 +656,10 @@ static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p, } /* p->po.mu must be held before calling this function */ -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +static grpc_error *pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { pollable *p = pollset->current_pollable_obj; - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); if (p != 
&pollset->pollable_obj) { gpr_mu_lock(&p->po.mu); } @@ -690,10 +679,9 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { *mu = &pollset->pollable_obj.po.mu; } -static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, - grpc_millis millis) { +static int poll_deadline_to_millis_timeout(grpc_millis millis) { if (millis == GRPC_MILLIS_INF_FUTURE) return -1; - grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx); + grpc_millis delta = millis - grpc_exec_ctx_now(); if (delta > INT_MAX) return INT_MAX; else if (delta < 0) @@ -702,9 +690,8 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, return (int)delta; } -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); +static void fd_become_readable(grpc_fd *fd, grpc_pollset *notifier) { + grpc_lfev_set_ready(&fd->read_closure, "read"); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it is in two epoll @@ -715,8 +702,8 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); +static void fd_become_writable(grpc_fd *fd) { + grpc_lfev_set_ready(&fd->write_closure, "write"); } static grpc_error *fd_become_pollable_locked(grpc_fd *fd) { @@ -729,20 +716,18 @@ static grpc_error *fd_become_pollable_locked(grpc_fd *fd) { } /* pollset->po.mu lock must be held by the caller before calling this */ -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +static void pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(pollset->shutdown_closure == NULL); pollset->shutdown_closure = closure; - pollset_kick_all(exec_ctx, 
pollset); - pollset_maybe_finish_shutdown(exec_ctx, pollset); + pollset_kick_all(pollset); + pollset_maybe_finish_shutdown(pollset); } static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) { return p != &g_empty_pollable && p != &pollset->pollable_obj; } -static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, bool drain) { +static grpc_error *pollset_process_events(grpc_pollset *pollset, bool drain) { static const char *err_desc = "pollset_process_events"; grpc_error *error = GRPC_ERROR_NONE; for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) && @@ -771,10 +756,10 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx, pollset, fd, cancel, read_ev, write_ev); } if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); + fd_become_readable(fd, pollset); } if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); + fd_become_writable(fd); } } } @@ -783,19 +768,18 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx, } /* pollset_shutdown is guaranteed to be called before pollset_destroy. 
*/ -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset *pollset) { pollable_destroy(&pollset->pollable_obj); if (pollset_is_pollable_fd(pollset, pollset->current_pollable_obj)) { - UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable_obj, 2, - "pollset_pollable"); + UNREF_BY((grpc_fd *)pollset->current_pollable_obj, 2, "pollset_pollable"); } GRPC_LOG_IF_ERROR("pollset_process_events", - pollset_process_events(exec_ctx, pollset, true)); + pollset_process_events(pollset, true)); } -static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - pollable *p, grpc_millis deadline) { - int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline); +static grpc_error *pollset_epoll(grpc_pollset *pollset, pollable *p, + grpc_millis deadline) { + int timeout = poll_deadline_to_millis_timeout(deadline); if (GRPC_TRACER_ON(grpc_polling_trace)) { char *desc = pollable_desc(p); @@ -808,11 +792,11 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } int r; do { - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout); } while (r < 0 && errno == EINTR); if (timeout != 0) { - GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(); } if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); @@ -867,8 +851,7 @@ static worker_remove_result worker_remove(grpc_pollset_worker **root, } /* Return true if this thread should poll */ -static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *worker, +static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { bool do_poll = true; @@ -894,7 +877,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, worker->pollable_obj->root_worker != worker) 
{ gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset, worker->pollable_obj, worker, - poll_deadline_to_millis_timeout(exec_ctx, deadline)); + poll_deadline_to_millis_timeout(deadline)); } while (do_poll && worker->pollable_obj->root_worker != worker) { if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, @@ -921,15 +904,14 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_mu_lock(&pollset->pollable_obj.po.mu); gpr_mu_lock(&worker->pollable_obj->po.mu); } - grpc_exec_ctx_invalidate_now(exec_ctx); + grpc_exec_ctx_invalidate_now(); } return do_poll && pollset->shutdown_closure == NULL && pollset->current_pollable_obj == worker->pollable_obj; } -static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *worker, +static void end_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, grpc_pollset_worker **worker_hdl) { if (NEW_ROOT == worker_remove(&worker->pollable_obj->root_worker, PWL_POLLABLE, worker)) { @@ -939,10 +921,10 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_cv_destroy(&worker->cv); } if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) { - UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable_obj, 2, "one_poll"); + UNREF_BY((grpc_fd *)worker->pollable_obj, 2, "one_poll"); } if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) { - pollset_maybe_finish_shutdown(exec_ctx, pollset); + pollset_maybe_finish_shutdown(pollset); } } @@ -950,14 +932,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +static grpc_error *pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { 
grpc_pollset_worker worker; if (0 && GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR " kwp=%d root_worker=%p", - pollset, worker_hdl, &worker, grpc_exec_ctx_now(exec_ctx), deadline, + pollset, worker_hdl, &worker, grpc_exec_ctx_now(), deadline, pollset->kicked_without_poller, pollset->root_worker); } grpc_error *error = GRPC_ERROR_NONE; @@ -969,7 +951,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->current_pollable_obj != &pollset->pollable_obj) { gpr_mu_lock(&pollset->current_pollable_obj->po.mu); } - if (begin_worker(exec_ctx, pollset, &worker, worker_hdl, deadline)) { + if (begin_worker(pollset, &worker, worker_hdl, deadline)) { gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); GPR_ASSERT(!pollset->shutdown_closure); @@ -979,41 +961,38 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } gpr_mu_unlock(&pollset->pollable_obj.po.mu); if (pollset->event_cursor == pollset->event_count) { - append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj, - deadline), + append_error(&error, + pollset_epoll(pollset, worker.pollable_obj, deadline), err_desc); } - append_error(&error, pollset_process_events(exec_ctx, pollset, false), - err_desc); + append_error(&error, pollset_process_events(pollset, false), err_desc); gpr_mu_lock(&pollset->pollable_obj.po.mu); if (worker.pollable_obj != &pollset->pollable_obj) { gpr_mu_lock(&worker.pollable_obj->po.mu); } gpr_tls_set(&g_current_thread_pollset, 0); gpr_tls_set(&g_current_thread_worker, 0); - pollset_maybe_finish_shutdown(exec_ctx, pollset); + pollset_maybe_finish_shutdown(pollset); } - end_worker(exec_ctx, pollset, &worker, worker_hdl); + end_worker(pollset, &worker, worker_hdl); if (worker.pollable_obj != &pollset->pollable_obj) { gpr_mu_unlock(&worker.pollable_obj->po.mu); } - if 
(grpc_exec_ctx_has_work(exec_ctx)) { + if (grpc_exec_ctx_has_work()) { gpr_mu_unlock(&pollset->pollable_obj.po.mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->pollable_obj.po.mu); } return error; } -static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void unref_fd_no_longer_poller(void *arg, grpc_error *error) { grpc_fd *fd = (grpc_fd *)arg; - UNREF_BY(exec_ctx, fd, 2, "pollset_pollable"); + UNREF_BY(fd, 2, "pollset_pollable"); } /* expects pollsets locked, flag whether fd is locked or not */ -static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, grpc_fd *fd, +static grpc_error *pollset_add_fd_locked(grpc_pollset *pollset, grpc_fd *fd, bool fd_locked) { static const char *err_desc = "pollset_add_fd"; grpc_error *error = GRPC_ERROR_NONE; @@ -1024,7 +1003,7 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx, fd); } /* empty pollable --> single fd pollable */ - pollset_kick_all(exec_ctx, pollset); + pollset_kick_all(pollset); pollset->current_pollable_obj = &fd->pollable_obj; if (!fd_locked) gpr_mu_lock(&fd->pollable_obj.po.mu); append_error(&error, fd_become_pollable_locked(fd), err_desc); @@ -1046,27 +1025,25 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx, /* Introduce a spurious completion. If we do not, then it may be that the fd-specific epoll set consumed a completion without being polled, leading to a missed edge going up. 
*/ - grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read"); - grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write"); - pollset_kick_all(exec_ctx, pollset); + grpc_lfev_set_ready(&had_fd->read_closure, "read"); + grpc_lfev_set_ready(&had_fd->write_closure, "write"); + pollset_kick_all(pollset); pollset->current_pollable_obj = &pollset->pollable_obj; if (append_error(&error, pollable_materialize(&pollset->pollable_obj), err_desc)) { pollable_add_fd(&pollset->pollable_obj, had_fd); pollable_add_fd(&pollset->pollable_obj, fd); } - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd, + GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd, grpc_schedule_on_exec_ctx), GRPC_ERROR_NONE); } return error; } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { +static void pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { gpr_mu_lock(&pollset->pollable_obj.po.mu); - grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false); + grpc_error *error = pollset_add_fd_locked(pollset, fd, false); gpr_mu_unlock(&pollset->pollable_obj.po.mu); GRPC_LOG_IF_ERROR("pollset_add_fd", error); } @@ -1081,36 +1058,29 @@ static grpc_pollset_set *pollset_set_create(void) { return pss; } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) { +static void pollset_set_destroy(grpc_pollset_set *pss) { po_destroy(&pss->po); gpr_free(pss); } -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - po_join(exec_ctx, &pss->po, &fd->pollable_obj.po); +static void pollset_set_add_fd(grpc_pollset_set *pss, grpc_fd *fd) { + po_join(&pss->po, &fd->pollable_obj.po); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) {} +static void pollset_set_del_fd(grpc_pollset_set *pss, grpc_fd *fd) {} -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set 
*pss, grpc_pollset *ps) { - po_join(exec_ctx, &pss->po, &ps->pollable_obj.po); +static void pollset_set_add_pollset(grpc_pollset_set *pss, grpc_pollset *ps) { + po_join(&pss->po, &ps->pollable_obj.po); } -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) {} +static void pollset_set_del_pollset(grpc_pollset_set *pss, grpc_pollset *ps) {} -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_add_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { - po_join(exec_ctx, &bag->po, &item->po); + po_join(&bag->po, &item->po); } -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_del_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) {} static void po_init(polling_obj *po, polling_obj_type type) { @@ -1166,7 +1136,7 @@ static int po_cmp(polling_obj *a, polling_obj *b) { return 1; } -static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) { +static void po_join(polling_obj *a, polling_obj *b) { switch (po_cmp(a, b)) { case 0: return; @@ -1180,20 +1150,20 @@ static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) { if (a->group == NULL) { if (b->group == NULL) { polling_obj *initial_po[] = {a, b}; - pg_create(exec_ctx, initial_po, GPR_ARRAY_SIZE(initial_po)); + pg_create(initial_po, GPR_ARRAY_SIZE(initial_po)); gpr_mu_unlock(&a->mu); gpr_mu_unlock(&b->mu); } else { polling_group *b_group = pg_ref(b->group); gpr_mu_unlock(&b->mu); gpr_mu_unlock(&a->mu); - pg_join(exec_ctx, b_group, a); + pg_join(b_group, a); } } else if (b->group == NULL) { polling_group *a_group = pg_ref(a->group); gpr_mu_unlock(&a->mu); gpr_mu_unlock(&b->mu); - pg_join(exec_ctx, a_group, b); + pg_join(a_group, b); } else if (a->group == b->group) { /* nothing to do */ gpr_mu_unlock(&a->mu); @@ -1203,21 +1173,20 @@ static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, 
polling_obj *b) { polling_group *b_group = pg_ref(b->group); gpr_mu_unlock(&a->mu); gpr_mu_unlock(&b->mu); - pg_merge(exec_ctx, a_group, b_group); + pg_merge(a_group, b_group); } } } -static void pg_notify(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) { +static void pg_notify(polling_obj *a, polling_obj *b) { if (a->type == PO_FD && b->type == PO_POLLSET) { - pollset_add_fd_locked(exec_ctx, (grpc_pollset *)b, (grpc_fd *)a, true); + pollset_add_fd_locked((grpc_pollset *)b, (grpc_fd *)a, true); } else if (a->type == PO_POLLSET && b->type == PO_FD) { - pollset_add_fd_locked(exec_ctx, (grpc_pollset *)a, (grpc_fd *)b, true); + pollset_add_fd_locked((grpc_pollset *)a, (grpc_fd *)b, true); } } -static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from, - polling_group *to) { +static void pg_broadcast(polling_group *from, polling_group *to) { for (polling_obj *a = from->po.next; a != &from->po; a = a->next) { for (polling_obj *b = to->po.next; b != &to->po; b = b->next) { if (po_cmp(a, b) < 0) { @@ -1228,15 +1197,14 @@ static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from, gpr_mu_lock(&b->mu); gpr_mu_lock(&a->mu); } - pg_notify(exec_ctx, a, b); + pg_notify(a, b); gpr_mu_unlock(&a->mu); gpr_mu_unlock(&b->mu); } } } -static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po, - size_t initial_po_count) { +static void pg_create(polling_obj **initial_po, size_t initial_po_count) { /* assumes all polling objects in initial_po are locked */ polling_group *pg = (polling_group *)gpr_malloc(sizeof(*pg)); po_init(&pg->po, PO_POLLING_GROUP); @@ -1257,13 +1225,12 @@ static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po, pg->po.prev = initial_po[initial_po_count - 1]; for (size_t i = 1; i < initial_po_count; i++) { for (size_t j = 0; j < i; j++) { - pg_notify(exec_ctx, initial_po[i], initial_po[j]); + pg_notify(initial_po[i], initial_po[j]); } } } -static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, - 
polling_obj *po) { +static void pg_join(polling_group *pg, polling_obj *po) { /* assumes neither pg nor po are locked; consumes one ref to pg */ pg = pg_lock_latest(pg); /* pg locked */ @@ -1283,12 +1250,12 @@ static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, polling_group *po_group = pg_ref(po->group); gpr_mu_unlock(&po->mu); gpr_mu_unlock(&existing->mu); - pg_merge(exec_ctx, pg, po_group); + pg_merge(pg, po_group); /* early exit: polling obj picked up a group during joining: we needed to do a full merge */ return; } - pg_notify(exec_ctx, po, existing); + pg_notify(po, existing); gpr_mu_unlock(&po->mu); gpr_mu_unlock(&existing->mu); } @@ -1297,7 +1264,7 @@ static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, gpr_mu_unlock(&pg->po.mu); polling_group *po_group = pg_ref(po->group); gpr_mu_unlock(&po->mu); - pg_merge(exec_ctx, pg, po_group); + pg_merge(pg, po_group); /* early exit: polling obj picked up a group during joining: we needed to do a full merge */ return; @@ -1310,8 +1277,7 @@ static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, gpr_mu_unlock(&po->mu); } -static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a, - polling_group *b) { +static void pg_merge(polling_group *a, polling_group *b) { for (;;) { if (a == b) { pg_unref(a); @@ -1341,8 +1307,8 @@ static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a, size_t unref_count = 0; size_t unref_cap = 0; b->po.group = a; - pg_broadcast(exec_ctx, a, b); - pg_broadcast(exec_ctx, b, a); + pg_broadcast(a, b); + pg_broadcast(b, a); while (b->po.next != &b->po) { polling_obj *po = b->po.next; gpr_mu_lock(&po->mu); diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc index 035bdc4cb5..d7b61f0c44 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.cc +++ b/src/core/lib/iomgr/ev_epollsig_linux.cc @@ -164,13 +164,12 @@ static void fd_global_shutdown(void); #ifndef NDEBUG #define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, 
__LINE__) -#define PI_UNREF(exec_ctx, p, r) \ - pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) +#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__) #else #define PI_ADD_REF(p, r) pi_add_ref((p)) -#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p)) +#define PI_UNREF(p, r) pi_unref((p)) #endif @@ -269,7 +268,7 @@ static grpc_wakeup_fd polling_island_wakeup_fd; static __thread polling_island *g_current_thread_polling_island; /* Forward declaration */ -static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); +static void polling_island_delete(polling_island *pi); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -283,7 +282,7 @@ gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ static void pi_add_ref(polling_island *pi); -static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); +static void pi_unref(polling_island *pi); #ifndef NDEBUG static void pi_add_ref_dbg(polling_island *pi, const char *reason, @@ -297,15 +296,15 @@ static void pi_add_ref_dbg(polling_island *pi, const char *reason, pi_add_ref(pi); } -static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi, - const char *reason, const char *file, int line) { +static void pi_unref_dbg(polling_island *pi, const char *reason, + const char *file, int line) { if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR " (%s) - (%s, %d)", pi, old_cnt, (old_cnt - 1), reason, file, line); } - pi_unref(exec_ctx, pi); + pi_unref(pi); } #endif @@ -313,7 +312,7 @@ static void pi_add_ref(polling_island *pi) { gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1); } -static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) { +static void pi_unref(polling_island *pi) { /* If ref count went to zero, delete the polling island. Note that this deletion not be done under a lock. 
Once the ref count goes to zero, we are guaranteed that no one else holds a reference to the @@ -324,9 +323,9 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) { */ if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) { polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - polling_island_delete(exec_ctx, pi); + polling_island_delete(pi); if (next != NULL) { - PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */ + PI_UNREF(next, "pi_delete"); /* Recursive call */ } } } @@ -462,8 +461,7 @@ static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd, } /* Might return NULL in case of an error */ -static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, - grpc_fd *initial_fd, +static polling_island *polling_island_create(grpc_fd *initial_fd, grpc_error **error) { polling_island *pi = NULL; const char *err_desc = "polling_island_create"; @@ -494,13 +492,13 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, done: if (*error != GRPC_ERROR_NONE) { - polling_island_delete(exec_ctx, pi); + polling_island_delete(pi); pi = NULL; } return pi; } -static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { +static void polling_island_delete(polling_island *pi) { GPR_ASSERT(pi->fd_cnt == 0); if (pi->epoll_fd >= 0) { @@ -857,8 +855,7 @@ static int fd_wrapped_fd(grpc_fd *fd) { return ret_fd; } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, +static void fd_orphan(grpc_fd *fd, grpc_closure *on_done, int *release_fd, bool already_closed, const char *reason) { grpc_error *error = GRPC_ERROR_NONE; polling_island *unref_pi = NULL; @@ -897,7 +894,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, fd->orphaned = true; - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_REF(error)); gpr_mu_unlock(&fd->po.mu); UNREF_BY(fd, 2, 
reason); /* Drop the reference */ @@ -906,7 +903,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, The polling island owns a workqueue which owns an fd, and unreffing inside the lock can cause an eventual lock loop that makes TSAN very unhappy. */ - PI_UNREF(exec_ctx, unref_pi, "fd_orphan"); + PI_UNREF(unref_pi, "fd_orphan"); } if (error != GRPC_ERROR_NONE) { const char *msg = grpc_error_string(error); @@ -915,8 +912,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, GRPC_ERROR_UNREF(error); } -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static grpc_pollset *fd_get_read_notifier_pollset(grpc_fd *fd) { gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); return (grpc_pollset *)notifier; } @@ -926,23 +922,20 @@ static bool fd_is_shutdown(grpc_fd *fd) { } /* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { +static void fd_shutdown(grpc_fd *fd, grpc_error *why) { + if (grpc_lfev_set_shutdown(&fd->read_closure, GRPC_ERROR_REF(why))) { shutdown(fd->fd, SHUT_RDWR); - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); + grpc_lfev_set_shutdown(&fd->write_closure, GRPC_ERROR_REF(why)); } GRPC_ERROR_UNREF(why); } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); +static void fd_notify_on_read(grpc_fd *fd, grpc_closure *closure) { + grpc_lfev_notify_on(&fd->read_closure, closure, "read"); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); +static void fd_notify_on_write(grpc_fd *fd, grpc_closure *closure) { + grpc_lfev_notify_on(&fd->write_closure, closure, "write"); } 
/******************************************************************************* @@ -1024,11 +1017,11 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { } /* p->mu must be held before calling this function */ -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, +static grpc_error *pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) { GPR_TIMER_BEGIN("pollset_kick", 0); grpc_error *error = GRPC_ERROR_NONE; - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); const char *err_desc = "Kick Failure"; grpc_pollset_worker *worker = specific_worker; if (worker != NULL) { @@ -1092,10 +1085,9 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->shutdown_done = NULL; } -static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, - grpc_millis millis) { +static int poll_deadline_to_millis_timeout(grpc_millis millis) { if (millis == GRPC_MILLIS_INF_FUTURE) return -1; - grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx); + grpc_millis delta = millis - grpc_exec_ctx_now(); if (delta > INT_MAX) return INT_MAX; else if (delta < 0) @@ -1104,9 +1096,8 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, return (int)delta; } -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); +static void fd_become_readable(grpc_fd *fd, grpc_pollset *notifier) { + grpc_lfev_set_ready(&fd->read_closure, "read"); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it is in two epoll @@ -1117,39 +1108,36 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); +static 
void fd_become_writable(grpc_fd *fd) { + grpc_lfev_set_ready(&fd->write_closure, "write"); } -static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, - grpc_pollset *ps, +static void pollset_release_polling_island(grpc_pollset *ps, const char *reason) { if (ps->po.pi != NULL) { - PI_UNREF(exec_ctx, ps->po.pi, reason); + PI_UNREF(ps->po.pi, reason); } ps->po.pi = NULL; } -static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static void finish_shutdown_locked(grpc_pollset *pollset) { /* The pollset cannot have any workers if we are at this stage */ GPR_ASSERT(!pollset_has_workers(pollset)); pollset->finish_shutdown_called = true; /* Release the ref and set pollset->po.pi to NULL */ - pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown"); - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); + pollset_release_polling_island(pollset, "ps_shutdown"); + GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE); } /* pollset->po.mu lock must be held by the caller before calling this */ -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +static void pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { GPR_TIMER_BEGIN("pollset_shutdown", 0); GPR_ASSERT(!pollset->shutting_down); pollset->shutting_down = true; pollset->shutdown_done = closure; - pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST); + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); /* If the pollset has any workers, we cannot call finish_shutdown_locked() because it would release the underlying polling island. 
In such a case, we @@ -1157,7 +1145,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (!pollset_has_workers(pollset)) { GPR_ASSERT(!pollset->finish_shutdown_called); GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); + finish_shutdown_locked(pollset); } GPR_TIMER_END("pollset_shutdown", 0); } @@ -1165,15 +1153,14 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, /* pollset_shutdown is guaranteed to be called before pollset_destroy. So other * than destroying the mutexes, there is nothing special that needs to be done * here */ -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset *pollset) { GPR_ASSERT(!pollset_has_workers(pollset)); gpr_mu_destroy(&pollset->po.mu); } #define GRPC_EPOLL_MAX_EVENTS 100 /* Note: sig_mask contains the signal mask to use *during* epoll_wait() */ -static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, +static void pollset_work_and_unlock(grpc_pollset *pollset, grpc_pollset_worker *worker, int timeout_ms, sigset_t *sig_mask, grpc_error **error) { struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS]; @@ -1195,7 +1182,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, this function (i.e pollset_work_and_unlock()) is called */ if (pollset->po.pi == NULL) { - pollset->po.pi = polling_island_create(exec_ctx, NULL, error); + pollset->po.pi = polling_island_create(NULL, error); if (pollset->po.pi == NULL) { GPR_TIMER_END("pollset_work_and_unlock", 0); return; /* Fatal error. 
We cannot continue */ @@ -1215,7 +1202,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the polling island to be deleted */ PI_ADD_REF(pi, "ps"); - PI_UNREF(exec_ctx, pollset->po.pi, "ps"); + PI_UNREF(pollset->po.pi, "ps"); pollset->po.pi = pi; } @@ -1229,10 +1216,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, g_current_thread_polling_island = pi; GRPC_SCHEDULING_START_BLOCKING_REGION; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask); - GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(); if (ep_rv < 0) { if (errno != EINTR) { gpr_asprintf(&err_msg, @@ -1270,10 +1257,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); int write_ev = ep_ev[i].events & EPOLLOUT; if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); + fd_become_readable(fd, pollset); } if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); + fd_become_writable(fd); } } } @@ -1288,7 +1275,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, that we got before releasing the polling island lock). 
This is because pollset->po.pi pointer might get udpated in other parts of the code when there is an island merge while we are doing epoll_wait() above */ - PI_UNREF(exec_ctx, pi, "ps_work"); + PI_UNREF(pi, "ps_work"); GPR_TIMER_END("pollset_work_and_unlock", 0); } @@ -1297,12 +1284,12 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +static grpc_error *pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { GPR_TIMER_BEGIN("pollset_work", 0); grpc_error *error = GRPC_ERROR_NONE; - int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline); + int timeout_ms = poll_deadline_to_millis_timeout(deadline); sigset_t new_mask; @@ -1360,9 +1347,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, push_front_worker(pollset, &worker); /* Add worker to pollset */ - pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms, - &g_orig_sigmask, &error); - grpc_exec_ctx_flush(exec_ctx); + pollset_work_and_unlock(pollset, &worker, timeout_ms, &g_orig_sigmask, + &error); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->po.mu); @@ -1382,10 +1369,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->shutting_down && !pollset_has_workers(pollset) && !pollset->finish_shutdown_called) { GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); + finish_shutdown_locked(pollset); gpr_mu_unlock(&pollset->po.mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->po.mu); } @@ -1400,9 +1387,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, return error; 
} -static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, - poll_obj_type bag_type, poll_obj *item, - poll_obj_type item_type) { +static void add_poll_object(poll_obj *bag, poll_obj_type bag_type, + poll_obj *item, poll_obj_type item_type) { GPR_TIMER_BEGIN("add_poll_object", 0); #ifndef NDEBUG @@ -1452,7 +1438,7 @@ retry: keeping TSAN happy outweigh any performance advantage we might have by keeping the lock held. */ gpr_mu_unlock(&item->mu); - pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error); + pi_new = polling_island_create(FD_FROM_PO(item), &error); gpr_mu_lock(&item->mu); /* Need to reverify any assumptions made between the initial lock and @@ -1471,11 +1457,11 @@ retry: /* Ref and unref so that the polling island gets deleted during unref */ PI_ADD_REF(pi_new, "dance_of_destruction"); - PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); + PI_UNREF(pi_new, "dance_of_destruction"); goto retry; } } else { - pi_new = polling_island_create(exec_ctx, NULL, &error); + pi_new = polling_island_create(NULL, &error); } GRPC_POLLING_TRACE( @@ -1530,7 +1516,7 @@ retry: if (item->pi != pi_new) { PI_ADD_REF(pi_new, poll_obj_string(item_type)); if (item->pi != NULL) { - PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type)); + PI_UNREF(item->pi, poll_obj_string(item_type)); } item->pi = pi_new; } @@ -1538,7 +1524,7 @@ retry: if (bag->pi != pi_new) { PI_ADD_REF(pi_new, poll_obj_string(bag_type)); if (bag->pi != NULL) { - PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type)); + PI_UNREF(bag->pi, poll_obj_string(bag_type)); } bag->pi = pi_new; } @@ -1550,10 +1536,8 @@ retry: GPR_TIMER_END("add_poll_object", 0); } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po, - POLL_OBJ_FD); +static void pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { + add_poll_object(&pollset->po, POLL_OBJ_POLLSET, &fd->po, POLL_OBJ_FD); } 
/******************************************************************************* @@ -1570,48 +1554,39 @@ static grpc_pollset_set *pollset_set_create(void) { return pss; } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) { +static void pollset_set_destroy(grpc_pollset_set *pss) { gpr_mu_destroy(&pss->po.mu); if (pss->po.pi != NULL) { - PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy"); + PI_UNREF(pss->po.pi, "pss_destroy"); } gpr_free(pss); } -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po, - POLL_OBJ_FD); +static void pollset_set_add_fd(grpc_pollset_set *pss, grpc_fd *fd) { + add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &fd->po, POLL_OBJ_FD); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { +static void pollset_set_del_fd(grpc_pollset_set *pss, grpc_fd *fd) { /* Nothing to do */ } -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po, - POLL_OBJ_POLLSET); +static void pollset_set_add_pollset(grpc_pollset_set *pss, grpc_pollset *ps) { + add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &ps->po, POLL_OBJ_POLLSET); } -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { +static void pollset_set_del_pollset(grpc_pollset_set *pss, grpc_pollset *ps) { /* Nothing to do */ } -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_add_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { - add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po, + add_poll_object(&bag->po, POLL_OBJ_POLLSET_SET, &item->po, POLL_OBJ_POLLSET_SET); } -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void 
pollset_set_del_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { /* Nothing to do */ } diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc index 036a35690c..b61ecf7c42 100644 --- a/src/core/lib/iomgr/ev_poll_posix.cc +++ b/src/core/lib/iomgr/ev_poll_posix.cc @@ -128,8 +128,7 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, MUST NOT be called with a pollset lock taken if got_read or got_write are 1, also does the become_{readable,writable} as appropriate. */ -static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec, - int got_read, int got_write, +static void fd_end_poll(grpc_fd_watcher *rec, int got_read, int got_write, grpc_pollset *read_notifier_pollset); /* Return 1 if this fd is orphaned, 0 otherwise */ @@ -186,11 +185,9 @@ struct grpc_pollset { }; /* Add an fd to a pollset */ -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd); +static void pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd); -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); +static void pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd); /* Convert a timespec to milliseconds: - very small or negative poll times are clamped to zero to do a @@ -199,8 +196,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - longer than a millisecond polls are rounded up to the next nearest millisecond to avoid spinning - infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, - grpc_millis deadline); +static int poll_deadline_to_millis_timeout(grpc_millis deadline); /* Allow kick to wakeup the currently polling worker */ #define GRPC_POLLSET_CAN_KICK_SELF 1 @@ -208,7 +204,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, #define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2 /* As per pollset_kick, with an extended set of flags 
(defined above) -- mostly for fd_posix's use. */ -static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, +static grpc_error *pollset_kick_ext(grpc_pollset *p, grpc_pollset_worker *specific_worker, uint32_t flags) GRPC_MUST_USE_RESULT; @@ -353,8 +349,7 @@ static bool fd_is_orphaned(grpc_fd *fd) { } /* Return the read-notifier pollset */ -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static grpc_pollset *fd_get_read_notifier_pollset(grpc_fd *fd) { grpc_pollset *notifier = NULL; gpr_mu_lock(&fd->mu); @@ -364,39 +359,36 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, return notifier; } -static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx, - grpc_fd_watcher *watcher) { +static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) { gpr_mu_lock(&watcher->pollset->mu); GPR_ASSERT(watcher->worker); - grpc_error *err = - pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker, - GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP); + grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker, + GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP); gpr_mu_unlock(&watcher->pollset->mu); return err; } -static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static void maybe_wake_one_watcher_locked(grpc_fd *fd) { if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) { - pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next); + pollset_kick_locked(fd->inactive_watcher_root.next); } else if (fd->read_watcher) { - pollset_kick_locked(exec_ctx, fd->read_watcher); + pollset_kick_locked(fd->read_watcher); } else if (fd->write_watcher) { - pollset_kick_locked(exec_ctx, fd->write_watcher); + pollset_kick_locked(fd->write_watcher); } } -static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { +static void wake_all_watchers_locked(grpc_fd *fd) { grpc_fd_watcher *watcher; for (watcher = 
fd->inactive_watcher_root.next; watcher != &fd->inactive_watcher_root; watcher = watcher->next) { - pollset_kick_locked(exec_ctx, watcher); + pollset_kick_locked(watcher); } if (fd->read_watcher) { - pollset_kick_locked(exec_ctx, fd->read_watcher); + pollset_kick_locked(fd->read_watcher); } if (fd->write_watcher && fd->write_watcher != fd->read_watcher) { - pollset_kick_locked(exec_ctx, fd->write_watcher); + pollset_kick_locked(fd->write_watcher); } } @@ -405,12 +397,12 @@ static int has_watchers(grpc_fd *fd) { fd->inactive_watcher_root.next != &fd->inactive_watcher_root; } -static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { +static void close_fd_locked(grpc_fd *fd) { fd->closed = 1; if (!fd->released) { close(fd->fd); } - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE); } static int fd_wrapped_fd(grpc_fd *fd) { @@ -421,8 +413,7 @@ static int fd_wrapped_fd(grpc_fd *fd) { } } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, +static void fd_orphan(grpc_fd *fd, grpc_closure *on_done, int *release_fd, bool already_closed, const char *reason) { fd->on_done_closure = on_done; fd->released = release_fd != NULL; @@ -435,9 +426,9 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_mu_lock(&fd->mu); REF_BY(fd, 1, reason); /* remove active status, but keep referenced */ if (!has_watchers(fd)) { - close_fd_locked(exec_ctx, fd); + close_fd_locked(fd); } else { - wake_all_watchers_locked(exec_ctx, fd); + wake_all_watchers_locked(fd); } gpr_mu_unlock(&fd->mu); UNREF_BY(fd, 2, reason); /* drop the reference */ @@ -469,10 +460,10 @@ static grpc_error *fd_shutdown_error(grpc_fd *fd) { } } -static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure **st, grpc_closure *closure) { +static void notify_on_locked(grpc_fd *fd, grpc_closure **st, + grpc_closure *closure) { if (fd->shutdown) { - 
GRPC_CLOSURE_SCHED(exec_ctx, closure, + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown")); } else if (*st == CLOSURE_NOT_READY) { /* not ready ==> switch to a waiting state by setting the closure */ @@ -480,8 +471,8 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } else if (*st == CLOSURE_READY) { /* already ready ==> queue the closure to run immediately */ *st = CLOSURE_NOT_READY; - GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd)); - maybe_wake_one_watcher_locked(exec_ctx, fd); + GRPC_CLOSURE_SCHED(closure, fd_shutdown_error(fd)); + maybe_wake_one_watcher_locked(fd); } else { /* upcallptr was set to a different closure. This is an error! */ gpr_log(GPR_ERROR, @@ -492,8 +483,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } /* returns 1 if state becomes not ready */ -static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure **st) { +static int set_ready_locked(grpc_fd *fd, grpc_closure **st) { if (*st == CLOSURE_READY) { /* duplicate ready ==> ignore */ return 0; @@ -503,18 +493,18 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, return 0; } else { /* waiting ==> queue closure */ - GRPC_CLOSURE_SCHED(exec_ctx, *st, fd_shutdown_error(fd)); + GRPC_CLOSURE_SCHED(*st, fd_shutdown_error(fd)); *st = CLOSURE_NOT_READY; return 1; } } static void set_read_notifier_pollset_locked( - grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) { + grpc_fd *fd, grpc_pollset *read_notifier_pollset) { fd->read_notifier_pollset = read_notifier_pollset; } -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { +static void fd_shutdown(grpc_fd *fd, grpc_error *why) { gpr_mu_lock(&fd->mu); /* only shutdown once */ if (!fd->shutdown) { @@ -522,8 +512,8 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { fd->shutdown_error = why; /* signal read/write closed to OS so that future 
operations fail */ shutdown(fd->fd, SHUT_RDWR); - set_ready_locked(exec_ctx, fd, &fd->read_closure); - set_ready_locked(exec_ctx, fd, &fd->write_closure); + set_ready_locked(fd, &fd->read_closure); + set_ready_locked(fd, &fd->write_closure); } else { GRPC_ERROR_UNREF(why); } @@ -537,17 +527,15 @@ static bool fd_is_shutdown(grpc_fd *fd) { return r; } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { +static void fd_notify_on_read(grpc_fd *fd, grpc_closure *closure) { gpr_mu_lock(&fd->mu); - notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); + notify_on_locked(fd, &fd->read_closure, closure); gpr_mu_unlock(&fd->mu); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { +static void fd_notify_on_write(grpc_fd *fd, grpc_closure *closure) { gpr_mu_lock(&fd->mu); - notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); + notify_on_locked(fd, &fd->write_closure, closure); gpr_mu_unlock(&fd->mu); } @@ -602,8 +590,7 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, return mask; } -static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, - int got_read, int got_write, +static void fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write, grpc_pollset *read_notifier_pollset) { int was_polling = 0; int kick = 0; @@ -637,23 +624,23 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, watcher->prev->next = watcher->next; } if (got_read) { - if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) { + if (set_ready_locked(fd, &fd->read_closure)) { kick = 1; } if (read_notifier_pollset != NULL) { - set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset); + set_read_notifier_pollset_locked(fd, read_notifier_pollset); } } if (got_write) { - if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) { + if (set_ready_locked(fd, &fd->write_closure)) { kick = 1; } } if (kick) { - 
maybe_wake_one_watcher_locked(exec_ctx, fd); + maybe_wake_one_watcher_locked(fd); } if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) { - close_fd_locked(exec_ctx, fd); + close_fd_locked(fd); } gpr_mu_unlock(&fd->mu); @@ -714,12 +701,12 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) { *composite = grpc_error_add_child(*composite, error); } -static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, +static grpc_error *pollset_kick_ext(grpc_pollset *p, grpc_pollset_worker *specific_worker, uint32_t flags) { GPR_TIMER_BEGIN("pollset_kick_ext", 0); grpc_error *error = GRPC_ERROR_NONE; - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); /* pollset->mu already held */ if (specific_worker != NULL) { @@ -785,9 +772,9 @@ static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, return error; } -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, +static grpc_error *pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) { - return pollset_kick_ext(exec_ctx, p, specific_worker, 0); + return pollset_kick_ext(p, specific_worker, 0); } /* global state management */ @@ -821,7 +808,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->pollset_set_count = 0; } -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset *pollset) { GPR_ASSERT(!pollset_has_workers(pollset)); GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail); while (pollset->local_wakeup_cache) { @@ -834,8 +821,7 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->mu); } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { +static void pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { gpr_mu_lock(&pollset->mu); size_t i; /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */ 
@@ -850,19 +836,19 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } pollset->fds[pollset->fd_count++] = fd; GRPC_FD_REF(fd, "multipoller"); - pollset_kick(exec_ctx, pollset, NULL); + pollset_kick(pollset, NULL); exit: gpr_mu_unlock(&pollset->mu); } -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void finish_shutdown(grpc_pollset *pollset) { GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs)); size_t i; for (i = 0; i < pollset->fd_count; i++) { GRPC_FD_UNREF(pollset->fds[i], "multipoller"); } pollset->fd_count = 0; - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE); } static void work_combine_error(grpc_error **composite, grpc_error *error) { @@ -873,7 +859,7 @@ static void work_combine_error(grpc_error **composite, grpc_error *error) { *composite = grpc_error_add_child(*composite, error); } -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +static grpc_error *pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { grpc_pollset_worker worker; @@ -912,7 +898,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (!pollset_has_workers(pollset) && !grpc_closure_list_empty(pollset->idle_jobs)) { GPR_TIMER_MARK("pollset_work.idle_jobs", 0); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs); + GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs); goto done; } /* If we're shutting down then we don't execute any extended work */ @@ -944,7 +930,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd_watcher *watchers; struct pollfd *pfds; - timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline); + timeout = poll_deadline_to_millis_timeout(deadline); if (pollset->fd_count + 2 <= inline_elements) { pfds = pollfd_space; @@ -988,9 +974,9 @@ static grpc_error 
*pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid even going into the blocking annotation if possible */ GRPC_SCHEDULING_START_BLOCKING_REGION; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); r = grpc_poll_function(pfds, pfd_count, timeout); - GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx); + GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(); if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r); @@ -1003,16 +989,16 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, for (i = 1; i < pfd_count; i++) { if (watchers[i].fd == NULL) { - fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + fd_end_poll(&watchers[i], 0, 0, NULL); } else { // Wake up all the file descriptors, if we have an invalid one // we can identify it on the next pollset_work() - fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset); + fd_end_poll(&watchers[i], 1, 1, pollset); } } } else if (r == 0) { for (i = 1; i < pfd_count; i++) { - fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + fd_end_poll(&watchers[i], 0, 0, NULL); } } else { if (pfds[0].revents & POLLIN_CHECK) { @@ -1024,14 +1010,14 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } for (i = 1; i < pfd_count; i++) { if (watchers[i].fd == NULL) { - fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + fd_end_poll(&watchers[i], 0, 0, NULL); } else { if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset, pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0, (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents); } - fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK, + fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK, pfds[i].revents & POLLOUT_CHECK, pollset); } } @@ -1054,7 +1040,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, 
worker list, which means nobody could ask us to re-evaluate polling). */ done: if (!locked) { - queued_work |= grpc_exec_ctx_flush(exec_ctx); + queued_work |= grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->mu); locked = 1; } @@ -1083,21 +1069,21 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, /* check shutdown conditions */ if (pollset->shutting_down) { if (pollset_has_workers(pollset)) { - pollset_kick(exec_ctx, pollset, NULL); + pollset_kick(pollset, NULL); } else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); - finish_shutdown(exec_ctx, pollset); - grpc_exec_ctx_flush(exec_ctx); + finish_shutdown(pollset); + grpc_exec_ctx_flush(); /* Continuing to access pollset here is safe -- it is the caller's * responsibility to not destroy when it has outstanding calls to * pollset_work. * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */ gpr_mu_lock(&pollset->mu); } else if (!grpc_closure_list_empty(pollset->idle_jobs)) { - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs); + GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs); gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&pollset->mu); } } @@ -1107,26 +1093,24 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, return error; } -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +static void pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(!pollset->shutting_down); pollset->shutting_down = 1; pollset->shutdown_done = closure; - pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST); + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); if (!pollset_has_workers(pollset)) { - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs); + GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs); } if (!pollset->called_shutdown && 
!pollset_has_observers(pollset)) { pollset->called_shutdown = 1; - finish_shutdown(exec_ctx, pollset); + finish_shutdown(pollset); } } -static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, - grpc_millis deadline) { +static int poll_deadline_to_millis_timeout(grpc_millis deadline) { if (deadline == GRPC_MILLIS_INF_FUTURE) return -1; if (deadline == 0) return 0; - grpc_millis n = deadline - grpc_exec_ctx_now(exec_ctx); + grpc_millis n = deadline - grpc_exec_ctx_now(); if (n < 0) return 0; if (n > INT_MAX) return -1; return (int)n; @@ -1143,8 +1127,7 @@ static grpc_pollset_set *pollset_set_create(void) { return pollset_set; } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set) { +static void pollset_set_destroy(grpc_pollset_set *pollset_set) { size_t i; gpr_mu_destroy(&pollset_set->mu); for (i = 0; i < pollset_set->fd_count; i++) { @@ -1159,7 +1142,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); - finish_shutdown(exec_ctx, pollset); + finish_shutdown(pollset); } else { gpr_mu_unlock(&pollset->mu); } @@ -1170,8 +1153,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, gpr_free(pollset_set); } -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, +static void pollset_set_add_pollset(grpc_pollset_set *pollset_set, grpc_pollset *pollset) { size_t i, j; gpr_mu_lock(&pollset->mu); @@ -1190,7 +1172,7 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, if (fd_is_orphaned(pollset_set->fds[i])) { GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); } else { - pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]); + pollset_add_fd(pollset, pollset_set->fds[i]); pollset_set->fds[j++] = pollset_set->fds[i]; } } @@ -1198,8 +1180,7 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&pollset_set->mu); } -static void 
pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, +static void pollset_set_del_pollset(grpc_pollset_set *pollset_set, grpc_pollset *pollset) { size_t i; gpr_mu_lock(&pollset_set->mu); @@ -1219,14 +1200,13 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); - finish_shutdown(exec_ctx, pollset); + finish_shutdown(pollset); } else { gpr_mu_unlock(&pollset->mu); } } -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_add_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { size_t i, j; gpr_mu_lock(&bag->mu); @@ -1241,7 +1221,7 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, if (fd_is_orphaned(bag->fds[i])) { GRPC_FD_UNREF(bag->fds[i], "pollset_set"); } else { - pollset_set_add_fd(exec_ctx, item, bag->fds[i]); + pollset_set_add_fd(item, bag->fds[i]); bag->fds[j++] = bag->fds[i]; } } @@ -1249,8 +1229,7 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&bag->mu); } -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +static void pollset_set_del_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { size_t i; gpr_mu_lock(&bag->mu); @@ -1265,8 +1244,7 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&bag->mu); } -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { +static void pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) { size_t i; gpr_mu_lock(&pollset_set->mu); if (pollset_set->fd_count == pollset_set->fd_capacity) { @@ -1277,16 +1255,15 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, GRPC_FD_REF(fd, "pollset_set"); pollset_set->fds[pollset_set->fd_count++] = fd; for (i = 0; i < pollset_set->pollset_count; i++) { - pollset_add_fd(exec_ctx, 
pollset_set->pollsets[i], fd); + pollset_add_fd(pollset_set->pollsets[i], fd); } for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + pollset_set_add_fd(pollset_set->pollset_sets[i], fd); } gpr_mu_unlock(&pollset_set->mu); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { +static void pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) { size_t i; gpr_mu_lock(&pollset_set->mu); for (i = 0; i < pollset_set->fd_count; i++) { @@ -1299,7 +1276,7 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, } } for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + pollset_set_del_fd(pollset_set->pollset_sets[i], fd); } gpr_mu_unlock(&pollset_set->mu); } diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc index 3a1dd8d30b..3bc34575a1 100644 --- a/src/core/lib/iomgr/ev_posix.cc +++ b/src/core/lib/iomgr/ev_posix.cc @@ -196,28 +196,25 @@ int grpc_fd_wrapped_fd(grpc_fd *fd) { return g_event_engine->fd_wrapped_fd(fd); } -void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, - int *release_fd, bool already_closed, const char *reason) { - g_event_engine->fd_orphan(exec_ctx, fd, on_done, release_fd, already_closed, - reason); +void grpc_fd_orphan(grpc_fd *fd, grpc_closure *on_done, int *release_fd, + bool already_closed, const char *reason) { + g_event_engine->fd_orphan(fd, on_done, release_fd, already_closed, reason); } -void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - g_event_engine->fd_shutdown(exec_ctx, fd, why); +void grpc_fd_shutdown(grpc_fd *fd, grpc_error *why) { + g_event_engine->fd_shutdown(fd, why); } bool grpc_fd_is_shutdown(grpc_fd *fd) { return g_event_engine->fd_is_shutdown(fd); } -void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - 
g_event_engine->fd_notify_on_read(exec_ctx, fd, closure); +void grpc_fd_notify_on_read(grpc_fd *fd, grpc_closure *closure) { + g_event_engine->fd_notify_on_read(fd, closure); } -void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - g_event_engine->fd_notify_on_write(exec_ctx, fd, closure); +void grpc_fd_notify_on_write(grpc_fd *fd, grpc_closure *closure) { + g_event_engine->fd_notify_on_write(fd, closure); } size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; } @@ -226,72 +223,63 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { g_event_engine->pollset_init(pollset, mu); } -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - g_event_engine->pollset_shutdown(exec_ctx, pollset, closure); +void grpc_pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { + g_event_engine->pollset_shutdown(pollset, closure); } -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - g_event_engine->pollset_destroy(exec_ctx, pollset); +void grpc_pollset_destroy(grpc_pollset *pollset) { + g_event_engine->pollset_destroy(pollset); } -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker, grpc_millis deadline) { - return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline); + return g_event_engine->pollset_work(pollset, worker, deadline); } -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { - return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker); + return g_event_engine->pollset_kick(pollset, specific_worker); } -void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd) { - g_event_engine->pollset_add_fd(exec_ctx, pollset, fd); +void 
grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd) { + g_event_engine->pollset_add_fd(pollset, fd); } grpc_pollset_set *grpc_pollset_set_create(void) { return g_event_engine->pollset_set_create(); } -void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set) { - g_event_engine->pollset_set_destroy(exec_ctx, pollset_set); +void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) { + g_event_engine->pollset_set_destroy(pollset_set); } -void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, +void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set, grpc_pollset *pollset) { - g_event_engine->pollset_set_add_pollset(exec_ctx, pollset_set, pollset); + g_event_engine->pollset_set_add_pollset(pollset_set, pollset); } -void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, +void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set, grpc_pollset *pollset) { - g_event_engine->pollset_set_del_pollset(exec_ctx, pollset_set, pollset); + g_event_engine->pollset_set_del_pollset(pollset_set, pollset); } -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +void grpc_pollset_set_add_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { - g_event_engine->pollset_set_add_pollset_set(exec_ctx, bag, item); + g_event_engine->pollset_set_add_pollset_set(bag, item); } -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +void grpc_pollset_set_del_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item) { - g_event_engine->pollset_set_del_pollset_set(exec_ctx, bag, item); + g_event_engine->pollset_set_del_pollset_set(bag, item); } -void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - g_event_engine->pollset_set_add_fd(exec_ctx, pollset_set, fd); +void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) { + 
g_event_engine->pollset_set_add_fd(pollset_set, fd); } -void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd); +void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) { + g_event_engine->pollset_set_del_fd(pollset_set, fd); } #endif // GRPC_POSIX_SOCKET diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index bc4456c2a2..5f9511bf19 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -40,48 +40,36 @@ typedef struct grpc_event_engine_vtable { grpc_fd *(*fd_create)(int fd, const char *name); int (*fd_wrapped_fd)(grpc_fd *fd); - void (*fd_orphan)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, - int *release_fd, bool already_closed, const char *reason); - void (*fd_shutdown)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why); - void (*fd_notify_on_read)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); - void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); + void (*fd_orphan)(grpc_fd *fd, grpc_closure *on_done, int *release_fd, + bool already_closed, const char *reason); + void (*fd_shutdown)(grpc_fd *fd, grpc_error *why); + void (*fd_notify_on_read)(grpc_fd *fd, grpc_closure *closure); + void (*fd_notify_on_write)(grpc_fd *fd, grpc_closure *closure); bool (*fd_is_shutdown)(grpc_fd *fd); - grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx, - grpc_fd *fd); + grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_fd *fd); void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu); - void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure); - void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); - grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + void (*pollset_shutdown)(grpc_pollset *pollset, grpc_closure *closure); + void 
(*pollset_destroy)(grpc_pollset *pollset); + grpc_error *(*pollset_work)(grpc_pollset *pollset, grpc_pollset_worker **worker, grpc_millis deadline); - grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_error *(*pollset_kick)(grpc_pollset *pollset, grpc_pollset_worker *specific_worker); - void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd); + void (*pollset_add_fd)(grpc_pollset *pollset, struct grpc_fd *fd); grpc_pollset_set *(*pollset_set_create)(void); - void (*pollset_set_destroy)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set); - void (*pollset_set_add_pollset)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, + void (*pollset_set_destroy)(grpc_pollset_set *pollset_set); + void (*pollset_set_add_pollset)(grpc_pollset_set *pollset_set, grpc_pollset *pollset); - void (*pollset_set_del_pollset)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, + void (*pollset_set_del_pollset)(grpc_pollset_set *pollset_set, grpc_pollset *pollset); - void (*pollset_set_add_pollset_set)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, + void (*pollset_set_add_pollset_set)(grpc_pollset_set *bag, grpc_pollset_set *item); - void (*pollset_set_del_pollset_set)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, + void (*pollset_set_del_pollset_set)(grpc_pollset_set *bag, grpc_pollset_set *item); - void (*pollset_set_add_fd)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); - void (*pollset_set_del_fd)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); + void (*pollset_set_add_fd)(grpc_pollset_set *pollset_set, grpc_fd *fd); + void (*pollset_set_del_fd)(grpc_pollset_set *pollset_set, grpc_fd *fd); void (*shutdown_engine)(void); } grpc_event_engine_vtable; @@ -107,14 +95,14 @@ int grpc_fd_wrapped_fd(grpc_fd *fd); Requires: *fd initialized; no outstanding notify_on_read or notify_on_write. 
MUST NOT be called with a pollset lock taken */ -void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, - int *release_fd, bool already_closed, const char *reason); +void grpc_fd_orphan(grpc_fd *fd, grpc_closure *on_done, int *release_fd, + bool already_closed, const char *reason); /* Has grpc_fd_shutdown been called on an fd? */ bool grpc_fd_is_shutdown(grpc_fd *fd); /* Cause any current and future callbacks to fail. */ -void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why); +void grpc_fd_shutdown(grpc_fd *fd, grpc_error *why); /* Register read interest, causing read_cb to be called once when fd becomes readable, on deadline specified by deadline, or on shutdown triggered by @@ -129,29 +117,23 @@ void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why); underlying platform. This means that users must drain fd in read_cb before calling notify_on_read again. Users are also expected to handle spurious events, i.e read_cb is called while nothing can be readable from fd */ -void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); +void grpc_fd_notify_on_read(grpc_fd *fd, grpc_closure *closure); /* Exactly the same semantics as above, except based on writable events. 
*/ -void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); +void grpc_fd_notify_on_write(grpc_fd *fd, grpc_closure *closure); /* Return the read notifier pollset from the fd */ -grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd); +grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_fd *fd); /* pollset_posix functions */ /* Add an fd to a pollset */ -void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd); +void grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd); /* pollset_set_posix functions */ -void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); -void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); +void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd); +void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd); /* override to allow tests to hook poll() usage */ typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int); diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc index 3d17afcb8f..0b346cfec7 100644 --- a/src/core/lib/iomgr/exec_ctx.cc +++ b/src/core/lib/iomgr/exec_ctx.cc @@ -25,10 +25,46 @@ #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/profiling/timers.h" -bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) { +thread_local ExecCtx *exec_ctx = nullptr; + +ExecCtx::ExecCtx() + : closure_list(GRPC_CLOSURE_LIST_INIT), + active_combiner(nullptr), + last_combiner(nullptr), + flags(GRPC_EXEC_CTX_FLAG_IS_FINISHED), + starting_cpu(gpr_cpu_current_cpu()), + check_ready_to_finish_arg(nullptr), + check_ready_to_finish(nullptr), + now_is_valid(false), + now(0), + last_exec_ctx(exec_ctx) { + exec_ctx = this; +} + +ExecCtx::ExecCtx(uintptr_t fl, bool (*finish_check)(void *arg), + void *finish_check_arg) + : closure_list(GRPC_CLOSURE_LIST_INIT), + 
active_combiner(nullptr), + last_combiner(nullptr), + flags(fl), + starting_cpu(gpr_cpu_current_cpu()), + check_ready_to_finish_arg(finish_check_arg), + check_ready_to_finish(finish_check), + now_is_valid(false), + now(0), + last_exec_ctx(exec_ctx) { + exec_ctx = this; +} + +ExecCtx::~ExecCtx() { + GPR_ASSERT(exec_ctx == this); + grpc_exec_ctx_finish(); + exec_ctx = last_exec_ctx; +} + +bool grpc_exec_ctx_ready_to_finish() { if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) { - if (exec_ctx->check_ready_to_finish(exec_ctx, - exec_ctx->check_ready_to_finish_arg)) { + if (exec_ctx->check_ready_to_finish(exec_ctx->check_ready_to_finish_arg)) { exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED; return true; } @@ -38,26 +74,21 @@ bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) { } } -bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) { - return false; -} +bool grpc_never_ready_to_finish(void *arg_ignored) { return false; } -bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) { - return true; -} +bool grpc_always_ready_to_finish(void *arg_ignored) { return true; } -bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx) { +bool grpc_exec_ctx_has_work() { return exec_ctx->active_combiner != NULL || !grpc_closure_list_empty(exec_ctx->closure_list); } -void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) { +void grpc_exec_ctx_finish() { exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED; - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); } -static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { +static void exec_ctx_run(grpc_closure *closure, grpc_error *error) { #ifndef NDEBUG closure->scheduled = false; if (GRPC_TRACER_ON(grpc_trace_closure)) { @@ -67,7 +98,7 @@ static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, closure->line_initiated); } #endif - closure->cb(exec_ctx, closure->cb_arg, error); + closure->cb(closure->cb_arg, error); 
#ifndef NDEBUG if (GRPC_TRACER_ON(grpc_trace_closure)) { gpr_log(GPR_DEBUG, "closure %p finished", closure); @@ -76,7 +107,7 @@ static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, GRPC_ERROR_UNREF(error); } -bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { +bool grpc_exec_ctx_flush() { bool did_something = 0; GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0); for (;;) { @@ -87,10 +118,10 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { grpc_closure *next = c->next_data.next; grpc_error *error = c->error_data.error; did_something = true; - exec_ctx_run(exec_ctx, c, error); + exec_ctx_run(c, error); c = next; } - } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) { + } else if (!grpc_combiner_continue_exec_ctx()) { break; } } @@ -99,8 +130,7 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return did_something; } -static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { +static void exec_ctx_sched(grpc_closure *closure, grpc_error *error) { grpc_closure_list_append(&exec_ctx->closure_list, closure, error); } @@ -138,7 +168,7 @@ static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) { return (gpr_atm)x; } -grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) { +grpc_millis grpc_exec_ctx_now() { if (!exec_ctx->now_is_valid) { exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC)); exec_ctx->now_is_valid = true; @@ -146,9 +176,7 @@ grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) { return exec_ctx->now; } -void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx) { - exec_ctx->now_is_valid = false; -} +void grpc_exec_ctx_invalidate_now() { exec_ctx->now_is_valid = false; } gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock_type) { diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h index 44b9be7aa9..f79c7ae89a 100644 --- a/src/core/lib/iomgr/exec_ctx.h +++ b/src/core/lib/iomgr/exec_ctx.h @@ -21,6 +21,7 @@ #include 
<grpc/support/atm.h> #include <grpc/support/cpu.h> +#include <grpc/support/log.h> #include "src/core/lib/iomgr/closure.h" @@ -74,56 +75,87 @@ struct grpc_exec_ctx { uintptr_t flags; unsigned starting_cpu; void *check_ready_to_finish_arg; - bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg); + bool (*check_ready_to_finish)(void *arg); bool now_is_valid; grpc_millis now; + const char *creator; }; -/* initializer for grpc_exec_ctx: - prefer to use GRPC_EXEC_CTX_INIT whenever possible */ -#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \ - { \ - GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \ - finish_check_arg, finish_check, false, 0 \ - } - -/* initialize an execution context at the top level of an API call into grpc - (this is safe to use elsewhere, though possibly not as efficient) */ -#define GRPC_EXEC_CTX_INIT \ - GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL) - extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx; -bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx); +bool grpc_exec_ctx_has_work(); /** Flush any work that has been enqueued onto this grpc_exec_ctx. * Caller must guarantee that no interfering locks are held. * Returns true if work was performed, false otherwise. */ -bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx); +bool grpc_exec_ctx_flush(); /** Finish any pending work for a grpc_exec_ctx. Must be called before * the instance is destroyed, or work may be lost. 
*/ -void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx); +void grpc_exec_ctx_finish(); /** Returns true if we'd like to leave this execution context as soon as possible: useful for deciding whether to do something more or not depending on outside context */ -bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx); +bool grpc_exec_ctx_ready_to_finish(); /** A finish check that is never ready to finish */ -bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored); +bool grpc_never_ready_to_finish(void *arg_ignored); /** A finish check that is always ready to finish */ -bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored); +bool grpc_always_ready_to_finish(void *arg_ignored); void grpc_exec_ctx_global_init(void); void grpc_exec_ctx_global_init(void); void grpc_exec_ctx_global_shutdown(void); -grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx); -void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx); +grpc_millis grpc_exec_ctx_now(); +void grpc_exec_ctx_invalidate_now(); gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock); grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec); grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec); +inline grpc_exec_ctx make_exec_ctx(grpc_exec_ctx r) { + grpc_exec_ctx_flush(); + return r; +} + +class ExecCtx { + public: + ExecCtx(); + ExecCtx(uintptr_t fl, bool (*finish_check)(void *arg), + void *finish_check_arg); + ~ExecCtx(); + + grpc_closure_list closure_list; + /** currently active combiner: updated only via combiner.c */ + grpc_combiner *active_combiner; + /** last active combiner in the active combiner list */ + grpc_combiner *last_combiner; + uintptr_t flags; + unsigned starting_cpu; + void *check_ready_to_finish_arg; + bool (*check_ready_to_finish)(void *arg); + + bool now_is_valid; + grpc_millis now; + + private: + ExecCtx *last_exec_ctx; +}; + +extern thread_local ExecCtx *exec_ctx; + +/* initializer for 
grpc_exec_ctx: + * prefer to use GRPC_EXEC_CTX_INIT whenever possible */ +#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \ + make_exec_ctx(grpc_exec_ctx{GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, \ + gpr_cpu_current_cpu(), finish_check_arg, \ + finish_check, false, 0, __PRETTY_FUNCTION__}) + +/* initialize an execution context at the top level of an API call into grpc + (this is safe to use elsewhere, though possibly not as efficient) */ +#define GRPC_EXEC_CTX_INIT \ + GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL) + #ifdef __cplusplus } #endif diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc index 92c3e70301..653f2fc24b 100644 --- a/src/core/lib/iomgr/executor.cc +++ b/src/core/lib/iomgr/executor.cc @@ -56,7 +56,7 @@ static grpc_tracer_flag executor_trace = static void executor_thread(void *arg); -static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { +static size_t run_closures(grpc_closure_list list) { size_t n = 0; grpc_closure *c = list.head; @@ -74,11 +74,11 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { #ifndef NDEBUG c->scheduled = false; #endif - c->cb(exec_ctx, c->cb_arg, error); + c->cb(c->cb_arg, error); GRPC_ERROR_UNREF(error); c = next; n++; - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); } return n; @@ -88,7 +88,7 @@ bool grpc_executor_is_threaded() { return gpr_atm_no_barrier_load(&g_cur_threads) > 0; } -void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { +void grpc_executor_set_threading(bool threading) { gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads); if (threading) { if (cur_threads > 0) return; @@ -126,29 +126,26 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { for (size_t i = 0; i < g_max_threads; i++) { gpr_mu_destroy(&g_thread_state[i].mu); gpr_cv_destroy(&g_thread_state[i].cv); - run_closures(exec_ctx, g_thread_state[i].elems); + 
run_closures(g_thread_state[i].elems); } gpr_free(g_thread_state); gpr_tls_destroy(&g_this_thread_state); } } -void grpc_executor_init(grpc_exec_ctx *exec_ctx) { +void grpc_executor_init() { grpc_register_tracer(&executor_trace); gpr_atm_no_barrier_store(&g_cur_threads, 0); - grpc_executor_set_threading(exec_ctx, true); + grpc_executor_set_threading(true); } -void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) { - grpc_executor_set_threading(exec_ctx, false); -} +void grpc_executor_shutdown() { grpc_executor_set_threading(false); } static void executor_thread(void *arg) { thread_state *ts = (thread_state *)arg; gpr_tls_set(&g_this_thread_state, (intptr_t)ts); - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); + ExecCtx _local_exec_ctx(0, grpc_never_ready_to_finish, NULL); size_t subtract_depth = 0; for (;;) { @@ -170,7 +167,7 @@ static void executor_thread(void *arg) { gpr_mu_unlock(&ts->mu); break; } - GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx); + GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(); grpc_closure_list exec = ts->elems; ts->elems = GRPC_CLOSURE_LIST_INIT; gpr_mu_unlock(&ts->mu); @@ -178,19 +175,19 @@ static void executor_thread(void *arg) { gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state)); } - grpc_exec_ctx_invalidate_now(&exec_ctx); - subtract_depth = run_closures(&exec_ctx, exec); + grpc_exec_ctx_invalidate_now(); + subtract_depth = run_closures(exec); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } -static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error, bool is_short) { +static void executor_push(grpc_closure *closure, grpc_error *error, + bool is_short) { bool retry_push; if (is_short) { - GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(); } else { - GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(); } do { retry_push = 
false; @@ -209,9 +206,9 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, } thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state); if (ts == NULL) { - ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)]; + ts = &g_thread_state[GPR_HASH_POINTER(&exec_ctx, cur_thread_count)]; } else { - GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(); } thread_state *orig_ts = ts; @@ -247,7 +244,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, continue; } if (grpc_closure_list_empty(ts->elems)) { - GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx); + GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(); gpr_cv_signal(&ts->cv); } grpc_closure_list_append(&ts->elems, closure, error); @@ -271,19 +268,17 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, gpr_spinlock_unlock(&g_adding_thread_lock); } if (retry_push) { - GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx); + GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(); } } while (retry_push); } -static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - executor_push(exec_ctx, closure, error, true); +static void executor_push_short(grpc_closure *closure, grpc_error *error) { + executor_push(closure, error, true); } -static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - executor_push(exec_ctx, closure, error, false); +static void executor_push_long(grpc_closure *closure, grpc_error *error) { + executor_push(closure, error, false); } static const grpc_closure_scheduler_vtable executor_vtable_short = { diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h index ef5ac56c83..d75ba20d5a 100644 --- a/src/core/lib/iomgr/executor.h +++ b/src/core/lib/iomgr/executor.h @@ -35,19 +35,19 @@ typedef enum { * This mechanism is meant to outsource work (grpc_closure instances) to a * thread, for 
those cases where blocking isn't an option but there isn't a * non-blocking solution available. */ -void grpc_executor_init(grpc_exec_ctx *exec_ctx); +void grpc_executor_init(); grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length); /** Shutdown the executor, running all pending work as part of the call */ -void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx); +void grpc_executor_shutdown(); /** Is the executor multi-threaded? */ bool grpc_executor_is_threaded(); /* enable/disable threading - must be called after grpc_executor_init and before grpc_executor_shutdown */ -void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable); +void grpc_executor_set_threading(bool enable); #ifdef __cplusplus } diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc index 78185cc084..885a78bdfa 100644 --- a/src/core/lib/iomgr/iocp_windows.cc +++ b/src/core/lib/iomgr/iocp_windows.cc @@ -42,20 +42,18 @@ static gpr_atm g_custom_events = 0; static HANDLE g_iocp; -static DWORD deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx, - grpc_millis deadline) { +static DWORD deadline_to_millis_timeout(grpc_millis deadline) { if (deadline == GRPC_MILLIS_INF_FUTURE) { return INFINITE; } - grpc_millis now = grpc_exec_ctx_now(exec_ctx); + grpc_millis now = grpc_exec_ctx_now(); if (deadline < now) return 0; grpc_millis timeout = deadline - now; if (timeout > std::numeric_limits<DWORD>::max()) return INFINITE; return static_cast<DWORD>(deadline - now); } -grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, - grpc_millis deadline) { +grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) { BOOL success; DWORD bytes = 0; DWORD flags = 0; @@ -63,11 +61,11 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, LPOVERLAPPED overlapped; grpc_winsocket *socket; grpc_winsocket_callback_info *info; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); success = GetQueuedCompletionStatus(g_iocp, &bytes, 
&completion_key, &overlapped, - deadline_to_millis_timeout(exec_ctx, deadline)); - grpc_exec_ctx_invalidate_now(exec_ctx); + deadline_to_millis_timeout(deadline)); + grpc_exec_ctx_invalidate_now(); if (success == 0 && overlapped == NULL) { return GRPC_IOCP_WORK_TIMEOUT; } @@ -95,7 +93,7 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, info->bytes_transfered = bytes; info->wsa_error = success ? 0 : WSAGetLastError(); GPR_ASSERT(overlapped == &info->overlapped); - grpc_socket_become_ready(exec_ctx, socket, info); + grpc_socket_become_ready(socket, info); return GRPC_IOCP_WORK_WORK; } @@ -115,22 +113,21 @@ void grpc_iocp_kick(void) { } void grpc_iocp_flush(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_iocp_work_status work_status; do { - work_status = grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_PAST); - } while (work_status == GRPC_IOCP_WORK_KICK || - grpc_exec_ctx_flush(&exec_ctx)); + work_status = grpc_iocp_work(GRPC_MILLIS_INF_PAST); + } while (work_status == GRPC_IOCP_WORK_KICK || grpc_exec_ctx_flush()); } void grpc_iocp_shutdown(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; while (gpr_atm_acq_load(&g_custom_events)) { - grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_FUTURE); - grpc_exec_ctx_flush(&exec_ctx); + grpc_iocp_work(GRPC_MILLIS_INF_FUTURE); + grpc_exec_ctx_flush(); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); GPR_ASSERT(CloseHandle(g_iocp)); } diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h index 4efbc94645..f6e2b776d1 100644 --- a/src/core/lib/iomgr/iocp_windows.h +++ b/src/core/lib/iomgr/iocp_windows.h @@ -33,8 +33,7 @@ typedef enum { GRPC_IOCP_WORK_KICK } grpc_iocp_work_status; -grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, - grpc_millis deadline); +grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline); void grpc_iocp_init(void); void grpc_iocp_kick(void); void grpc_iocp_flush(void); diff --git 
a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc index d6a5b4a76c..236679c361 100644 --- a/src/core/lib/iomgr/iomgr.cc +++ b/src/core/lib/iomgr/iomgr.cc @@ -45,20 +45,20 @@ static gpr_cv g_rcv; static int g_shutdown; static grpc_iomgr_object g_root_object; -void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) { +void grpc_iomgr_init() { g_shutdown = 0; gpr_mu_init(&g_mu); gpr_cv_init(&g_rcv); grpc_exec_ctx_global_init(); - grpc_executor_init(exec_ctx); - grpc_timer_list_init(exec_ctx); + grpc_executor_init(); + grpc_timer_list_init(); g_root_object.next = g_root_object.prev = &g_root_object; g_root_object.name = (char *)"root"; grpc_network_status_init(); grpc_iomgr_platform_init(); } -void grpc_iomgr_start(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_init(); } +void grpc_iomgr_start() { grpc_timer_manager_init(); } static size_t count_objects(void) { grpc_iomgr_object *obj; @@ -76,14 +76,14 @@ static void dump_objects(const char *kind) { } } -void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { +void grpc_iomgr_shutdown() { gpr_timespec shutdown_deadline = gpr_time_add( gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)); gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME); grpc_timer_manager_shutdown(); grpc_iomgr_platform_flush(); - grpc_executor_shutdown(exec_ctx); + grpc_executor_shutdown(); gpr_mu_lock(&g_mu); g_shutdown = 1; @@ -100,9 +100,9 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { } exec_ctx->now_is_valid = true; exec_ctx->now = GRPC_MILLIS_INF_FUTURE; - if (grpc_timer_check(exec_ctx, NULL) == GRPC_TIMERS_FIRED) { + if (grpc_timer_check(NULL) == GRPC_TIMERS_FIRED) { gpr_mu_unlock(&g_mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); grpc_iomgr_platform_flush(); gpr_mu_lock(&g_mu); continue; @@ -134,8 +134,8 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { } gpr_mu_unlock(&g_mu); - grpc_timer_list_shutdown(exec_ctx); - grpc_exec_ctx_flush(exec_ctx); + grpc_timer_list_shutdown(); + 
grpc_exec_ctx_flush(); /* ensure all threads have left g_mu */ gpr_mu_lock(&g_mu); diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h index 6c0a08b918..ce9f60521c 100644 --- a/src/core/lib/iomgr/iomgr.h +++ b/src/core/lib/iomgr/iomgr.h @@ -27,14 +27,14 @@ extern "C" { #endif /** Initializes the iomgr. */ -void grpc_iomgr_init(grpc_exec_ctx *exec_ctx); +void grpc_iomgr_init(); /** Starts any background threads for iomgr. */ -void grpc_iomgr_start(grpc_exec_ctx *exec_ctx); +void grpc_iomgr_start(); /** Signals the intention to shutdown the iomgr. Expects to be able to flush * exec_ctx. */ -void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx); +void grpc_iomgr_shutdown(); #ifdef __cplusplus } diff --git a/src/core/lib/iomgr/iomgr_uv.cc b/src/core/lib/iomgr/iomgr_uv.cc index df5d23af3b..4dda970286 100644 --- a/src/core/lib/iomgr/iomgr_uv.cc +++ b/src/core/lib/iomgr/iomgr_uv.cc @@ -29,12 +29,12 @@ gpr_thd_id g_init_thread; void grpc_iomgr_platform_init(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_pollset_global_init(); grpc_register_tracer(&grpc_tcp_trace); - grpc_executor_set_threading(&exec_ctx, false); + grpc_executor_set_threading(false); g_init_thread = gpr_thd_currentid(); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } void grpc_iomgr_platform_flush(void) {} void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); } diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc index f967b22ba9..2fa3531bc6 100644 --- a/src/core/lib/iomgr/lockfree_event.cc +++ b/src/core/lib/iomgr/lockfree_event.cc @@ -78,8 +78,8 @@ bool grpc_lfev_is_shutdown(gpr_atm *state) { return (curr & FD_SHUTDOWN_BIT) != 0; } -void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_closure *closure, const char *variable) { +void grpc_lfev_notify_on(gpr_atm *state, grpc_closure *closure, + const char *variable) { while (true) { gpr_atm curr = 
gpr_atm_no_barrier_load(state); if (GRPC_TRACER_ON(grpc_polling_trace)) { @@ -112,7 +112,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, closure when transitioning out of CLOSURE_NO_READY state (i.e there is no other code that needs to 'happen-after' this) */ if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); return; /* Successful. Return */ } @@ -125,7 +125,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, schedule the closure with the shutdown error */ if ((curr & FD_SHUTDOWN_BIT) > 0) { grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT); - GRPC_CLOSURE_SCHED(exec_ctx, closure, + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "FD Shutdown", &shutdown_err, 1)); return; @@ -142,8 +142,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, GPR_UNREACHABLE_CODE(return ); } -bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_error *shutdown_err) { +bool grpc_lfev_set_shutdown(gpr_atm *state, grpc_error *shutdown_err) { gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT; while (true) { @@ -177,7 +176,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, happens-after on that edge), and a release to pair with anything loading the shutdown state. 
*/ if (gpr_atm_full_cas(state, curr, new_state)) { - GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, + GRPC_CLOSURE_SCHED((grpc_closure *)curr, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "FD Shutdown", &shutdown_err, 1)); return true; @@ -193,8 +192,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, GPR_UNREACHABLE_CODE(return false); } -void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, - const char *variable) { +void grpc_lfev_set_ready(gpr_atm *state, const char *variable) { while (true) { gpr_atm curr = gpr_atm_no_barrier_load(state); @@ -228,7 +226,7 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, spurious set_ready; release pairs with this or the acquire in notify_on (or set_shutdown) */ else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) { - GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED((grpc_closure *)curr, GRPC_ERROR_NONE); return; } /* else the state changed again (only possible by either a racing diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h index 02229e569e..cf681fc874 100644 --- a/src/core/lib/iomgr/lockfree_event.h +++ b/src/core/lib/iomgr/lockfree_event.h @@ -33,13 +33,11 @@ void grpc_lfev_init(gpr_atm *state); void grpc_lfev_destroy(gpr_atm *state); bool grpc_lfev_is_shutdown(gpr_atm *state); -void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_closure *closure, const char *variable); -/* Returns true on first successful shutdown */ -bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_error *shutdown_err); -void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, +void grpc_lfev_notify_on(gpr_atm *state, grpc_closure *closure, const char *variable); +/* Returns true on first successful shutdown */ +bool grpc_lfev_set_shutdown(gpr_atm *state, grpc_error *shutdown_err); +void grpc_lfev_set_ready(gpr_atm *state, const char *variable); #ifdef 
__cplusplus } diff --git a/src/core/lib/iomgr/polling_entity.cc b/src/core/lib/iomgr/polling_entity.cc index 8591a5518e..06b78e823d 100644 --- a/src/core/lib/iomgr/polling_entity.cc +++ b/src/core/lib/iomgr/polling_entity.cc @@ -56,32 +56,28 @@ bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) { return pollent->tag == GRPC_POLLS_NONE; } -void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, +void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity *pollent, grpc_pollset_set *pss_dst) { if (pollent->tag == GRPC_POLLS_POLLSET) { GPR_ASSERT(pollent->pollent.pollset != NULL); - grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset); + grpc_pollset_set_add_pollset(pss_dst, pollent->pollent.pollset); } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) { GPR_ASSERT(pollent->pollent.pollset_set != NULL); - grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst, - pollent->pollent.pollset_set); + grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set); } else { gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag); abort(); } } -void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, +void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity *pollent, grpc_pollset_set *pss_dst) { if (pollent->tag == GRPC_POLLS_POLLSET) { GPR_ASSERT(pollent->pollent.pollset != NULL); - grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset); + grpc_pollset_set_del_pollset(pss_dst, pollent->pollent.pollset); } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) { GPR_ASSERT(pollent->pollent.pollset_set != NULL); - grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst, - pollent->pollent.pollset_set); + grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set); } else { gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag); abort(); diff --git 
a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h index 009f968fac..e1e4c47c83 100644 --- a/src/core/lib/iomgr/polling_entity.h +++ b/src/core/lib/iomgr/polling_entity.h @@ -59,14 +59,12 @@ bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent); /** Add the pollset or pollset_set in \a pollent to the destination pollset_set * \a * pss_dst */ -void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, +void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity *pollent, grpc_pollset_set *pss_dst); /** Delete the pollset or pollset_set in \a pollent from the destination * pollset_set \a * pss_dst */ -void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, +void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity *pollent, grpc_pollset_set *pss_dst); #ifdef __cplusplus } diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h index 799fae154c..1f5b6c12f1 100644 --- a/src/core/lib/iomgr/pollset.h +++ b/src/core/lib/iomgr/pollset.h @@ -48,9 +48,8 @@ size_t grpc_pollset_size(void); void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu); /* Begin shutting down the pollset, and call closure when done. * pollset's mutex must be held */ -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure); -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); +void grpc_pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure); +void grpc_pollset_destroy(grpc_pollset *pollset); /* Do some work on a pollset. 
May involve invoking asynchronous callbacks, or actually polling file @@ -74,13 +73,13 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); May call grpc_closure_list_run on grpc_closure_list, without holding the pollset lock */ -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker, grpc_millis deadline) GRPC_MUST_USE_RESULT; /* Break one polling thread out of polling work for this pollset. If specific_worker is non-NULL, then kick that worker. */ -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) GRPC_MUST_USE_RESULT; diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h index 5455eda02f..a30069e6c8 100644 --- a/src/core/lib/iomgr/pollset_set.h +++ b/src/core/lib/iomgr/pollset_set.h @@ -33,19 +33,14 @@ extern "C" { typedef struct grpc_pollset_set grpc_pollset_set; grpc_pollset_set *grpc_pollset_set_create(void); -void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set); -void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, +void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set); +void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set, grpc_pollset *pollset); -void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, +void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set, grpc_pollset *pollset); -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +void grpc_pollset_set_add_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item); -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, +void grpc_pollset_set_del_pollset_set(grpc_pollset_set *bag, grpc_pollset_set *item); #ifdef __cplusplus diff 
--git a/src/core/lib/iomgr/pollset_set_uv.cc b/src/core/lib/iomgr/pollset_set_uv.cc index 90186edbb7..ac5dade8a5 100644 --- a/src/core/lib/iomgr/pollset_set_uv.cc +++ b/src/core/lib/iomgr/pollset_set_uv.cc @@ -26,23 +26,18 @@ grpc_pollset_set* grpc_pollset_set_create(void) { return (grpc_pollset_set*)((intptr_t)0xdeafbeef); } -void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set) {} +void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} -void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, +void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set, grpc_pollset* pollset) {} -void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, +void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set, grpc_pollset* pollset) {} -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, +void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag, grpc_pollset_set* item) {} -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, +void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag, grpc_pollset_set* item) {} #endif /* GRPC_UV */ diff --git a/src/core/lib/iomgr/pollset_set_windows.cc b/src/core/lib/iomgr/pollset_set_windows.cc index 2105a47ad4..85edc9dee1 100644 --- a/src/core/lib/iomgr/pollset_set_windows.cc +++ b/src/core/lib/iomgr/pollset_set_windows.cc @@ -27,23 +27,18 @@ grpc_pollset_set* grpc_pollset_set_create(void) { return (grpc_pollset_set*)((intptr_t)0xdeafbeef); } -void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set) {} +void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} -void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, +void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set, grpc_pollset* pollset) {} -void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx, - 
grpc_pollset_set* pollset_set, +void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set, grpc_pollset* pollset) {} -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, +void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag, grpc_pollset_set* item) {} -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, +void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag, grpc_pollset_set* item) {} #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc index b9901bf8ef..53585a80dc 100644 --- a/src/core/lib/iomgr/pollset_uv.cc +++ b/src/core/lib/iomgr/pollset_uv.cc @@ -87,8 +87,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->shutting_down = 0; } -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +void grpc_pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(!pollset->shutting_down); GRPC_UV_ASSERT_SAME_THREAD(); pollset->shutting_down = 1; @@ -99,10 +98,10 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, // kick the loop once uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +void grpc_pollset_destroy(grpc_pollset *pollset) { GRPC_UV_ASSERT_SAME_THREAD(); uv_close((uv_handle_t *)&pollset->timer, timer_close_cb); // timer.data is a boolean indicating that the timer has finished closing @@ -114,14 +113,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { } } -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { uint64_t timeout; 
GRPC_UV_ASSERT_SAME_THREAD(); gpr_mu_unlock(&grpc_polling_mu); if (grpc_pollset_work_run_loop) { - grpc_millis now = grpc_exec_ctx_now(exec_ctx); + grpc_millis now = grpc_exec_ctx_now(); if (deadline >= now) { timeout = deadline - now; } else { @@ -140,13 +139,13 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } } if (!grpc_closure_list_empty(exec_ctx->closure_list)) { - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); } gpr_mu_lock(&grpc_polling_mu); return GRPC_ERROR_NONE; } -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { GRPC_UV_ASSERT_SAME_THREAD(); uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc index bb4df83fc1..e1fdf9dc6b 100644 --- a/src/core/lib/iomgr/pollset_windows.cc +++ b/src/core/lib/iomgr/pollset_windows.cc @@ -95,20 +95,19 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { &pollset->root_worker; } -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +void grpc_pollset_shutdown(grpc_pollset *pollset, grpc_closure *closure) { pollset->shutting_down = 1; - grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST); + grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); if (!pollset->is_iocp_worker) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } else { pollset->on_shutdown = closure; } } -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {} +void grpc_pollset_destroy(grpc_pollset *pollset) {} -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, +grpc_error *grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, grpc_millis deadline) { grpc_pollset_worker worker; @@ -129,8 +128,8 @@ grpc_error 
*grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, pollset->is_iocp_worker = 1; g_active_poller = &worker; gpr_mu_unlock(&grpc_polling_mu); - grpc_iocp_work(exec_ctx, deadline); - grpc_exec_ctx_flush(exec_ctx); + grpc_iocp_work(deadline); + grpc_exec_ctx_flush(); gpr_mu_lock(&grpc_polling_mu); pollset->is_iocp_worker = 0; g_active_poller = NULL; @@ -148,7 +147,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } if (pollset->shutting_down && pollset->on_shutdown != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(pollset->on_shutdown, GRPC_ERROR_NONE); pollset->on_shutdown = NULL; } goto done; @@ -170,7 +169,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, done: if (!grpc_closure_list_empty(exec_ctx->closure_list)) { gpr_mu_unlock(&grpc_polling_mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&grpc_polling_mu); } if (added_worker) { @@ -182,7 +181,7 @@ done: return GRPC_ERROR_NONE; } -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, +grpc_error *grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) { if (specific_worker != NULL) { if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) { @@ -210,7 +209,7 @@ grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, specific_worker = pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET); if (specific_worker != NULL) { - grpc_pollset_kick(exec_ctx, p, specific_worker); + grpc_pollset_kick(p, specific_worker); } else if (p->is_iocp_worker) { grpc_iocp_kick(); } else { diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h index 5f0634299e..ac91ac3a7b 100644 --- a/src/core/lib/iomgr/resolve_address.h +++ b/src/core/lib/iomgr/resolve_address.h @@ -42,8 +42,7 @@ typedef struct { /* Asynchronously resolve addr. 
Use default_port if a port isn't designated in addr, otherwise use the port in addr. */ /* TODO(ctiller): add a timeout here */ -extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr, - const char *default_port, +extern void (*grpc_resolve_address)(const char *addr, const char *default_port, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses); diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc index 1b783495df..2fa0913403 100644 --- a/src/core/lib/iomgr/resolve_address_posix.cc +++ b/src/core/lib/iomgr/resolve_address_posix.cc @@ -152,12 +152,10 @@ typedef struct { /* Callback to be passed to grpc_executor to asynch-ify * grpc_blocking_resolve_address */ -static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, - grpc_error *error) { +static void do_request_thread(void *rp, grpc_error *error) { request *r = (request *)rp; - GRPC_CLOSURE_SCHED( - exec_ctx, r->on_done, - grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out)); + GRPC_CLOSURE_SCHED(r->on_done, grpc_blocking_resolve_address( + r->name, r->default_port, r->addrs_out)); gpr_free(r->name); gpr_free(r->default_port); gpr_free(r); @@ -170,8 +168,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { gpr_free(addrs); } -static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, +static void resolve_address_impl(const char *name, const char *default_port, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { @@ -182,11 +179,11 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, r->default_port = gpr_strdup(default_port); r->on_done = on_done; r->addrs_out = addrs; - GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE); } void (*grpc_resolve_address)( - 
grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + const char *name, const char *default_port, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) = resolve_address_impl; diff --git a/src/core/lib/iomgr/resolve_address_uv.cc b/src/core/lib/iomgr/resolve_address_uv.cc index 4f7f234877..82ea27d22e 100644 --- a/src/core/lib/iomgr/resolve_address_uv.cc +++ b/src/core/lib/iomgr/resolve_address_uv.cc @@ -114,7 +114,7 @@ static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result, static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, struct addrinfo *res) { request *r = (request *)req->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_error *error; int retry_status; char *port = r->port; @@ -130,8 +130,8 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, /* Either no retry was attempted, or the retry failed. Either way, the original error probably has more interesting information */ error = handle_addrinfo_result(status, res, r->addresses); - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CLOSURE_SCHED(r->on_done, error); + grpc_exec_ctx_finish(); gpr_free(r->hints); gpr_free(r->host); gpr_free(r->port); @@ -224,8 +224,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { gpr_free(addrs); } -static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, +static void resolve_address_impl(const char *name, const char *default_port, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { @@ -239,7 +238,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, GRPC_UV_ASSERT_SAME_THREAD(); err = try_split_host_port(name, default_port, &host, &port); if (err != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, on_done, err); + GRPC_CLOSURE_SCHED(on_done, err); 
gpr_free(host); gpr_free(port); return; @@ -268,7 +267,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed"); err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, grpc_slice_from_static_string(uv_strerror(s))); - GRPC_CLOSURE_SCHED(exec_ctx, on_done, err); + GRPC_CLOSURE_SCHED(on_done, err); gpr_free(r); gpr_free(req); gpr_free(hints); @@ -278,7 +277,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, } void (*grpc_resolve_address)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + const char *name, const char *default_port, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) = resolve_address_impl; diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc index 451f01a701..079ff5c9e2 100644 --- a/src/core/lib/iomgr/resolve_address_windows.cc +++ b/src/core/lib/iomgr/resolve_address_windows.cc @@ -132,8 +132,7 @@ grpc_error *(*grpc_blocking_resolve_address)( /* Callback to be passed to grpc_executor to asynch-ify * grpc_blocking_resolve_address */ -static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, - grpc_error *error) { +static void do_request_thread(void *rp, grpc_error *error) { request *r = (request *)rp; if (error == GRPC_ERROR_NONE) { error = @@ -141,7 +140,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, } else { GRPC_ERROR_REF(error); } - GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error); + GRPC_CLOSURE_SCHED(r->on_done, error); gpr_free(r->name); gpr_free(r->default_port); gpr_free(r); @@ -154,8 +153,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { gpr_free(addrs); } -static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, +static void resolve_address_impl(const char *name, const char *default_port, grpc_pollset_set 
*interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses) { @@ -166,11 +164,11 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, r->default_port = gpr_strdup(default_port); r->on_done = on_done; r->addresses = addresses; - GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE); } void (*grpc_resolve_address)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + const char *name, const char *default_port, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses) = resolve_address_impl; diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc index ecb5747da8..0a8c9073cf 100644 --- a/src/core/lib/iomgr/resource_quota.cc +++ b/src/core/lib/iomgr/resource_quota.cc @@ -155,8 +155,7 @@ struct grpc_resource_quota { char *name; }; -static void ru_unref_by(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, gpr_atm amount); +static void ru_unref_by(grpc_resource_user *resource_user, gpr_atm amount); /******************************************************************************* * list management @@ -240,35 +239,31 @@ static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) { * resource quota state machine */ -static bool rq_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota); +static bool rq_alloc(grpc_resource_quota *resource_quota); static bool rq_reclaim_from_per_user_free_pool( - grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota); -static bool rq_reclaim(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota, bool destructive); + grpc_resource_quota *resource_quota); +static bool rq_reclaim(grpc_resource_quota *resource_quota, bool destructive); -static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) { +static void rq_step(void *rq, grpc_error *error) { 
grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq; resource_quota->step_scheduled = false; do { - if (rq_alloc(exec_ctx, resource_quota)) goto done; - } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota)); + if (rq_alloc(resource_quota)) goto done; + } while (rq_reclaim_from_per_user_free_pool(resource_quota)); - if (!rq_reclaim(exec_ctx, resource_quota, false)) { - rq_reclaim(exec_ctx, resource_quota, true); + if (!rq_reclaim(resource_quota, false)) { + rq_reclaim(resource_quota, true); } done: - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); } -static void rq_step_sched(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota) { +static void rq_step_sched(grpc_resource_quota *resource_quota) { if (resource_quota->step_scheduled) return; resource_quota->step_scheduled = true; grpc_resource_quota_ref_internal(resource_quota); - GRPC_CLOSURE_SCHED(exec_ctx, &resource_quota->rq_step_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&resource_quota->rq_step_closure, GRPC_ERROR_NONE); } /* update the atomically available resource estimate - use no barriers since @@ -288,8 +283,7 @@ static void rq_update_estimate(grpc_resource_quota *resource_quota) { } /* returns true if all allocations are completed */ -static bool rq_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota) { +static bool rq_alloc(grpc_resource_quota *resource_quota) { grpc_resource_user *resource_user; while ((resource_user = rulist_pop_head(resource_quota, GRPC_RULIST_AWAITING_ALLOCATION))) { @@ -308,9 +302,9 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx, int64_t aborted_allocations = resource_user->outstanding_allocations; resource_user->outstanding_allocations = 0; resource_user->free_pool += aborted_allocations; - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated); + GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated); gpr_mu_unlock(&resource_user->mu); - 
ru_unref_by(exec_ctx, resource_user, (gpr_atm)aborted_allocations); + ru_unref_by(resource_user, (gpr_atm)aborted_allocations); continue; } if (resource_user->free_pool < 0 && @@ -333,7 +327,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx, if (resource_user->free_pool >= 0) { resource_user->allocating = false; resource_user->outstanding_allocations = 0; - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated); + GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated); gpr_mu_unlock(&resource_user->mu); } else { rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); @@ -346,7 +340,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx, /* returns true if any memory could be reclaimed from buffers */ static bool rq_reclaim_from_per_user_free_pool( - grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) { + grpc_resource_quota *resource_quota) { grpc_resource_user *resource_user; while ((resource_user = rulist_pop_head(resource_quota, GRPC_RULIST_NON_EMPTY_FREE_POOL))) { @@ -372,8 +366,7 @@ static bool rq_reclaim_from_per_user_free_pool( } /* returns true if reclamation is proceeding */ -static bool rq_reclaim(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota, bool destructive) { +static bool rq_reclaim(grpc_resource_quota *resource_quota, bool destructive) { if (resource_quota->reclaiming) return true; grpc_rulist list = destructive ? 
GRPC_RULIST_RECLAIMER_DESTRUCTIVE : GRPC_RULIST_RECLAIMER_BENIGN; @@ -391,7 +384,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx, resource_quota->debug_only_last_reclaimer_resource_user = resource_user; resource_quota->debug_only_last_initiated_reclaimer = c; resource_user->reclaimers[destructive] = NULL; - GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_NONE); + GRPC_CLOSURE_RUN(c, GRPC_ERROR_NONE); return true; } @@ -411,10 +404,10 @@ static void ru_slice_ref(void *p) { gpr_ref(&rc->refs); } -static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { +static void ru_slice_unref(void *p) { ru_slice_refcount *rc = (ru_slice_refcount *)p; if (gpr_unref(&rc->refs)) { - grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size); + grpc_resource_user_free(rc->resource_user, rc->size); gpr_free(rc); } } @@ -444,61 +437,57 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user, * the combiner */ -static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { +static void ru_allocate(void *ru, grpc_error *error) { grpc_resource_user *resource_user = (grpc_resource_user *)ru; if (rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); } -static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru, - grpc_error *error) { +static void ru_add_to_free_pool(void *ru, grpc_error *error) { grpc_resource_user *resource_user = (grpc_resource_user *)ru; if (!rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_NON_EMPTY_FREE_POOL)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL); } -static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx, - 
grpc_resource_user *resource_user, +static bool ru_post_reclaimer(grpc_resource_user *resource_user, bool destructive) { grpc_closure *closure = resource_user->new_reclaimers[destructive]; GPR_ASSERT(closure != NULL); resource_user->new_reclaimers[destructive] = NULL; GPR_ASSERT(resource_user->reclaimers[destructive] == NULL); if (gpr_atm_acq_load(&resource_user->shutdown) > 0) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CANCELLED); return false; } resource_user->reclaimers[destructive] = closure; return true; } -static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, - grpc_error *error) { +static void ru_post_benign_reclaimer(void *ru, grpc_error *error) { grpc_resource_user *resource_user = (grpc_resource_user *)ru; - if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return; + if (!ru_post_reclaimer(resource_user, false)) return; if (!rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_NON_EMPTY_FREE_POOL) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_RECLAIMER_BENIGN)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN); } -static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, - grpc_error *error) { +static void ru_post_destructive_reclaimer(void *ru, grpc_error *error) { grpc_resource_user *resource_user = (grpc_resource_user *)ru; - if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return; + if (!ru_post_reclaimer(resource_user, true)) return; if (!rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION) && rulist_empty(resource_user->resource_quota, @@ -507,51 +496,46 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, GRPC_RULIST_RECLAIMER_BENIGN) && 
rulist_empty(resource_user->resource_quota, GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE); } -static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { +static void ru_shutdown(void *ru, grpc_error *error) { if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { gpr_log(GPR_DEBUG, "RU shutdown %p", ru); } grpc_resource_user *resource_user = (grpc_resource_user *)ru; - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0], - GRPC_ERROR_CANCELLED); - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1], - GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED); resource_user->reclaimers[0] = NULL; resource_user->reclaimers[1] = NULL; rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN); rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE); if (resource_user->allocating) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } } -static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { +static void ru_destroy(void *ru, grpc_error *error) { grpc_resource_user *resource_user = (grpc_resource_user *)ru; GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0); for (int i = 0; i < GRPC_RULIST_COUNT; i++) { rulist_remove(resource_user, (grpc_rulist)i); } - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0], - GRPC_ERROR_CANCELLED); - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1], - GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED); if (resource_user->free_pool != 0) { resource_user->resource_quota->free_pool += resource_user->free_pool; - 
rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } - grpc_resource_quota_unref_internal(exec_ctx, resource_user->resource_quota); + grpc_resource_quota_unref_internal(resource_user->resource_quota); gpr_mu_destroy(&resource_user->mu); gpr_free(resource_user->name); gpr_free(resource_user); } -static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void ru_allocated_slices(void *arg, grpc_error *error) { grpc_resource_user_slice_allocator *slice_allocator = (grpc_resource_user_slice_allocator *)arg; if (error == GRPC_ERROR_NONE) { @@ -561,7 +545,7 @@ static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg, slice_allocator->length)); } } - GRPC_CLOSURE_RUN(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_RUN(&slice_allocator->on_done, GRPC_ERROR_REF(error)); } /******************************************************************************* @@ -575,23 +559,22 @@ typedef struct { grpc_closure closure; } rq_resize_args; -static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) { +static void rq_resize(void *args, grpc_error *error) { rq_resize_args *a = (rq_resize_args *)args; int64_t delta = a->size - a->resource_quota->size; a->resource_quota->size += delta; a->resource_quota->free_pool += delta; rq_update_estimate(a->resource_quota); - rq_step_sched(exec_ctx, a->resource_quota); - grpc_resource_quota_unref_internal(exec_ctx, a->resource_quota); + rq_step_sched(a->resource_quota); + grpc_resource_quota_unref_internal(a->resource_quota); gpr_free(a); } -static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq, - grpc_error *error) { +static void rq_reclamation_done(void *rq, grpc_error *error) { grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq; resource_quota->reclaiming = false; - rq_step_sched(exec_ctx, resource_quota); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + 
rq_step_sched(resource_quota); + grpc_resource_quota_unref_internal(resource_quota); } /******************************************************************************* @@ -627,10 +610,9 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) { return resource_quota; } -void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota) { +void grpc_resource_quota_unref_internal(grpc_resource_quota *resource_quota) { if (gpr_unref(&resource_quota->refs)) { - GRPC_COMBINER_UNREF(exec_ctx, resource_quota->combiner, "resource_quota"); + GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota"); gpr_free(resource_quota->name); gpr_free(resource_quota); } @@ -638,9 +620,9 @@ void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, /* Public API */ void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_resource_quota_unref_internal(resource_quota); + grpc_exec_ctx_finish(); } grpc_resource_quota *grpc_resource_quota_ref_internal( @@ -664,15 +646,15 @@ double grpc_resource_quota_get_memory_pressure( /* Public API */ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota, size_t size) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a)); a->resource_quota = grpc_resource_quota_ref_internal(resource_quota); a->size = (int64_t)size; gpr_atm_no_barrier_store(&resource_quota->last_size, (gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size)); GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(&exec_ctx, &a->closure, GRPC_ERROR_NONE); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CLOSURE_SCHED(&a->closure, GRPC_ERROR_NONE); + grpc_exec_ctx_finish(); } size_t 
grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota) { @@ -703,8 +685,8 @@ static void *rq_copy(void *rq) { return rq; } -static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) { - grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq); +static void rq_destroy(void *rq) { + grpc_resource_quota_unref_internal((grpc_resource_quota *)rq); } static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); } @@ -772,14 +754,12 @@ static void ru_ref_by(grpc_resource_user *resource_user, gpr_atm amount) { GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0); } -static void ru_unref_by(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, gpr_atm amount) { +static void ru_unref_by(grpc_resource_user *resource_user, gpr_atm amount) { GPR_ASSERT(amount > 0); gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount); GPR_ASSERT(old >= amount); if (old == amount) { - GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->destroy_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&resource_user->destroy_closure, GRPC_ERROR_NONE); } } @@ -787,16 +767,13 @@ void grpc_resource_user_ref(grpc_resource_user *resource_user) { ru_ref_by(resource_user, 1); } -void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { - ru_unref_by(exec_ctx, resource_user, 1); +void grpc_resource_user_unref(grpc_resource_user *resource_user) { + ru_unref_by(resource_user, 1); } -void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { +void grpc_resource_user_shutdown(grpc_resource_user *resource_user) { if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) { GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE( ru_shutdown, resource_user, grpc_combiner_scheduler(resource_user->resource_quota->combiner)), @@ -804,8 +781,7 @@ void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, } } -void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user 
*resource_user, size_t size, +void grpc_resource_user_alloc(grpc_resource_user *resource_user, size_t size, grpc_closure *optional_on_done) { gpr_mu_lock(&resource_user->mu); ru_ref_by(resource_user, (gpr_atm)size); @@ -821,18 +797,16 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, GRPC_ERROR_NONE); if (!resource_user->allocating) { resource_user->allocating = true; - GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->allocate_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE); } } else { resource_user->outstanding_allocations -= (int64_t)size; - GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE); } gpr_mu_unlock(&resource_user->mu); } -void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size) { +void grpc_resource_user_free(grpc_resource_user *resource_user, size_t size) { gpr_mu_lock(&resource_user->mu); bool was_zero_or_negative = resource_user->free_pool <= 0; resource_user->free_pool += (int64_t)size; @@ -845,32 +819,29 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, if (is_bigger_than_zero && was_zero_or_negative && !resource_user->added_to_free_pool) { resource_user->added_to_free_pool = true; - GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->add_to_free_pool_closure, + GRPC_CLOSURE_SCHED(&resource_user->add_to_free_pool_closure, GRPC_ERROR_NONE); } gpr_mu_unlock(&resource_user->mu); - ru_unref_by(exec_ctx, resource_user, (gpr_atm)size); + ru_unref_by(resource_user, (gpr_atm)size); } -void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, +void grpc_resource_user_post_reclaimer(grpc_resource_user *resource_user, bool destructive, grpc_closure *closure) { GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL); resource_user->new_reclaimers[destructive] = closure; - GRPC_CLOSURE_SCHED(exec_ctx, - 
&resource_user->post_reclaimer_closure[destructive], + GRPC_CLOSURE_SCHED(&resource_user->post_reclaimer_closure[destructive], GRPC_ERROR_NONE); } -void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { +void grpc_resource_user_finish_reclamation(grpc_resource_user *resource_user) { if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete", resource_user->resource_quota->name, resource_user->name); } GRPC_CLOSURE_SCHED( - exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure, + &resource_user->resource_quota->rq_reclamation_done_closure, GRPC_ERROR_NONE); } @@ -885,19 +856,17 @@ void grpc_resource_user_slice_allocator_init( } void grpc_resource_user_alloc_slices( - grpc_exec_ctx *exec_ctx, grpc_resource_user_slice_allocator *slice_allocator, size_t length, size_t count, grpc_slice_buffer *dest) { slice_allocator->length = length; slice_allocator->count = count; slice_allocator->dest = dest; - grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user, - count * length, &slice_allocator->on_allocated); + grpc_resource_user_alloc(slice_allocator->resource_user, count * length, + &slice_allocator->on_allocated); } -grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, +grpc_slice grpc_resource_user_slice_malloc(grpc_resource_user *resource_user, size_t size) { - grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL); + grpc_resource_user_alloc(resource_user, size, NULL); return ru_slice_create(resource_user, size); } diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h index 1d4249b7e2..763ac9fcc1 100644 --- a/src/core/lib/iomgr/resource_quota.h +++ b/src/core/lib/iomgr/resource_quota.h @@ -69,8 +69,7 @@ extern grpc_tracer_flag grpc_resource_quota_trace; grpc_resource_quota *grpc_resource_quota_ref_internal( grpc_resource_quota *resource_quota); -void 
grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota); +void grpc_resource_quota_unref_internal(grpc_resource_quota *resource_quota); grpc_resource_quota *grpc_resource_quota_from_channel_args( const grpc_channel_args *channel_args); @@ -93,32 +92,26 @@ grpc_resource_quota *grpc_resource_user_quota( grpc_resource_user *resource_user); void grpc_resource_user_ref(grpc_resource_user *resource_user); -void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); -void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); +void grpc_resource_user_unref(grpc_resource_user *resource_user); +void grpc_resource_user_shutdown(grpc_resource_user *resource_user); /* Allocate from the resource user (and its quota). If optional_on_done is NULL, then allocate immediately. This may push the quota over-limit, at which point reclamation will kick in. If optional_on_done is non-NULL, it will be scheduled when the allocation has been granted by the quota. */ -void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size, +void grpc_resource_user_alloc(grpc_resource_user *resource_user, size_t size, grpc_closure *optional_on_done); /* Release memory back to the quota */ -void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size); +void grpc_resource_user_free(grpc_resource_user *resource_user, size_t size); /* Post a memory reclaimer to the resource user. Only one benign and one destructive reclaimer can be posted at once. When executed, the reclaimer MUST call grpc_resource_user_finish_reclamation before it completes, to return control to the resource quota. 
*/ -void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, +void grpc_resource_user_post_reclaimer(grpc_resource_user *resource_user, bool destructive, grpc_closure *closure); /* Finish a reclamation step */ -void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); +void grpc_resource_user_finish_reclamation(grpc_resource_user *resource_user); /* Helper to allocate slices from a resource user */ typedef struct grpc_resource_user_slice_allocator { @@ -145,13 +138,11 @@ void grpc_resource_user_slice_allocator_init( /* Allocate \a count slices of length \a length into \a dest. Only one request can be outstanding at a time. */ void grpc_resource_user_alloc_slices( - grpc_exec_ctx *exec_ctx, grpc_resource_user_slice_allocator *slice_allocator, size_t length, size_t count, grpc_slice_buffer *dest); /* Allocate one slice of length \a size synchronously. */ -grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, +grpc_slice grpc_resource_user_slice_malloc(grpc_resource_user *resource_user, size_t size); #ifdef __cplusplus diff --git a/src/core/lib/iomgr/socket_factory_posix.cc b/src/core/lib/iomgr/socket_factory_posix.cc index 8e907703ae..a9d7dac399 100644 --- a/src/core/lib/iomgr/socket_factory_posix.cc +++ b/src/core/lib/iomgr/socket_factory_posix.cc @@ -72,7 +72,7 @@ static void *socket_factory_arg_copy(void *p) { return grpc_socket_factory_ref((grpc_socket_factory *)p); } -static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { +static void socket_factory_arg_destroy(void *p) { grpc_socket_factory_unref((grpc_socket_factory *)p); } diff --git a/src/core/lib/iomgr/socket_mutator.cc b/src/core/lib/iomgr/socket_mutator.cc index b0435d5a07..0a2e91da4d 100644 --- a/src/core/lib/iomgr/socket_mutator.cc +++ b/src/core/lib/iomgr/socket_mutator.cc @@ -63,7 +63,7 @@ static void *socket_mutator_arg_copy(void *p) { 
return grpc_socket_mutator_ref((grpc_socket_mutator *)p); } -static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { +static void socket_mutator_arg_destroy(void *p) { grpc_socket_mutator_unref((grpc_socket_mutator *)p); } diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc index 8c7f7cf683..9d140fe485 100644 --- a/src/core/lib/iomgr/socket_windows.cc +++ b/src/core/lib/iomgr/socket_windows.cc @@ -109,37 +109,34 @@ void grpc_winsocket_destroy(grpc_winsocket *winsocket) { -) The IOCP already completed in the background, and we need to call the callback now. -) The IOCP hasn't completed yet, and we're queuing it for later. */ -static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx, - grpc_winsocket *socket, grpc_closure *closure, +static void socket_notify_on_iocp(grpc_winsocket *socket, grpc_closure *closure, grpc_winsocket_callback_info *info) { GPR_ASSERT(info->closure == NULL); gpr_mu_lock(&socket->state_mu); if (info->has_pending_iocp) { info->has_pending_iocp = 0; - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } else { info->closure = closure; } gpr_mu_unlock(&socket->state_mu); } -void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx, - grpc_winsocket *socket, +void grpc_socket_notify_on_write(grpc_winsocket *socket, grpc_closure *closure) { - socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info); + socket_notify_on_iocp(socket, closure, &socket->write_info); } -void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, - grpc_closure *closure) { - socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info); +void grpc_socket_notify_on_read(grpc_winsocket *socket, grpc_closure *closure) { + socket_notify_on_iocp(socket, closure, &socket->read_info); } -void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, +void grpc_socket_become_ready(grpc_winsocket *socket, 
grpc_winsocket_callback_info *info) { GPR_ASSERT(!info->has_pending_iocp); gpr_mu_lock(&socket->state_mu); if (info->closure) { - GRPC_CLOSURE_SCHED(exec_ctx, info->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(info->closure, GRPC_ERROR_NONE); info->closure = NULL; } else { info->has_pending_iocp = 1; diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h index 84fa071e89..2b9fa0a4fe 100644 --- a/src/core/lib/iomgr/socket_windows.h +++ b/src/core/lib/iomgr/socket_windows.h @@ -99,16 +99,13 @@ void grpc_winsocket_shutdown(grpc_winsocket *socket); /* Destroy a socket. Should only be called if there's no pending operation. */ void grpc_winsocket_destroy(grpc_winsocket *socket); -void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx, - grpc_winsocket *winsocket, +void grpc_socket_notify_on_write(grpc_winsocket *winsocket, grpc_closure *closure); -void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, - grpc_winsocket *winsocket, +void grpc_socket_notify_on_read(grpc_winsocket *winsocket, grpc_closure *closure); -void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, - grpc_winsocket *winsocket, +void grpc_socket_become_ready(grpc_winsocket *winsocket, grpc_winsocket_callback_info *ci); #ifdef __cplusplus diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h index b2f365f2af..bea2d740df 100644 --- a/src/core/lib/iomgr/tcp_client.h +++ b/src/core/lib/iomgr/tcp_client.h @@ -34,8 +34,7 @@ extern "C" { NULL on failure). 
interested_parties points to a set of pollsets that would be interested in this connection being established (in order to continue their work) */ -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect, - grpc_endpoint **endpoint, +void grpc_tcp_client_connect(grpc_closure *on_connect, grpc_endpoint **endpoint, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc index 5611dd9062..c231c3f619 100644 --- a/src/core/lib/iomgr/tcp_client_posix.cc +++ b/src/core/lib/iomgr/tcp_client_posix.cc @@ -96,7 +96,7 @@ done: return err; } -static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { +static void tc_on_alarm(void *acp, grpc_error *error) { int done; async_connect *ac = (async_connect *)acp; if (GRPC_TRACER_ON(grpc_tcp_trace)) { @@ -106,26 +106,25 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { } gpr_mu_lock(&ac->mu); if (ac->fd != NULL) { - grpc_fd_shutdown(exec_ctx, ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "connect() timed out")); + grpc_fd_shutdown( + ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out")); } done = (--ac->refs == 0); gpr_mu_unlock(&ac->mu); if (done) { gpr_mu_destroy(&ac->mu); gpr_free(ac->addr_str); - grpc_channel_args_destroy(exec_ctx, ac->channel_args); + grpc_channel_args_destroy(ac->channel_args); gpr_free(ac); } } grpc_endpoint *grpc_tcp_client_create_from_fd( - grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args, - const char *addr_str) { - return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str); + grpc_fd *fd, const grpc_channel_args *channel_args, const char *addr_str) { + return grpc_tcp_create(fd, channel_args, addr_str); } -static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { +static void on_writable(void *acp, grpc_error *error) 
{ async_connect *ac = (async_connect *)acp; int so_error = 0; socklen_t so_error_size; @@ -149,7 +148,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { ac->fd = NULL; gpr_mu_unlock(&ac->mu); - grpc_timer_cancel(exec_ctx, &ac->alarm); + grpc_timer_cancel(&ac->alarm); gpr_mu_lock(&ac->mu); if (error != GRPC_ERROR_NONE) { @@ -171,9 +170,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { switch (so_error) { case 0: - grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd); - *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args, - ac->addr_str); + grpc_pollset_set_del_fd(ac->interested_parties, fd); + *ep = grpc_tcp_client_create_from_fd(fd, ac->channel_args, ac->addr_str); fd = NULL; break; case ENOBUFS: @@ -193,7 +191,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { don't do that! */ gpr_log(GPR_ERROR, "kernel out of buffers"); gpr_mu_unlock(&ac->mu); - grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure); + grpc_fd_notify_on_write(fd, &ac->write_closure); return; case ECONNREFUSED: /* This error shouldn't happen for anything other than connect(). 
*/ @@ -208,8 +206,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { finish: if (fd != NULL) { - grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd); - grpc_fd_orphan(exec_ctx, fd, NULL, NULL, false /* already_closed */, + grpc_pollset_set_del_fd(ac->interested_parties, fd); + grpc_fd_orphan(fd, NULL, NULL, false /* already_closed */, "tcp_client_orphan"); fd = NULL; } @@ -232,14 +230,13 @@ finish: if (done) { gpr_mu_destroy(&ac->mu); gpr_free(ac->addr_str); - grpc_channel_args_destroy(exec_ctx, ac->channel_args); + grpc_channel_args_destroy(ac->channel_args); gpr_free(ac); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + GRPC_CLOSURE_SCHED(closure, error); } -static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_endpoint **ep, +static void tcp_client_connect_impl(grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, @@ -264,7 +261,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd); if (error != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + GRPC_CLOSURE_SCHED(closure, error); return; } if (dsmode == GRPC_DSMODE_IPV4) { @@ -273,7 +270,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, addr = &addr4_copy; } if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + GRPC_CLOSURE_SCHED(closure, error); return; } @@ -289,20 +286,19 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, fdobj = grpc_fd_create(fd, name); if (err >= 0) { - *ep = - grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str); - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + *ep = grpc_tcp_client_create_from_fd(fdobj, channel_args, addr_str); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); 
goto done; } if (errno != EWOULDBLOCK && errno != EINPROGRESS) { - grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, false /* already_closed */, + grpc_fd_orphan(fdobj, NULL, NULL, false /* already_closed */, "tcp_client_connect_error"); - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect")); + GRPC_CLOSURE_SCHED(closure, GRPC_OS_ERROR(errno, "connect")); goto done; } - grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj); + grpc_pollset_set_add_fd(interested_parties, fdobj); ac = (async_connect *)gpr_malloc(sizeof(async_connect)); ac->closure = closure; @@ -324,8 +320,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&ac->mu); GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm); - grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure); + grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm); + grpc_fd_notify_on_write(ac->fd, &ac->write_closure); gpr_mu_unlock(&ac->mu); done: @@ -336,20 +332,19 @@ done: // overridden by api_fuzzer.c extern "C" { void (*grpc_tcp_client_connect_impl)( - grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, + grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, grpc_millis deadline) = tcp_client_connect_impl; } -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_endpoint **ep, +void grpc_tcp_client_connect(grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, grpc_millis deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, - channel_args, addr, deadline); + grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args, + addr, deadline); } #endif diff --git a/src/core/lib/iomgr/tcp_client_posix.h 
b/src/core/lib/iomgr/tcp_client_posix.h index 8740511804..ed37b5a9de 100644 --- a/src/core/lib/iomgr/tcp_client_posix.h +++ b/src/core/lib/iomgr/tcp_client_posix.h @@ -28,8 +28,7 @@ extern "C" { #endif grpc_endpoint *grpc_tcp_client_create_from_fd( - grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args, - const char *addr_str); + grpc_fd *fd, const grpc_channel_args *channel_args, const char *addr_str); #ifdef __cplusplus } diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc index f3e9366299..31ee397107 100644 --- a/src/core/lib/iomgr/tcp_client_uv.cc +++ b/src/core/lib/iomgr/tcp_client_uv.cc @@ -46,17 +46,15 @@ typedef struct grpc_uv_tcp_connect { grpc_resource_quota *resource_quota; } grpc_uv_tcp_connect; -static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx, - grpc_uv_tcp_connect *connect) { - grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota); +static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect *connect) { + grpc_resource_quota_unref_internal(connect->resource_quota); gpr_free(connect->addr_name); gpr_free(connect); } static void tcp_close_callback(uv_handle_t *handle) { gpr_free(handle); } -static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, - grpc_error *error) { +static void uv_tc_on_alarm(void *acp, grpc_error *error) { int done; grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)acp; if (GRPC_TRACER_ON(grpc_tcp_trace)) { @@ -72,17 +70,17 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, } done = (--connect->refs == 0); if (done) { - uv_tcp_connect_cleanup(exec_ctx, connect); + uv_tcp_connect_cleanup(connect); } } static void uv_tc_on_connect(uv_connect_t *req, int status) { grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)req->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_error *error = GRPC_ERROR_NONE; int done; grpc_closure *closure = connect->closure; - grpc_timer_cancel(&exec_ctx, 
&connect->alarm); + grpc_timer_cancel(&connect->alarm); if (status == 0) { *connect->endpoint = grpc_tcp_create( connect->tcp_handle, connect->resource_quota, connect->addr_name); @@ -107,15 +105,14 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) { } done = (--connect->refs == 0); if (done) { - grpc_exec_ctx_flush(&exec_ctx); - uv_tcp_connect_cleanup(&exec_ctx, connect); + grpc_exec_ctx_flush(); + uv_tcp_connect_cleanup(connect); } - GRPC_CLOSURE_SCHED(&exec_ctx, closure, error); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CLOSURE_SCHED(closure, error); + grpc_exec_ctx_finish(); } -static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_endpoint **ep, +static void tcp_client_connect_impl(grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *resolved_addr, @@ -130,7 +127,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, if (channel_args != NULL) { for (size_t i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); resource_quota = grpc_resource_quota_ref_internal( (grpc_resource_quota *)channel_args->args[i].value.pointer.p); } @@ -158,26 +155,25 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, uv_tc_on_connect); GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm); + grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm); } // overridden by api_fuzzer.c extern "C" { void (*grpc_tcp_client_connect_impl)( - grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, + grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const 
grpc_resolved_address *addr, grpc_millis deadline) = tcp_client_connect_impl; } -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_endpoint **ep, +void grpc_tcp_client_connect(grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, grpc_millis deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, - channel_args, addr, deadline); + grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args, + addr, deadline); } #endif /* GRPC_UV */ diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc index 9adf7ee4e9..94ae9365f0 100644 --- a/src/core/lib/iomgr/tcp_client_windows.cc +++ b/src/core/lib/iomgr/tcp_client_windows.cc @@ -52,13 +52,12 @@ typedef struct { grpc_channel_args *channel_args; } async_connect; -static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx, - async_connect *ac, +static void async_connect_unlock_and_cleanup(async_connect *ac, grpc_winsocket *socket) { int done = (--ac->refs == 0); gpr_mu_unlock(&ac->mu); if (done) { - grpc_channel_args_destroy(exec_ctx, ac->channel_args); + grpc_channel_args_destroy(ac->channel_args); gpr_mu_destroy(&ac->mu); gpr_free(ac->addr_name); gpr_free(ac); @@ -66,7 +65,7 @@ static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx, if (socket != NULL) grpc_winsocket_destroy(socket); } -static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { +static void on_alarm(void *acp, grpc_error *error) { async_connect *ac = (async_connect *)acp; gpr_mu_lock(&ac->mu); grpc_winsocket *socket = ac->socket; @@ -74,10 +73,10 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { if (socket != NULL) { grpc_winsocket_shutdown(socket); } - async_connect_unlock_and_cleanup(exec_ctx, ac, socket); + async_connect_unlock_and_cleanup(ac, socket); } -static void 
on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { +static void on_connect(void *acp, grpc_error *error) { async_connect *ac = (async_connect *)acp; grpc_endpoint **ep = ac->endpoint; GPR_ASSERT(*ep == NULL); @@ -90,7 +89,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { ac->socket = NULL; gpr_mu_unlock(&ac->mu); - grpc_timer_cancel(exec_ctx, &ac->alarm); + grpc_timer_cancel(&ac->alarm); gpr_mu_lock(&ac->mu); @@ -105,8 +104,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { if (!wsa_success) { error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx"); } else { - *ep = - grpc_tcp_create(exec_ctx, socket, ac->channel_args, ac->addr_name); + *ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name); socket = NULL; } } else { @@ -114,18 +112,20 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { } } - async_connect_unlock_and_cleanup(exec_ctx, ac, socket); + async_connect_unlock_and_cleanup(ac, socket); /* If the connection was aborted, the callback was already called when the deadline was met. */ - GRPC_CLOSURE_SCHED(exec_ctx, on_done, error); + GRPC_CLOSURE_SCHED(on_done, error); } /* Tries to issue one async connection, then schedules both an IOCP notification request for the connection, and one timeout alert. 
*/ -static void tcp_client_connect_impl( - grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint, - grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, grpc_millis deadline) { +static void tcp_client_connect_impl(grpc_closure *on_done, + grpc_endpoint **endpoint, + grpc_pollset_set *interested_parties, + const grpc_channel_args *channel_args, + const grpc_resolved_address *addr, + grpc_millis deadline) { SOCKET sock = INVALID_SOCKET; BOOL success; int status; @@ -205,8 +205,8 @@ static void tcp_client_connect_impl( GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm); - grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect); + grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm); + grpc_socket_notify_on_write(socket, &ac->on_connect); return; failure: @@ -222,26 +222,25 @@ failure: } else if (sock != INVALID_SOCKET) { closesocket(sock); } - GRPC_CLOSURE_SCHED(exec_ctx, on_done, final_error); + GRPC_CLOSURE_SCHED(on_done, final_error); } // overridden by api_fuzzer.c extern "C" { void (*grpc_tcp_client_connect_impl)( - grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, + grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, grpc_millis deadline) = tcp_client_connect_impl; } -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_endpoint **ep, +void grpc_tcp_client_connect(grpc_closure *closure, grpc_endpoint **ep, grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, const grpc_resolved_address *addr, grpc_millis deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, - channel_args, addr, deadline); + 
grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args, + addr, deadline); } #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc index 7fcaef7679..b6b7231ee1 100644 --- a/src/core/lib/iomgr/tcp_posix.cc +++ b/src/core/lib/iomgr/tcp_posix.cc @@ -110,36 +110,31 @@ typedef struct backup_poller { static gpr_atm g_uncovered_notifications_pending; static gpr_atm g_backup_poller; /* backup_poller* */ -static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); -static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); -static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx, - void *arg /* grpc_tcp */, +static void tcp_handle_read(void *arg /* grpc_tcp */, grpc_error *error); +static void tcp_handle_write(void *arg /* grpc_tcp */, grpc_error *error); +static void tcp_drop_uncovered_then_handle_write(void *arg /* grpc_tcp */, grpc_error *error); -static void done_poller(grpc_exec_ctx *exec_ctx, void *bp, - grpc_error *error_ignored) { +static void done_poller(void *bp, grpc_error *error_ignored) { backup_poller *p = (backup_poller *)bp; if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p); } - grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p)); + grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p)); gpr_free(p); } -static void run_poller(grpc_exec_ctx *exec_ctx, void *bp, - grpc_error *error_ignored) { +static void run_poller(void *bp, grpc_error *error_ignored) { backup_poller *p = (backup_poller *)bp; if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p); } gpr_mu_lock(p->pollset_mu); - grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 13 * GPR_MS_PER_SEC; - GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx); + grpc_millis deadline = grpc_exec_ctx_now() + 13 * GPR_MS_PER_SEC; + GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(); 
GRPC_LOG_IF_ERROR( "backup_poller:pollset_work", - grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL, deadline)); + grpc_pollset_work(BACKUP_POLLER_POLLSET(p), NULL, deadline)); gpr_mu_unlock(p->pollset_mu); /* last "uncovered" notification is the ref that keeps us polling, if we get * there try a cas to release it */ @@ -154,18 +149,18 @@ static void run_poller(grpc_exec_ctx *exec_ctx, void *bp, if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p); } - grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p), + grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p), GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p, grpc_schedule_on_exec_ctx)); } else { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p); } - GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE); } } -static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void drop_uncovered(grpc_tcp *tcp) { backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller); gpr_atm old_count = gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1); @@ -176,7 +171,7 @@ static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { GPR_ASSERT(old_count != 1); } -static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void cover_self(grpc_tcp *tcp) { backup_poller *p; gpr_atm old_count = gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2); @@ -185,7 +180,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { 2 + (int)old_count); } if (old_count == 0) { - GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx); + GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(); p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size()); if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p); @@ -193,7 +188,6 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { 
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu); gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p); GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, grpc_executor_scheduler(GRPC_EXECUTOR_LONG)), GRPC_ERROR_NONE); @@ -205,39 +199,38 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp); } - grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd); + grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd); if (old_count != 0) { - drop_uncovered(exec_ctx, tcp); + drop_uncovered(tcp); } } -static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void notify_on_read(grpc_tcp *tcp) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp); } GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure); + grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure); } -static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void notify_on_write(grpc_tcp *tcp) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp); } - cover_self(exec_ctx, tcp); + cover_self(tcp); GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_drop_uncovered_then_handle_write, tcp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure); + grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure); } -static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { +static void tcp_drop_uncovered_then_handle_write(void *arg, grpc_error *error) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error)); } - drop_uncovered(exec_ctx, (grpc_tcp *)arg); - tcp_handle_write(exec_ctx, arg, error); + 
drop_uncovered((grpc_tcp *)arg); + tcp_handle_write(arg, error); } static void add_to_estimate(grpc_tcp *tcp, size_t bytes) { @@ -283,33 +276,29 @@ static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) { grpc_slice_from_copied_string(tcp->peer_string)); } -static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); -static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); +static void tcp_handle_read(void *arg /* grpc_tcp */, grpc_error *error); +static void tcp_handle_write(void *arg /* grpc_tcp */, grpc_error *error); -static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { +static void tcp_shutdown(grpc_endpoint *ep, grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_fd_shutdown(exec_ctx, tcp->em_fd, why); - grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); + grpc_fd_shutdown(tcp->em_fd, why); + grpc_resource_user_shutdown(tcp->resource_user); } -static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd, +static void tcp_free(grpc_tcp *tcp) { + grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd, false /* already_closed */, "tcp_unref_orphan"); - grpc_slice_buffer_destroy_internal(exec_ctx, &tcp->last_read_buffer); - grpc_resource_user_unref(exec_ctx, tcp->resource_user); + grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer); + grpc_resource_user_unref(tcp->resource_user); gpr_free(tcp->peer_string); gpr_free(tcp); } #ifndef NDEBUG -#define TCP_UNREF(cl, tcp, reason) \ - tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - const char *reason, const char *file, int line) { +static void 
tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, @@ -317,7 +306,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, val - 1); } if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } @@ -332,26 +321,25 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, gpr_ref(&tcp->refcount); } #else -#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp)) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) #define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_unref(grpc_tcp *tcp) { if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } #endif -static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void tcp_destroy(grpc_endpoint *ep) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer); - TCP_UNREF(exec_ctx, tcp, "destroy"); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + TCP_UNREF(tcp, "destroy"); } -static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - grpc_error *error) { +static void call_read_cb(grpc_tcp *tcp, grpc_error *error) { grpc_closure *cb = tcp->read_cb; if (GRPC_TRACER_ON(grpc_tcp_trace)) { @@ -370,11 +358,11 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, tcp->read_cb = NULL; tcp->incoming_buffer = NULL; - GRPC_CLOSURE_RUN(exec_ctx, cb, error); + GRPC_CLOSURE_RUN(cb, error); } #define MAX_READ_IOVEC 4 -static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_do_read(grpc_tcp *tcp) { struct msghdr msg; struct iovec iov[MAX_READ_IOVEC]; ssize_t read_bytes; @@ -397,12 
+385,12 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { msg.msg_controllen = 0; msg.msg_flags = 0; - GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length); - GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count); + GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length); + GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count); GPR_TIMER_BEGIN("recvmsg", 0); do { - GRPC_STATS_INC_SYSCALL_READ(exec_ctx); + GRPC_STATS_INC_SYSCALL_READ(); read_bytes = recvmsg(tcp->fd, &msg, 0); } while (read_bytes < 0 && errno == EINTR); GPR_TIMER_END("recvmsg", read_bytes >= 0); @@ -413,24 +401,22 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { if (errno == EAGAIN) { finish_estimate(tcp); /* We've consumed the edge, request a new one */ - notify_on_read(exec_ctx, tcp); + notify_on_read(tcp); } else { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - tcp->incoming_buffer); - call_read_cb(exec_ctx, tcp, + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + call_read_cb(tcp, tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp)); - TCP_UNREF(exec_ctx, tcp, "read"); + TCP_UNREF(tcp, "read"); } } else if (read_bytes == 0) { /* 0 read size ==> end of stream */ - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer); + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); call_read_cb( - exec_ctx, tcp, - tcp_annotate_error( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp)); - TCP_UNREF(exec_ctx, tcp, "read"); + tcp, tcp_annotate_error( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp)); + TCP_UNREF(tcp, "read"); } else { - GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes); + GRPC_STATS_INC_TCP_READ_SIZE(read_bytes); add_to_estimate(tcp, (size_t)read_bytes); GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); if ((size_t)read_bytes < tcp->incoming_buffer->length) { @@ -440,50 +426,47 @@ static void 
tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { &tcp->last_read_buffer); } GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE); - TCP_UNREF(exec_ctx, tcp, "read"); + call_read_cb(tcp, GRPC_ERROR_NONE); + TCP_UNREF(tcp, "read"); } GPR_TIMER_END("tcp_continue_read", 0); } -static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp, - grpc_error *error) { +static void tcp_read_allocation_done(void *tcpp, grpc_error *error) { grpc_tcp *tcp = (grpc_tcp *)tcpp; if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp, grpc_error_string(error)); } if (error != GRPC_ERROR_NONE) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - &tcp->last_read_buffer); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error)); - TCP_UNREF(exec_ctx, tcp, "read"); + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + call_read_cb(tcp, GRPC_ERROR_REF(error)); + TCP_UNREF(tcp, "read"); } else { - tcp_do_read(exec_ctx, tcp); + tcp_do_read(tcp); } } -static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_continue_read(grpc_tcp *tcp) { size_t target_read_size = get_target_read_size(tcp); if (tcp->incoming_buffer->length < target_read_size && tcp->incoming_buffer->count < MAX_READ_IOVEC) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp); } - grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator, - target_read_size, 1, tcp->incoming_buffer); + grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1, + tcp->incoming_buffer); } else { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp); } - tcp_do_read(exec_ctx, tcp); + tcp_do_read(tcp); } } -static void tcp_handle_read(grpc_exec_ctx *exec_ctx, 
void *arg /* grpc_tcp */, - grpc_error *error) { +static void tcp_handle_read(void *arg /* grpc_tcp */, grpc_error *error) { grpc_tcp *tcp = (grpc_tcp *)arg; GPR_ASSERT(!tcp->finished_edge); if (GRPC_TRACER_ON(grpc_tcp_trace)) { @@ -491,37 +474,35 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, } if (error != GRPC_ERROR_NONE) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - &tcp->last_read_buffer); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error)); - TCP_UNREF(exec_ctx, tcp, "read"); + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + call_read_cb(tcp, GRPC_ERROR_REF(error)); + TCP_UNREF(tcp, "read"); } else { - tcp_continue_read(exec_ctx, tcp); + tcp_continue_read(tcp); } } -static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *incoming_buffer, grpc_closure *cb) { +static void tcp_read(grpc_endpoint *ep, grpc_slice_buffer *incoming_buffer, + grpc_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; GPR_ASSERT(tcp->read_cb == NULL); tcp->read_cb = cb; tcp->incoming_buffer = incoming_buffer; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, incoming_buffer); + grpc_slice_buffer_reset_and_unref_internal(incoming_buffer); grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer); TCP_REF(tcp, "read"); if (tcp->finished_edge) { tcp->finished_edge = false; - notify_on_read(exec_ctx, tcp); + notify_on_read(tcp); } else { - GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&tcp->read_done_closure, GRPC_ERROR_NONE); } } /* returns true if done, false if pending; if returning true, *error is set */ #define MAX_WRITE_IOVEC 1000 -static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - grpc_error **error) { +static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) { struct msghdr 
msg; struct iovec iov[MAX_WRITE_IOVEC]; msg_iovlen_type iov_size; @@ -560,13 +541,13 @@ static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, msg.msg_controllen = 0; msg.msg_flags = 0; - GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length); - GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size); + GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length); + GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size); GPR_TIMER_BEGIN("sendmsg", 1); do { /* TODO(klempner): Cork if this is a partial write */ - GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx); + GRPC_STATS_INC_SYSCALL_WRITE(); sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS); } while (sent_length < 0 && errno == EINTR); GPR_TIMER_END("sendmsg", 0); @@ -610,24 +591,23 @@ static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, }; } -static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error) { +static void tcp_handle_write(void *arg /* grpc_tcp */, grpc_error *error) { grpc_tcp *tcp = (grpc_tcp *)arg; grpc_closure *cb; if (error != GRPC_ERROR_NONE) { cb = tcp->write_cb; tcp->write_cb = NULL; - cb->cb(exec_ctx, cb->cb_arg, error); - TCP_UNREF(exec_ctx, tcp, "write"); + cb->cb(cb->cb_arg, error); + TCP_UNREF(tcp, "write"); return; } - if (!tcp_flush(exec_ctx, tcp, &error)) { + if (!tcp_flush(tcp, &error)) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "write: delayed"); } - notify_on_write(exec_ctx, tcp); + notify_on_write(tcp); } else { cb = tcp->write_cb; tcp->write_cb = NULL; @@ -636,13 +616,13 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, gpr_log(GPR_DEBUG, "write: %s", str); } - GRPC_CLOSURE_RUN(exec_ctx, cb, error); - TCP_UNREF(exec_ctx, tcp, "write"); + GRPC_CLOSURE_RUN(cb, error); + TCP_UNREF(tcp, "write"); } } -static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *buf, grpc_closure *cb) { +static void tcp_write(grpc_endpoint *ep, grpc_slice_buffer *buf, + grpc_closure *cb) { grpc_tcp *tcp = 
(grpc_tcp *)ep; grpc_error *error = GRPC_ERROR_NONE; @@ -663,45 +643,43 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (buf->length == 0) { GPR_TIMER_END("tcp_write", 0); GRPC_CLOSURE_SCHED( - exec_ctx, cb, - grpc_fd_is_shutdown(tcp->em_fd) - ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), - tcp) - : GRPC_ERROR_NONE); + cb, grpc_fd_is_shutdown(tcp->em_fd) + ? tcp_annotate_error( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp) + : GRPC_ERROR_NONE); return; } tcp->outgoing_buffer = buf; tcp->outgoing_slice_idx = 0; tcp->outgoing_byte_idx = 0; - if (!tcp_flush(exec_ctx, tcp, &error)) { + if (!tcp_flush(tcp, &error)) { TCP_REF(tcp, "write"); tcp->write_cb = cb; if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "write: delayed"); } - notify_on_write(exec_ctx, tcp); + notify_on_write(tcp); } else { if (GRPC_TRACER_ON(grpc_tcp_trace)) { const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "write: %s", str); } - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + GRPC_CLOSURE_SCHED(cb, error); } GPR_TIMER_END("tcp_write", 0); } -static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset) { +static void tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd); + grpc_pollset_add_fd(pollset, tcp->em_fd); } -static void tcp_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, +static void tcp_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset_set) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd); + grpc_pollset_set_add_fd(pollset_set, tcp->em_fd); } static char *tcp_get_peer(grpc_endpoint *ep) { @@ -726,7 +704,7 @@ static const grpc_endpoint_vtable vtable = { #define MAX_CHUNK_SIZE 32 * 1024 * 1024 -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd, +grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, const 
grpc_channel_args *channel_args, const char *peer_string) { int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE; @@ -755,7 +733,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd, grpc_channel_arg_get_integer(&channel_args->args[i], options); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); resource_quota = grpc_resource_quota_ref_internal( (grpc_resource_quota *)channel_args->args[i].value.pointer.p); } @@ -792,7 +770,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd, &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp); /* Tell network status tracker about new endpoint */ grpc_network_status_register_endpoint(&tcp->base); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); return &tcp->base; } @@ -803,15 +781,15 @@ int grpc_tcp_fd(grpc_endpoint *ep) { return grpc_fd_wrapped_fd(tcp->em_fd); } -void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - int *fd, grpc_closure *done) { +void grpc_tcp_destroy_and_release_fd(grpc_endpoint *ep, int *fd, + grpc_closure *done) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; GPR_ASSERT(ep->vtable == &vtable); tcp->release_fd = fd; tcp->release_fd_cb = done; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer); - TCP_UNREF(exec_ctx, tcp, "destroy"); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + TCP_UNREF(tcp, "destroy"); } #endif diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h index 47e78fa67e..f79d0fb8a3 100644 --- a/src/core/lib/iomgr/tcp_posix.h +++ b/src/core/lib/iomgr/tcp_posix.h @@ -41,8 +41,7 @@ extern grpc_tracer_flag grpc_tcp_trace; /* Create a tcp endpoint given a file desciptor and a read slice 
size. Takes ownership of fd. */ -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - const grpc_channel_args *args, +grpc_endpoint *grpc_tcp_create(grpc_fd *fd, const grpc_channel_args *args, const char *peer_string); /* Return the tcp endpoint's fd, or -1 if this is not available. Does not @@ -54,8 +53,8 @@ int grpc_tcp_fd(grpc_endpoint *ep); /* Destroy the tcp endpoint without closing its fd. *fd will be set and done * will be called when the endpoint is destroyed. * Requires: ep must be a tcp endpoint and fd must not be NULL. */ -void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - int *fd, grpc_closure *done); +void grpc_tcp_destroy_and_release_fd(grpc_endpoint *ep, int *fd, + grpc_closure *done); #ifdef __cplusplus } diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h index 8f9ce3819e..eb6f593501 100644 --- a/src/core/lib/iomgr/tcp_server.h +++ b/src/core/lib/iomgr/tcp_server.h @@ -43,22 +43,20 @@ typedef struct grpc_tcp_server_acceptor { /* Called for newly connected TCP connections. Takes ownership of acceptor. */ -typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *ep, +typedef void (*grpc_tcp_server_cb)(void *arg, grpc_endpoint *ep, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor); /* Create a server, initially not bound to any ports. The caller owns one ref. If shutdown_complete is not NULL, it will be used by grpc_tcp_server_unref() when the ref count reaches zero. 
*/ -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, +grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, const grpc_channel_args *args, grpc_tcp_server **server); /* Start listening to bound ports */ -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, - grpc_pollset **pollsets, size_t pollset_count, +void grpc_tcp_server_start(grpc_tcp_server *server, grpc_pollset **pollsets, + size_t pollset_count, grpc_tcp_server_cb on_accept_cb, void *cb_arg); /* Add a port to the server, returning the newly allocated port on success, or @@ -96,11 +94,10 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, /* If the refcount drops to zero, enqueue calls on exec_ctx to shutdown_listeners and delete s. */ -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s); +void grpc_tcp_server_unref(grpc_tcp_server *s); /* Shutdown the fds of listeners. */ -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s); +void grpc_tcp_server_shutdown_listeners(grpc_tcp_server *s); #ifdef __cplusplus } diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc index 06612d639c..a9dcd68cfb 100644 --- a/src/core/lib/iomgr/tcp_server_posix.cc +++ b/src/core/lib/iomgr/tcp_server_posix.cc @@ -68,8 +68,7 @@ static void init(void) { #endif } -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, +grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, const grpc_channel_args *args, grpc_tcp_server **server) { gpr_once_init(&check_init, init); @@ -116,12 +115,12 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void finish_shutdown(grpc_tcp_server *s) { gpr_mu_lock(&s->mu); GPR_ASSERT(s->shutdown); gpr_mu_unlock(&s->mu); if (s->shutdown_complete != NULL) 
{ - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); } gpr_mu_destroy(&s->mu); @@ -131,19 +130,18 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { s->head = sp->next; gpr_free(sp); } - grpc_channel_args_destroy(exec_ctx, s->channel_args); + grpc_channel_args_destroy(s->channel_args); gpr_free(s); } -static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, - grpc_error *error) { +static void destroyed_port(void *server, grpc_error *error) { grpc_tcp_server *s = (grpc_tcp_server *)server; gpr_mu_lock(&s->mu); s->destroyed_ports++; if (s->destroyed_ports == s->nports) { gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } else { GPR_ASSERT(s->destroyed_ports < s->nports); gpr_mu_unlock(&s->mu); @@ -153,7 +151,7 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, /* called when all listening endpoints have been shutdown, so no further events will be received on them - at this point it's safe to destroy things */ -static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void deactivated_all_ports(grpc_tcp_server *s) { /* delete ALL the things */ gpr_mu_lock(&s->mu); @@ -165,17 +163,17 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { grpc_unlink_if_unix_domain_socket(&sp->addr); GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s, grpc_schedule_on_exec_ctx); - grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, + grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, NULL, false /* already_closed */, "tcp_listener_shutdown"); } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } } -static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void tcp_server_destroy(grpc_tcp_server *s) { gpr_mu_lock(&s->mu); GPR_ASSERT(!s->shutdown); @@ -185,18 
+183,18 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { if (s->active_ports) { grpc_tcp_listener *sp; for (sp = s->head; sp; sp = sp->next) { - grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Server destroyed")); + grpc_fd_shutdown( + sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed")); } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - deactivated_all_ports(exec_ctx, s); + deactivated_all_ports(s); } } /* event manager callback when reads are ready */ -static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { +static void on_read(void *arg, grpc_error *err) { grpc_tcp_listener *sp = (grpc_tcp_listener *)arg; grpc_pollset *read_notifier_pollset; if (err != GRPC_ERROR_NONE) { @@ -222,7 +220,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { case EINTR: continue; case EAGAIN: - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); return; default: gpr_mu_lock(&sp->server->mu); @@ -248,7 +246,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { grpc_fd *fdobj = grpc_fd_create(fd, name); - grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj); + grpc_pollset_add_fd(read_notifier_pollset, fdobj); // Create acceptor. 
grpc_tcp_server_acceptor *acceptor = @@ -258,8 +256,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { acceptor->fd_index = sp->fd_index; sp->server->on_accept_cb( - exec_ctx, sp->server->on_accept_cb_arg, - grpc_tcp_create(exec_ctx, fdobj, sp->server->channel_args, addr_str), + sp->server->on_accept_cb_arg, + grpc_tcp_create(fdobj, sp->server->channel_args, addr_str), read_notifier_pollset, acceptor); gpr_free(name); @@ -272,7 +270,7 @@ error: gpr_mu_lock(&sp->server->mu); if (0 == --sp->server->active_ports && sp->server->shutdown) { gpr_mu_unlock(&sp->server->mu); - deactivated_all_ports(exec_ctx, sp->server); + deactivated_all_ports(sp->server); } else { gpr_mu_unlock(&sp->server->mu); } @@ -482,8 +480,8 @@ int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index, return -1; } -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, - grpc_pollset **pollsets, size_t pollset_count, +void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets, + size_t pollset_count, grpc_tcp_server_cb on_accept_cb, void *on_accept_cb_arg) { size_t i; @@ -503,20 +501,20 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, GPR_ASSERT(GRPC_LOG_IF_ERROR( "clone_port", clone_port(sp, (unsigned)(pollset_count - 1)))); for (i = 0; i < pollset_count; i++) { - grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); + grpc_pollset_add_fd(pollsets[i], sp->emfd); GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); s->active_ports++; sp = sp->next; } } else { for (i = 0; i < pollset_count; i++) { - grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); + grpc_pollset_add_fd(pollsets[i], sp->emfd); } GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + 
grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); s->active_ports++; sp = sp->next; } @@ -537,25 +535,24 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, gpr_mu_unlock(&s->mu); } -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +void grpc_tcp_server_unref(grpc_tcp_server *s) { if (gpr_unref(&s->refs)) { - grpc_tcp_server_shutdown_listeners(exec_ctx, s); + grpc_tcp_server_shutdown_listeners(s); gpr_mu_lock(&s->mu); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting); + GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting); gpr_mu_unlock(&s->mu); - tcp_server_destroy(exec_ctx, s); + tcp_server_destroy(s); } } -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) { +void grpc_tcp_server_shutdown_listeners(grpc_tcp_server *s) { gpr_mu_lock(&s->mu); s->shutdown_listeners = true; /* shutdown all fd's */ if (s->active_ports) { grpc_tcp_listener *sp; for (sp = s->head; sp; sp = sp->next) { - grpc_fd_shutdown(exec_ctx, sp->emfd, + grpc_fd_shutdown(sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown")); } } diff --git a/src/core/lib/iomgr/tcp_server_uv.cc b/src/core/lib/iomgr/tcp_server_uv.cc index 348838c495..fc4b58f81c 100644 --- a/src/core/lib/iomgr/tcp_server_uv.cc +++ b/src/core/lib/iomgr/tcp_server_uv.cc @@ -73,8 +73,7 @@ struct grpc_tcp_server { grpc_resource_quota *resource_quota; }; -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, +grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, const grpc_channel_args *args, grpc_tcp_server **server) { grpc_tcp_server *s = (grpc_tcp_server *)gpr_malloc(sizeof(grpc_tcp_server)); @@ -82,11 +81,11 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, for (size_t i = 0; i < (args == NULL ? 
0 : args->num_args); i++) { if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) { if (args->args[i].type == GRPC_ARG_POINTER) { - grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); + grpc_resource_quota_unref_internal(s->resource_quota); s->resource_quota = grpc_resource_quota_ref_internal( (grpc_resource_quota *)args->args[i].value.pointer.p); } else { - grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); + grpc_resource_quota_unref_internal(s->resource_quota); gpr_free(s); return GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool"); @@ -119,10 +118,10 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, GRPC_ERROR_NONE); } -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void finish_shutdown(grpc_tcp_server *s) { GPR_ASSERT(s->shutdown); if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); } while (s->head) { @@ -132,18 +131,18 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { gpr_free(sp->handle); gpr_free(sp); } - grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); + grpc_resource_quota_unref_internal(s->resource_quota); gpr_free(s); } static void handle_close_callback(uv_handle_t *handle) { grpc_tcp_listener *sp = (grpc_tcp_listener *)handle->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; sp->server->open_ports--; if (sp->server->open_ports == 0 && sp->server->shutdown) { - finish_shutdown(&exec_ctx, sp->server); + finish_shutdown(sp->server); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } static void close_listener(grpc_tcp_listener *sp) { @@ -153,7 +152,7 @@ static void close_listener(grpc_tcp_listener *sp) { } } -static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void 
tcp_server_destroy(grpc_tcp_server *s) { int immediately_done = 0; grpc_tcp_listener *sp; @@ -168,15 +167,15 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { } if (immediately_done) { - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } } -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +void grpc_tcp_server_unref(grpc_tcp_server *s) { GRPC_UV_ASSERT_SAME_THREAD(); if (gpr_unref(&s->refs)) { /* Complete shutdown_starting work before destroying. */ - grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_exec_ctx local_ExecCtx _local_exec_ctx; GRPC_CLOSURE_LIST_SCHED(&local_exec_ctx, &s->shutdown_starting); if (exec_ctx == NULL) { grpc_exec_ctx_flush(&local_exec_ctx); @@ -184,12 +183,12 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { grpc_exec_ctx_finish(&local_exec_ctx); } else { grpc_exec_ctx_finish(&local_exec_ctx); - tcp_server_destroy(exec_ctx, s); + tcp_server_destroy(s); } } } -static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) { +static void finish_accept(grpc_tcp_listener *sp) { grpc_tcp_server_acceptor *acceptor = (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor)); uv_tcp_t *client = NULL; @@ -225,14 +224,13 @@ static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) { acceptor->from_server = sp->server; acceptor->port_index = sp->port_index; acceptor->fd_index = 0; - sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - acceptor); + sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor); gpr_free(peer_name_string); } static void on_connect(uv_stream_t *server, int status) { grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; if (status < 0) { switch (status) { @@ -253,11 +251,11 @@ static void on_connect(uv_stream_t *server, int status) { // Create acceptor. 
if (sp->server->on_accept_cb) { - finish_accept(&exec_ctx, sp); + finish_accept(sp); } else { sp->has_pending_connection = true; } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle, @@ -428,8 +426,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, return error; } -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, - grpc_pollset **pollsets, size_t pollset_count, +void grpc_tcp_server_start(grpc_tcp_server *server, grpc_pollset **pollsets, + size_t pollset_count, grpc_tcp_server_cb on_accept_cb, void *cb_arg) { grpc_tcp_listener *sp; (void)pollsets; @@ -444,13 +442,12 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, server->on_accept_cb_arg = cb_arg; for (sp = server->head; sp; sp = sp->next) { if (sp->has_pending_connection) { - finish_accept(exec_ctx, sp); + finish_accept(sp); sp->has_pending_connection = false; } } } -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) {} +void grpc_tcp_server_shutdown_listeners(grpc_tcp_server *s) {} #endif /* GRPC_UV */ diff --git a/src/core/lib/iomgr/tcp_server_windows.cc b/src/core/lib/iomgr/tcp_server_windows.cc index f198aaaa5b..4dfb17229a 100644 --- a/src/core/lib/iomgr/tcp_server_windows.cc +++ b/src/core/lib/iomgr/tcp_server_windows.cc @@ -94,8 +94,7 @@ struct grpc_tcp_server { /* Public function. Allocates the proper data structures to hold a grpc_tcp_server. 
*/ -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, +grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete, const grpc_channel_args *args, grpc_tcp_server **server) { grpc_tcp_server *s = (grpc_tcp_server *)gpr_malloc(sizeof(grpc_tcp_server)); @@ -114,8 +113,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void destroy_server(void *arg, grpc_error *error) { grpc_tcp_server *s = (grpc_tcp_server *)arg; /* Now that the accepts have been aborted, we can destroy the sockets. @@ -128,19 +126,18 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg, grpc_winsocket_destroy(sp->socket); gpr_free(sp); } - grpc_channel_args_destroy(exec_ctx, s->channel_args); + grpc_channel_args_destroy(s->channel_args); gpr_free(s); } -static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) { +static void finish_shutdown_locked(grpc_tcp_server *s) { if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); } - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { @@ -156,14 +153,14 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, gpr_mu_unlock(&s->mu); } -static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void tcp_server_destroy(grpc_tcp_server *s) { grpc_tcp_listener *sp; gpr_mu_lock(&s->mu); /* First, shutdown all fd's. This will queue abortion calls for all of the pending accepts due to the normal operation mechanism. 
*/ if (s->active_ports == 0) { - finish_shutdown_locked(exec_ctx, s); + finish_shutdown_locked(s); } else { for (sp = s->head; sp; sp = sp->next) { sp->shutting_down = 1; @@ -173,13 +170,13 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { gpr_mu_unlock(&s->mu); } -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +void grpc_tcp_server_unref(grpc_tcp_server *s) { if (gpr_unref(&s->refs)) { - grpc_tcp_server_shutdown_listeners(exec_ctx, s); + grpc_tcp_server_shutdown_listeners(s); gpr_mu_lock(&s->mu); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting); + GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting); gpr_mu_unlock(&s->mu); - tcp_server_destroy(exec_ctx, s); + tcp_server_destroy(s); } } @@ -233,19 +230,17 @@ failure: return error; } -static void decrement_active_ports_and_notify_locked(grpc_exec_ctx *exec_ctx, - grpc_tcp_listener *sp) { +static void decrement_active_ports_and_notify_locked(grpc_tcp_listener *sp) { sp->shutting_down = 0; GPR_ASSERT(sp->server->active_ports > 0); if (0 == --sp->server->active_ports) { - finish_shutdown_locked(exec_ctx, sp->server); + finish_shutdown_locked(sp->server); } } /* In order to do an async accept, we need to create a socket first which will be the one assigned to the new incoming connection. */ -static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx, - grpc_tcp_listener *port) { +static grpc_error *start_accept_locked(grpc_tcp_listener *port) { SOCKET sock = INVALID_SOCKET; BOOL success; DWORD addrlen = sizeof(struct sockaddr_in6) + 16; @@ -284,7 +279,7 @@ static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx, /* We're ready to do the accept. Calling grpc_socket_notify_on_read may immediately process an accept that happened in the meantime. 
*/ port->new_socket = sock; - grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept); + grpc_socket_notify_on_read(port->socket, &port->on_accept); port->outstanding_calls++; return error; @@ -295,7 +290,7 @@ failure: } /* Event manager callback when reads are ready. */ -static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void on_accept(void *arg, grpc_error *error) { grpc_tcp_listener *sp = (grpc_tcp_listener *)arg; SOCKET sock = sp->new_socket; grpc_winsocket_callback_info *info = &sp->socket->read_info; @@ -357,7 +352,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { gpr_free(utf8_message); } gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string); - ep = grpc_tcp_create(exec_ctx, grpc_winsocket_create(sock, fd_name), + ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name), sp->server->channel_args, peer_name_string); gpr_free(fd_name); gpr_free(peer_name_string); @@ -375,17 +370,15 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { acceptor->from_server = sp->server; acceptor->port_index = sp->port_index; acceptor->fd_index = 0; - sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - acceptor); + sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor); } /* As we were notified from the IOCP of one and exactly one accept, the former socked we created has now either been destroy or assigned to the new connection. We need to create a new one for the next connection. 
*/ - GPR_ASSERT( - GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp))); if (0 == --sp->outstanding_calls) { - decrement_active_ports_and_notify_locked(exec_ctx, sp); + decrement_active_ports_and_notify_locked(sp); } gpr_mu_unlock(&sp->server->mu); } @@ -522,8 +515,8 @@ done: return error; } -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, - grpc_pollset **pollset, size_t pollset_count, +void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollset, + size_t pollset_count, grpc_tcp_server_cb on_accept_cb, void *on_accept_cb_arg) { grpc_tcp_listener *sp; @@ -534,14 +527,12 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, s->on_accept_cb = on_accept_cb; s->on_accept_cb_arg = on_accept_cb_arg; for (sp = s->head; sp; sp = sp->next) { - GPR_ASSERT( - GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp))); s->active_ports++; } gpr_mu_unlock(&s->mu); } -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) {} +void grpc_tcp_server_shutdown_listeners(grpc_tcp_server *s) {} #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc index e311964dbc..3628e1cc2c 100644 --- a/src/core/lib/iomgr/tcp_uv.cc +++ b/src/core/lib/iomgr/tcp_uv.cc @@ -65,20 +65,19 @@ typedef struct { grpc_pollset *pollset; } grpc_tcp; -static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - grpc_slice_unref_internal(exec_ctx, tcp->read_slice); - grpc_resource_user_unref(exec_ctx, tcp->resource_user); +static void tcp_free(grpc_tcp *tcp) { + grpc_slice_unref_internal(tcp->read_slice); + grpc_resource_user_unref(tcp->resource_user); gpr_free(tcp->handle); gpr_free(tcp->peer_string); gpr_free(tcp); } #ifndef NDEBUG -#define TCP_UNREF(exec_ctx, tcp, reason) \ - tcp_unref((exec_ctx), 
(tcp), (reason), __FILE__, __LINE__) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - const char *reason, const char *file, int line) { +static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, @@ -86,7 +85,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, val - 1); } if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } @@ -101,11 +100,11 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, gpr_ref(&tcp->refcount); } #else -#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp)) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) #define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_unref(grpc_tcp *tcp) { if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } @@ -113,40 +112,39 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } #endif static void uv_close_callback(uv_handle_t *handle) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_tcp *tcp = (grpc_tcp *)handle->data; - TCP_UNREF(&exec_ctx, tcp, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); + TCP_UNREF(tcp, "destroy"); + grpc_exec_ctx_finish(); } -static grpc_slice alloc_read_slice(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { - return grpc_resource_user_slice_malloc(exec_ctx, resource_user, +static grpc_slice alloc_read_slice(grpc_resource_user *resource_user) { + return grpc_resource_user_slice_malloc(resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE); } static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) 
{ - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_tcp *tcp = (grpc_tcp *)handle->data; (void)suggested_size; buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice); buf->len = GRPC_SLICE_LENGTH(tcp->read_slice); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } static void read_callback(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) { grpc_slice sub; grpc_error *error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_tcp *tcp = (grpc_tcp *)stream->data; grpc_closure *cb = tcp->read_cb; if (nread == 0) { // Nothing happened. Wait for the next callback return; } - TCP_UNREF(&exec_ctx, tcp, "read"); + TCP_UNREF(tcp, "read"); tcp->read_cb = NULL; // TODO(murgatroid99): figure out what the return value here means uv_read_stop(stream); @@ -156,7 +154,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread, // Successful read sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread); grpc_slice_buffer_add(tcp->read_slices, sub); - tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user); + tcp->read_slice = alloc_read_slice(tcp->resource_user); error = GRPC_ERROR_NONE; if (GRPC_TRACER_ON(grpc_tcp_trace)) { size_t i; @@ -175,12 +173,12 @@ static void read_callback(uv_stream_t *stream, ssize_t nread, // nread < 0: Error error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"); } - GRPC_CLOSURE_SCHED(&exec_ctx, cb, error); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CLOSURE_SCHED(cb, error); + grpc_exec_ctx_finish(); } -static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *read_slices, grpc_closure *cb) { +static void uv_endpoint_read(grpc_endpoint *ep, grpc_slice_buffer *read_slices, + grpc_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; int status; grpc_error *error = GRPC_ERROR_NONE; @@ -188,7 +186,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, GPR_ASSERT(tcp->read_cb == NULL); 
tcp->read_cb = cb; tcp->read_slices = read_slices; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices); + grpc_slice_buffer_reset_and_unref_internal(read_slices); TCP_REF(tcp, "read"); // TODO(murgatroid99): figure out what the return value here means status = @@ -198,7 +196,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, grpc_slice_from_static_string(uv_strerror(status))); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + GRPC_CLOSURE_SCHED(cb, error); } if (GRPC_TRACER_ON(grpc_tcp_trace)) { const char *str = grpc_error_string(error); @@ -209,10 +207,10 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, static void write_callback(uv_write_t *req, int status) { grpc_tcp *tcp = (grpc_tcp *)req->data; grpc_error *error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_closure *cb = tcp->write_cb; tcp->write_cb = NULL; - TCP_UNREF(&exec_ctx, tcp, "write"); + TCP_UNREF(tcp, "write"); if (status == 0) { error = GRPC_ERROR_NONE; } else { @@ -223,13 +221,13 @@ static void write_callback(uv_write_t *req, int status) { gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str); } gpr_free(tcp->write_buffers); - grpc_resource_user_free(&exec_ctx, tcp->resource_user, + grpc_resource_user_free(tcp->resource_user, sizeof(uv_buf_t) * tcp->write_slices->count); - GRPC_CLOSURE_SCHED(&exec_ctx, cb, error); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CLOSURE_SCHED(cb, error); + grpc_exec_ctx_finish(); } -static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, +static void uv_endpoint_write(grpc_endpoint *ep, grpc_slice_buffer *write_slices, grpc_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; @@ -252,8 +250,8 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, } if (tcp->shutting_down) { - GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "TCP socket is shutting 
down")); + GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "TCP socket is shutting down")); return; } @@ -263,15 +261,15 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (tcp->write_slices->count == 0) { // No slices means we don't have to do anything, // and libuv doesn't like empty writes - GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE); return; } tcp->write_cb = cb; buffer_count = (unsigned int)tcp->write_slices->count; buffers = (uv_buf_t *)gpr_malloc(sizeof(uv_buf_t) * buffer_count); - grpc_resource_user_alloc(exec_ctx, tcp->resource_user, - sizeof(uv_buf_t) * buffer_count, NULL); + grpc_resource_user_alloc(tcp->resource_user, sizeof(uv_buf_t) * buffer_count, + NULL); for (i = 0; i < buffer_count; i++) { slice = &tcp->write_slices->slices[i]; buffers[i].base = (char *)GRPC_SLICE_START_PTR(*slice); @@ -286,8 +284,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, write_callback); } -static void uv_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset) { +static void uv_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { // No-op. We're ignoring pollsets currently (void)exec_ctx; (void)ep; @@ -296,7 +293,7 @@ static void uv_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, tcp->pollset = pollset; } -static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, +static void uv_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pollset) { // No-op. 
We're ignoring pollsets currently (void)exec_ctx; @@ -306,8 +303,7 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, static void shutdown_callback(uv_shutdown_t *req, int status) {} -static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { +static void uv_endpoint_shutdown(grpc_endpoint *ep, grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; if (!tcp->shutting_down) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { @@ -317,12 +313,12 @@ static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, tcp->shutting_down = true; uv_shutdown_t *req = &tcp->shutdown_req; uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback); - grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); + grpc_resource_user_shutdown(tcp->resource_user); } GRPC_ERROR_UNREF(why); } -static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void uv_destroy(grpc_endpoint *ep) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; uv_close((uv_handle_t *)tcp->handle, uv_close_callback); @@ -349,7 +345,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, grpc_resource_quota *resource_quota, char *peer_string) { grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp); @@ -366,7 +362,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, tcp->peer_string = gpr_strdup(peer_string); tcp->shutting_down = false; tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); - tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user); + tcp->read_slice = alloc_read_slice(tcp->resource_user); /* Tell network status tracking code about the new endpoint */ grpc_network_status_register_endpoint(&tcp->base); @@ -374,7 +370,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, 
uv_unref((uv_handle_t *)handle); #endif - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return &tcp->base; } diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc index dc84e564a9..baa0a1895e 100644 --- a/src/core/lib/iomgr/tcp_windows.cc +++ b/src/core/lib/iomgr/tcp_windows.cc @@ -109,21 +109,20 @@ typedef struct grpc_tcp { char *peer_string; } grpc_tcp; -static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_free(grpc_tcp *tcp) { grpc_winsocket_destroy(tcp->socket); gpr_mu_destroy(&tcp->mu); gpr_free(tcp->peer_string); - grpc_resource_user_unref(exec_ctx, tcp->resource_user); + grpc_resource_user_unref(tcp->resource_user); if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error); gpr_free(tcp); } #ifndef NDEBUG -#define TCP_UNREF(exec_ctx, tcp, reason) \ - tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - const char *reason, const char *file, int line) { +static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, @@ -131,7 +130,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, val - 1); } if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } @@ -146,11 +145,11 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, gpr_ref(&tcp->refcount); } #else -#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp)) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) #define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_unref(grpc_tcp *tcp) { if 
(gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } @@ -158,7 +157,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } #endif /* Asynchronous callback from the IOCP, or the background thread. */ -static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { +static void on_read(void *tcpp, grpc_error *error) { grpc_tcp *tcp = (grpc_tcp *)tcpp; grpc_closure *cb = tcp->read_cb; grpc_winsocket *socket = tcp->socket; @@ -172,13 +171,13 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { char *utf8_message = gpr_format_message(info->wsa_error); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message); gpr_free(utf8_message); - grpc_slice_unref_internal(exec_ctx, tcp->read_slice); + grpc_slice_unref_internal(tcp->read_slice); } else { if (info->bytes_transfered != 0 && !tcp->shutting_down) { sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered); grpc_slice_buffer_add(tcp->read_slices, sub); } else { - grpc_slice_unref_internal(exec_ctx, tcp->read_slice); + grpc_slice_unref_internal(tcp->read_slice); error = tcp->shutting_down ? 
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "TCP stream shutting down", &tcp->shutdown_error, 1) @@ -188,12 +187,12 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } tcp->read_cb = NULL; - TCP_UNREF(exec_ctx, tcp, "read"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + TCP_UNREF(tcp, "read"); + GRPC_CLOSURE_SCHED(cb, error); } -static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *read_slices, grpc_closure *cb) { +static void win_read(grpc_endpoint *ep, grpc_slice_buffer *read_slices, + grpc_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->read_info; @@ -204,15 +203,14 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (tcp->shutting_down) { GRPC_CLOSURE_SCHED( - exec_ctx, cb, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "TCP socket is shutting down", &tcp->shutdown_error, 1)); + cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "TCP socket is shutting down", &tcp->shutdown_error, 1)); return; } tcp->read_cb = cb; tcp->read_slices = read_slices; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices); + grpc_slice_buffer_reset_and_unref_internal(read_slices); tcp->read_slice = GRPC_SLICE_MALLOC(8192); @@ -230,7 +228,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, /* Did we get data immediately ? Yay. 
*/ if (info->wsa_error != WSAEWOULDBLOCK) { info->bytes_transfered = bytes_read; - GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&tcp->on_read, GRPC_ERROR_NONE); return; } @@ -243,17 +241,17 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { info->wsa_error = wsa_error; - GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, + GRPC_CLOSURE_SCHED(&tcp->on_read, GRPC_WSA_ERROR(info->wsa_error, "WSARecv")); return; } } - grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read); + grpc_socket_notify_on_read(tcp->socket, &tcp->on_read); } /* Asynchronous callback from the IOCP, or the background thread. */ -static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { +static void on_write(void *tcpp, grpc_error *error) { grpc_tcp *tcp = (grpc_tcp *)tcpp; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->write_info; @@ -274,13 +272,13 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } } - TCP_UNREF(exec_ctx, tcp, "write"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + TCP_UNREF(tcp, "write"); + GRPC_CLOSURE_SCHED(cb, error); } /* Initiates a write. 
*/ -static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb) { +static void win_write(grpc_endpoint *ep, grpc_slice_buffer *slices, + grpc_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *socket = tcp->socket; grpc_winsocket_callback_info *info = &socket->write_info; @@ -294,9 +292,8 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (tcp->shutting_down) { GRPC_CLOSURE_SCHED( - exec_ctx, cb, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "TCP socket is shutting down", &tcp->shutdown_error, 1)); + cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "TCP socket is shutting down", &tcp->shutdown_error, 1)); return; } @@ -327,7 +324,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *error = status == 0 ? GRPC_ERROR_NONE : GRPC_WSA_ERROR(info->wsa_error, "WSASend"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + GRPC_CLOSURE_SCHED(cb, error); if (allocated) gpr_free(allocated); return; } @@ -344,27 +341,25 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { - TCP_UNREF(exec_ctx, tcp, "write"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend")); + TCP_UNREF(tcp, "write"); + GRPC_CLOSURE_SCHED(cb, GRPC_WSA_ERROR(wsa_error, "WSASend")); return; } } /* As all is now setup, we can now ask for the IOCP notification. It may trigger the callback immediately however, but no matter. 
*/ - grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write); + grpc_socket_notify_on_write(socket, &tcp->on_write); } -static void win_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *ps) { +static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) { grpc_tcp *tcp; (void)ps; tcp = (grpc_tcp *)ep; grpc_iocp_add_socket(tcp->socket); } -static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pss) { +static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) { grpc_tcp *tcp; (void)pss; tcp = (grpc_tcp *)ep; @@ -377,8 +372,7 @@ static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, we're not going to protect against these. However the IO Completion Port callback will happen from another thread, so we need to protect against concurrent access of the data structure in that regard. */ -static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { +static void win_shutdown(grpc_endpoint *ep, grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; gpr_mu_lock(&tcp->mu); /* At that point, what may happen is that we're already inside the IOCP @@ -391,13 +385,13 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, } grpc_winsocket_shutdown(tcp->socket); gpr_mu_unlock(&tcp->mu); - grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); + grpc_resource_user_shutdown(tcp->resource_user); } -static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void win_destroy(grpc_endpoint *ep) { grpc_network_status_unregister_endpoint(ep); grpc_tcp *tcp = (grpc_tcp *)ep; - TCP_UNREF(exec_ctx, tcp, "destroy"); + TCP_UNREF(tcp, "destroy"); } static char *win_get_peer(grpc_endpoint *ep) { @@ -417,14 +411,14 @@ static grpc_endpoint_vtable vtable = { win_shutdown, win_destroy, win_get_resource_user, win_get_peer, win_get_fd}; -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, 
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, grpc_channel_args *channel_args, const char *peer_string) { grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL); if (channel_args != NULL) { for (size_t i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); resource_quota = grpc_resource_quota_ref_internal( (grpc_resource_quota *)channel_args->args[i].value.pointer.p); } diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h index f3697f707c..1e647196e4 100644 --- a/src/core/lib/iomgr/tcp_windows.h +++ b/src/core/lib/iomgr/tcp_windows.h @@ -39,7 +39,7 @@ extern "C" { /* Create a tcp endpoint given a winsock handle. * Takes ownership of the handle. */ -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, +grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, grpc_channel_args *channel_args, const char *peer_string); diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h index 419e834cf1..6a9c06f2ee 100644 --- a/src/core/lib/iomgr/timer.h +++ b/src/core/lib/iomgr/timer.h @@ -44,8 +44,8 @@ typedef struct grpc_timer grpc_timer; application code should check the error to determine how it was invoked. The application callback is also responsible for maintaining information about when to free up any user-level state. */ -void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - grpc_millis deadline, grpc_closure *closure); +void grpc_timer_init(grpc_timer *timer, grpc_millis deadline, + grpc_closure *closure); /* Initialize *timer without setting it. This can later be passed through the regular init or cancel */ @@ -77,7 +77,7 @@ void grpc_timer_init_unset(grpc_timer *timer); matches this aim. 
Requires: cancel() must happen after init() on a given timer */ -void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer); +void grpc_timer_cancel(grpc_timer *timer); /* iomgr internal api for dealing with timers */ @@ -94,10 +94,9 @@ typedef enum { *next is never guaranteed to be updated on any given execution; however, with high probability at least one thread in the system will see an update at any time slice. */ -grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - grpc_millis *next); -void grpc_timer_list_init(grpc_exec_ctx *exec_ctx); -void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx); +grpc_timer_check_result grpc_timer_check(grpc_millis *next); +void grpc_timer_list_init(); +void grpc_timer_list_shutdown(); /* Consume a kick issued by grpc_kick_poller */ void grpc_timer_consume_kick(void); diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc index b8e895de6f..c8cbd42a0c 100644 --- a/src/core/lib/iomgr/timer_generic.cc +++ b/src/core/lib/iomgr/timer_generic.cc @@ -227,8 +227,7 @@ static gpr_atm saturating_add(gpr_atm a, gpr_atm b) { return a + b; } -static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, - gpr_atm now, +static grpc_timer_check_result run_some_expired_timers(gpr_atm now, gpr_atm *next, grpc_error *error); @@ -238,13 +237,13 @@ static gpr_atm compute_min_deadline(timer_shard *shard) { : grpc_timer_heap_top(&shard->heap)->deadline; } -void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) { +void grpc_timer_list_init() { uint32_t i; g_shared_mutables.initialized = true; g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER; gpr_mu_init(&g_shared_mutables.mu); - g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx); + g_shared_mutables.min_timer = grpc_exec_ctx_now(); gpr_tls_init(&g_last_seen_min_timer); gpr_tls_set(&g_last_seen_min_timer, 0); grpc_register_tracer(&grpc_timer_trace); @@ -266,10 +265,10 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) 
{ INIT_TIMER_HASH_TABLE(); } -void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { +void grpc_timer_list_shutdown() { int i; run_some_expired_timers( - exec_ctx, GPR_ATM_MAX, NULL, + GPR_ATM_MAX, NULL, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown")); for (i = 0; i < NUM_SHARDS; i++) { timer_shard *shard = &g_shards[i]; @@ -320,8 +319,8 @@ static void note_deadline_change(timer_shard *shard) { void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; } -void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - grpc_millis deadline, grpc_closure *closure) { +void grpc_timer_init(grpc_timer *timer, grpc_millis deadline, + grpc_closure *closure) { int is_first_timer = 0; timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; timer->closure = closure; @@ -334,12 +333,12 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, if (GRPC_TRACER_ON(grpc_timer_trace)) { gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer, - deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb); + deadline, grpc_exec_ctx_now(), closure, closure->cb); } if (!g_shared_mutables.initialized) { timer->pending = false; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Attempt to create timer before initialization")); return; @@ -347,10 +346,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_mu_lock(&shard->mu); timer->pending = true; - grpc_millis now = grpc_exec_ctx_now(exec_ctx); + grpc_millis now = grpc_exec_ctx_now(); if (deadline <= now) { timer->pending = false; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE); gpr_mu_unlock(&shard->mu); /* early out */ return; @@ -410,7 +409,7 @@ void grpc_timer_consume_kick(void) { gpr_tls_set(&g_last_seen_min_timer, 0); } -void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { 
+void grpc_timer_cancel(grpc_timer *timer) { if (!g_shared_mutables.initialized) { /* must have already been cancelled, also the shard mutex is invalid */ return; @@ -426,7 +425,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { if (timer->pending) { REMOVE_FROM_HASH_TABLE(timer); - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED); timer->pending = false; if (timer->heap_index == INVALID_HEAP_INDEX) { list_remove(timer); @@ -512,15 +511,14 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) { } /* REQUIRES: shard->mu unlocked */ -static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, - gpr_atm now, gpr_atm *new_min_deadline, - grpc_error *error) { +static size_t pop_timers(timer_shard *shard, gpr_atm now, + gpr_atm *new_min_deadline, grpc_error *error) { size_t n = 0; grpc_timer *timer; gpr_mu_lock(&shard->mu); while ((timer = pop_one(shard, now))) { REMOVE_FROM_HASH_TABLE(timer); - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_REF(error)); n++; } *new_min_deadline = compute_min_deadline(shard); @@ -532,8 +530,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, return n; } -static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, - gpr_atm now, +static grpc_timer_check_result run_some_expired_timers(gpr_atm now, gpr_atm *next, grpc_error *error) { grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED; @@ -562,8 +559,7 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, /* For efficiency, we pop as many available timers as we can from the shard. This may violate perfect timer deadline ordering, but that shouldn't be a big deal because we don't make ordering guarantees. 
*/ - if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, - error) > 0) { + if (pop_timers(g_shard_queue[0], now, &new_min_deadline, error) > 0) { result = GRPC_TIMERS_FIRED; } @@ -600,10 +596,9 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, return result; } -grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - grpc_millis *next) { +grpc_timer_check_result grpc_timer_check(grpc_millis *next) { // prelude - grpc_millis now = grpc_exec_ctx_now(exec_ctx); + grpc_millis now = grpc_exec_ctx_now(); /* fetch from a thread-local first: this avoids contention on a globally mutable cacheline in the common case */ @@ -641,7 +636,7 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, } // actual code grpc_timer_check_result r = - run_some_expired_timers(exec_ctx, now, next, shutdown_error); + run_some_expired_timers(now, next, shutdown_error); // tracing if (GRPC_TRACER_ON(grpc_timer_check_trace)) { char *next_str; diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc index 1248f82189..9c8d35add9 100644 --- a/src/core/lib/iomgr/timer_manager.cc +++ b/src/core/lib/iomgr/timer_manager.cc @@ -98,13 +98,13 @@ static void start_timer_thread_and_unlock(void) { } void grpc_timer_manager_tick() { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_millis next = GRPC_MILLIS_INF_FUTURE; - grpc_timer_check(&exec_ctx, &next); - grpc_exec_ctx_finish(&exec_ctx); + grpc_timer_check(&next); + grpc_exec_ctx_finish(); } -static void run_some_timers(grpc_exec_ctx *exec_ctx) { +static void run_some_timers() { // if there's something to execute... 
gpr_mu_lock(&g_mu); // remove a waiter from the pool, and start another thread if necessary @@ -126,7 +126,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) { if (GRPC_TRACER_ON(grpc_timer_check_trace)) { gpr_log(GPR_DEBUG, "flush exec_ctx"); } - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_mu_lock(&g_mu); // garbage collect any threads hanging out that are dead gc_completed_threads(); @@ -138,7 +138,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) { // wait until 'next' (or forever if there is already a timed waiter in the pool) // returns true if the thread should continue executing (false if it should // shutdown) -static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) { +static bool wait_until(grpc_millis next) { gpr_mu_lock(&g_mu); // if we're not threaded anymore, leave if (!g_threaded) { @@ -179,7 +179,7 @@ static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) { g_timed_waiter_deadline = next; if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx); + grpc_millis wait_time = next - grpc_exec_ctx_now(); gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds", wait_time); } @@ -221,14 +221,14 @@ static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) { return true; } -static void timer_main_loop(grpc_exec_ctx *exec_ctx) { +static void timer_main_loop() { for (;;) { grpc_millis next = GRPC_MILLIS_INF_FUTURE; - grpc_exec_ctx_invalidate_now(exec_ctx); + grpc_exec_ctx_invalidate_now(); // check timer state, updates next to the next time to run a check - switch (grpc_timer_check(exec_ctx, &next)) { + switch (grpc_timer_check(&next)) { case GRPC_TIMERS_FIRED: - run_some_timers(exec_ctx); + run_some_timers(); break; case GRPC_TIMERS_NOT_CHECKED: /* This case only happens under contention, meaning more than one timer @@ -246,7 +246,7 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) { next = GRPC_MILLIS_INF_FUTURE; /* fall through */ case 
GRPC_TIMERS_CHECKED_AND_EMPTY: - if (!wait_until(exec_ctx, next)) { + if (!wait_until(next)) { return; } break; @@ -274,10 +274,9 @@ static void timer_thread_cleanup(completed_thread *ct) { static void timer_thread(void *completed_thread_ptr) { // this threads exec_ctx: we try to run things through to completion here // since it's easy to spin up new threads - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); - timer_main_loop(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx(0, grpc_never_ready_to_finish, NULL); + timer_main_loop(); + grpc_exec_ctx_finish(); timer_thread_cleanup((completed_thread *)completed_thread_ptr); } diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc index ccbbe357ae..b3e1ef372f 100644 --- a/src/core/lib/iomgr/timer_uv.cc +++ b/src/core/lib/iomgr/timer_uv.cc @@ -45,28 +45,28 @@ static void stop_uv_timer(uv_timer_t *handle) { void run_expired_timer(uv_timer_t *handle) { grpc_timer *timer = (grpc_timer *)handle->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_UV_ASSERT_SAME_THREAD(); GPR_ASSERT(timer->pending); timer->pending = 0; - GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE); stop_uv_timer(handle); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } -void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - grpc_millis deadline, grpc_closure *closure) { +void grpc_timer_init(grpc_timer *timer, grpc_millis deadline, + grpc_closure *closure) { uint64_t timeout; uv_timer_t *uv_timer; GRPC_UV_ASSERT_SAME_THREAD(); timer->closure = closure; - if (deadline <= grpc_exec_ctx_now(exec_ctx)) { + if (deadline <= grpc_exec_ctx_now()) { timer->pending = 0; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE); return; } timer->pending = 1; - timeout = (uint64_t)(deadline - 
grpc_exec_ctx_now(exec_ctx)); + timeout = (uint64_t)(deadline - grpc_exec_ctx_now()); uv_timer = (uv_timer_t *)gpr_malloc(sizeof(uv_timer_t)); uv_timer_init(uv_default_loop(), uv_timer); uv_timer->data = timer; @@ -80,22 +80,21 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; } -void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { +void grpc_timer_cancel(grpc_timer *timer) { GRPC_UV_ASSERT_SAME_THREAD(); if (timer->pending) { timer->pending = 0; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED); stop_uv_timer((uv_timer_t *)timer->uv_timer); } } -grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - grpc_millis *next) { +grpc_timer_check_result grpc_timer_check(grpc_millis *next) { return GRPC_TIMERS_NOT_CHECKED; } -void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {} -void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {} +void grpc_timer_list_init() {} +void grpc_timer_list_shutdown() {} void grpc_timer_consume_kick(void) {} diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc index 00b2e68bb5..c868e82d1d 100644 --- a/src/core/lib/iomgr/udp_server.cc +++ b/src/core/lib/iomgr/udp_server.cc @@ -141,22 +141,21 @@ grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) { return s; } -static void shutdown_fd(grpc_exec_ctx *exec_ctx, void *args, - grpc_error *error) { +static void shutdown_fd(void *args, grpc_error *error) { struct shutdown_fd_args *shutdown_args = (struct shutdown_fd_args *)args; gpr_mu_lock(shutdown_args->server_mu); - grpc_fd_shutdown(exec_ctx, shutdown_args->fd, GRPC_ERROR_REF(error)); + grpc_fd_shutdown(shutdown_args->fd, GRPC_ERROR_REF(error)); gpr_mu_unlock(shutdown_args->server_mu); gpr_free(shutdown_args); } -static void dummy_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void 
dummy_cb(void *arg, grpc_error *error) { // No-op. } -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { +static void finish_shutdown(grpc_udp_server *s) { if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); } gpr_mu_destroy(&s->mu); @@ -174,14 +173,13 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { gpr_free(s); } -static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, - grpc_error *error) { +static void destroyed_port(void *server, grpc_error *error) { grpc_udp_server *s = (grpc_udp_server *)server; gpr_mu_lock(&s->mu); s->destroyed_ports++; if (s->destroyed_ports == s->nports) { gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } else { gpr_mu_unlock(&s->mu); } @@ -190,7 +188,7 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, /* called when all listening endpoints have been shutdown, so no further events will be received on them - at this point it's safe to destroy things */ -static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { +static void deactivated_all_ports(grpc_udp_server *s) { /* delete ALL the things */ gpr_mu_lock(&s->mu); @@ -210,21 +208,19 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd, grpc_schedule_on_exec_ctx); GPR_ASSERT(sp->orphan_cb); - sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure, - sp->server->user_data); + sp->orphan_cb(sp->emfd, &sp->orphan_fd_closure, sp->server->user_data); } - grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, + grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, NULL, false /* already_closed */, "udp_listener_shutdown"); } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } } -void 
grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, - grpc_closure *on_done) { +void grpc_udp_server_destroy(grpc_udp_server *s, grpc_closure *on_done) { grpc_udp_listener *sp; gpr_mu_lock(&s->mu); @@ -243,14 +239,13 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, args->server_mu = &s->mu; GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args, grpc_schedule_on_exec_ctx); - sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure, - sp->server->user_data); + sp->orphan_cb(sp->emfd, &sp->orphan_fd_closure, sp->server->user_data); sp->orphan_notified = true; } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - deactivated_all_ports(exec_ctx, s); + deactivated_all_ports(s); } } @@ -331,14 +326,14 @@ error: } /* event manager callback when reads are ready */ -static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void on_read(void *arg, grpc_error *error) { grpc_udp_listener *sp = (grpc_udp_listener *)arg; gpr_mu_lock(&sp->server->mu); if (error != GRPC_ERROR_NONE) { if (0 == --sp->server->active_ports && sp->server->shutdown) { gpr_mu_unlock(&sp->server->mu); - deactivated_all_ports(exec_ctx, sp->server); + deactivated_all_ports(sp->server); } else { gpr_mu_unlock(&sp->server->mu); } @@ -347,21 +342,21 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { /* Tell the registered callback that data is available to read. */ GPR_ASSERT(sp->read_cb); - sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data); + sp->read_cb(sp->emfd, sp->server->user_data); /* Re-arm the notification event so we get another chance to read. 
*/ - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); gpr_mu_unlock(&sp->server->mu); } -static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void on_write(void *arg, grpc_error *error) { grpc_udp_listener *sp = (grpc_udp_listener *)arg; gpr_mu_lock(&(sp->server->mu)); if (error != GRPC_ERROR_NONE) { if (0 == --sp->server->active_ports && sp->server->shutdown) { gpr_mu_unlock(&sp->server->mu); - deactivated_all_ports(exec_ctx, sp->server); + deactivated_all_ports(sp->server); } else { gpr_mu_unlock(&sp->server->mu); } @@ -370,10 +365,10 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { /* Tell the registered callback that the socket is writeable. */ GPR_ASSERT(sp->write_cb); - sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data); + sp->write_cb(sp->emfd, sp->server->user_data); /* Re-arm the notification event so we get another chance to write. */ - grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); + grpc_fd_notify_on_write(sp->emfd, &sp->write_closure); gpr_mu_unlock(&sp->server->mu); } @@ -512,9 +507,8 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) { return sp->fd; } -void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, - grpc_pollset **pollsets, size_t pollset_count, - void *user_data) { +void grpc_udp_server_start(grpc_udp_server *s, grpc_pollset **pollsets, + size_t pollset_count, void *user_data) { size_t i; gpr_mu_lock(&s->mu); grpc_udp_listener *sp; @@ -525,15 +519,15 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, sp = s->head; while (sp != NULL) { for (i = 0; i < pollset_count; i++) { - grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); + grpc_pollset_add_fd(pollsets[i], sp->emfd); } GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + 
grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); + grpc_fd_notify_on_write(sp->emfd, &sp->write_closure); /* Registered for both read and write callbacks: increment active_ports * twice to account for this, and delay free-ing of memory until both diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h index e887cb1bcf..27cbf370e6 100644 --- a/src/core/lib/iomgr/udp_server.h +++ b/src/core/lib/iomgr/udp_server.h @@ -35,16 +35,13 @@ struct grpc_server; typedef struct grpc_udp_server grpc_udp_server; /* Called when data is available to read from the socket. */ -typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, - void *user_data); +typedef void (*grpc_udp_server_read_cb)(grpc_fd *emfd, void *user_data); /* Called when the socket is writeable. */ -typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, - void *user_data); +typedef void (*grpc_udp_server_write_cb)(grpc_fd *emfd, void *user_data); /* Called when the grpc_fd is about to be orphaned (and the FD closed). */ -typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx, - grpc_fd *emfd, +typedef void (*grpc_udp_server_orphan_cb)(grpc_fd *emfd, grpc_closure *shutdown_fd_callback, void *user_data); @@ -52,9 +49,8 @@ typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx, grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args); /* Start listening to bound ports. user_data is passed to callbacks. 
*/ -void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server, - grpc_pollset **pollsets, size_t pollset_count, - void *user_data); +void grpc_udp_server_start(grpc_udp_server *udp_server, grpc_pollset **pollsets, + size_t pollset_count, void *user_data); int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index); @@ -74,8 +70,7 @@ int grpc_udp_server_add_port(grpc_udp_server *s, grpc_udp_server_write_cb write_cb, grpc_udp_server_orphan_cb orphan_cb); -void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server, - grpc_closure *on_done); +void grpc_udp_server_destroy(grpc_udp_server *server, grpc_closure *on_done); #ifdef __cplusplus } diff --git a/src/core/lib/security/context/security_context.cc b/src/core/lib/security/context/security_context.cc index 31d800b9b4..89f5019d99 100644 --- a/src/core/lib/security/context/security_context.cc +++ b/src/core/lib/security/context/security_context.cc @@ -38,7 +38,7 @@ grpc_tracer_flag grpc_trace_auth_context_refcount = grpc_call_error grpc_call_set_credentials(grpc_call *call, grpc_call_credentials *creds) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_client_security_context *ctx = NULL; GRPC_API_TRACE("grpc_call_set_credentials(call=%p, creds=%p)", 2, (call, creds)); @@ -54,10 +54,10 @@ grpc_call_error grpc_call_set_credentials(grpc_call *call, grpc_call_context_set(call, GRPC_CONTEXT_SECURITY, ctx, grpc_client_security_context_destroy); } else { - grpc_call_credentials_unref(&exec_ctx, ctx->creds); + grpc_call_credentials_unref(ctx->creds); ctx->creds = grpc_call_credentials_ref(creds); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return GRPC_CALL_OK; } @@ -87,15 +87,15 @@ grpc_client_security_context *grpc_client_security_context_create(void) { } void grpc_client_security_context_destroy(void *ctx) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_client_security_context *c = 
(grpc_client_security_context *)ctx; - grpc_call_credentials_unref(&exec_ctx, c->creds); + grpc_call_credentials_unref(c->creds); GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context"); if (c->extension.instance != NULL && c->extension.destroy != NULL) { c->extension.destroy(c->extension.instance); } gpr_free(ctx); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } /* --- grpc_server_security_context --- */ @@ -304,7 +304,7 @@ void grpc_auth_property_reset(grpc_auth_property *property) { memset(property, 0, sizeof(grpc_auth_property)); } -static void auth_context_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { +static void auth_context_pointer_arg_destroy(void *p) { GRPC_AUTH_CONTEXT_UNREF((grpc_auth_context *)p, "auth_context_pointer_arg"); } diff --git a/src/core/lib/security/credentials/composite/composite_credentials.cc b/src/core/lib/security/credentials/composite/composite_credentials.cc index 779300ac07..d05a7a1f9c 100644 --- a/src/core/lib/security/credentials/composite/composite_credentials.cc +++ b/src/core/lib/security/credentials/composite/composite_credentials.cc @@ -39,17 +39,15 @@ typedef struct { grpc_closure internal_on_request_metadata; } grpc_composite_call_credentials_metadata_context; -static void composite_call_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void composite_call_destruct(grpc_call_credentials *creds) { grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds; for (size_t i = 0; i < c->inner.num_creds; i++) { - grpc_call_credentials_unref(exec_ctx, c->inner.creds_array[i]); + grpc_call_credentials_unref(c->inner.creds_array[i]); } gpr_free(c->inner.creds_array); } -static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void composite_call_metadata_cb(void *arg, grpc_error *error) { grpc_composite_call_credentials_metadata_context *ctx = (grpc_composite_call_credentials_metadata_context *)arg; if 
(error == GRPC_ERROR_NONE) { @@ -58,23 +56,23 @@ static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_call_credentials *inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; if (grpc_call_credentials_get_request_metadata( - exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context, - ctx->md_array, &ctx->internal_on_request_metadata, &error)) { + inner_creds, ctx->pollent, ctx->auth_md_context, ctx->md_array, + &ctx->internal_on_request_metadata, &error)) { // Synchronous response, so call ourselves recursively. - composite_call_metadata_cb(exec_ctx, arg, error); + composite_call_metadata_cb(arg, error); GRPC_ERROR_UNREF(error); } return; } // We're done! } - GRPC_CLOSURE_SCHED(exec_ctx, ctx->on_request_metadata, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(ctx->on_request_metadata, GRPC_ERROR_REF(error)); gpr_free(ctx); } static bool composite_call_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context auth_md_context, + grpc_call_credentials *creds, grpc_polling_entity *pollent, + grpc_auth_metadata_context auth_md_context, grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, grpc_error **error) { grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds; @@ -93,8 +91,8 @@ static bool composite_call_get_request_metadata( grpc_call_credentials *inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; if (grpc_call_credentials_get_request_metadata( - exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context, - ctx->md_array, &ctx->internal_on_request_metadata, error)) { + inner_creds, ctx->pollent, ctx->auth_md_context, ctx->md_array, + &ctx->internal_on_request_metadata, error)) { if (*error != GRPC_ERROR_NONE) break; } else { synchronous = false; // Async return. 
@@ -106,12 +104,12 @@ static bool composite_call_get_request_metadata( } static void composite_call_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *creds, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds; for (size_t i = 0; i < c->inner.num_creds; ++i) { grpc_call_credentials_cancel_get_request_metadata( - exec_ctx, c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error)); + c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error)); } GRPC_ERROR_UNREF(error); } @@ -200,19 +198,17 @@ grpc_call_credentials *grpc_credentials_contains_type( /* -- Composite channel credentials. -- */ -static void composite_channel_destruct(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds) { +static void composite_channel_destruct(grpc_channel_credentials *creds) { grpc_composite_channel_credentials *c = (grpc_composite_channel_credentials *)creds; - grpc_channel_credentials_unref(exec_ctx, c->inner_creds); - grpc_call_credentials_unref(exec_ctx, c->call_creds); + grpc_channel_credentials_unref(c->inner_creds); + grpc_call_credentials_unref(c->call_creds); } static grpc_security_status composite_channel_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args) { + grpc_channel_credentials *creds, grpc_call_credentials *call_creds, + const char *target, const grpc_channel_args *args, + grpc_channel_security_connector **sc, grpc_channel_args **new_args) { grpc_composite_channel_credentials *c = (grpc_composite_channel_credentials *)creds; grpc_security_status status = GRPC_SECURITY_ERROR; @@ -226,12 +222,11 @@ static grpc_security_status 
composite_channel_create_security_connector( grpc_call_credentials *composite_call_creds = grpc_composite_call_credentials_create(c->call_creds, call_creds, NULL); status = c->inner_creds->vtable->create_security_connector( - exec_ctx, c->inner_creds, composite_call_creds, target, args, sc, - new_args); - grpc_call_credentials_unref(exec_ctx, composite_call_creds); + c->inner_creds, composite_call_creds, target, args, sc, new_args); + grpc_call_credentials_unref(composite_call_creds); } else { status = c->inner_creds->vtable->create_security_connector( - exec_ctx, c->inner_creds, c->call_creds, target, args, sc, new_args); + c->inner_creds, c->call_creds, target, args, sc, new_args); } return status; } diff --git a/src/core/lib/security/credentials/credentials.cc b/src/core/lib/security/credentials/credentials.cc index ebbf350865..b12e3dac29 100644 --- a/src/core/lib/security/credentials/credentials.cc +++ b/src/core/lib/security/credentials/credentials.cc @@ -47,8 +47,8 @@ grpc_credentials_metadata_request *grpc_credentials_metadata_request_create( } void grpc_credentials_metadata_request_destroy( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r) { - grpc_call_credentials_unref(exec_ctx, r->creds); + grpc_credentials_metadata_request *r) { + grpc_call_credentials_unref(r->creds); grpc_http_response_destroy(&r->response); gpr_free(r); } @@ -60,12 +60,11 @@ grpc_channel_credentials *grpc_channel_credentials_ref( return creds; } -void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds) { +void grpc_channel_credentials_unref(grpc_channel_credentials *creds) { if (creds == NULL) return; if (gpr_unref(&creds->refcount)) { if (creds->vtable->destruct != NULL) { - creds->vtable->destruct(exec_ctx, creds); + creds->vtable->destruct(creds); } gpr_free(creds); } @@ -73,9 +72,9 @@ void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx, void grpc_channel_credentials_release(grpc_channel_credentials *creds) { 
GRPC_API_TRACE("grpc_channel_credentials_release(creds=%p)", 1, (creds)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_channel_credentials_unref(&exec_ctx, creds); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_channel_credentials_unref(creds); + grpc_exec_ctx_finish(); } grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds) { @@ -84,12 +83,11 @@ grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds) { return creds; } -void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +void grpc_call_credentials_unref(grpc_call_credentials *creds) { if (creds == NULL) return; if (gpr_unref(&creds->refcount)) { if (creds->vtable->destruct != NULL) { - creds->vtable->destruct(exec_ctx, creds); + creds->vtable->destruct(creds); } gpr_free(creds); } @@ -97,43 +95,42 @@ void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx, void grpc_call_credentials_release(grpc_call_credentials *creds) { GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_call_credentials_unref(&exec_ctx, creds); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_call_credentials_unref(creds); + grpc_exec_ctx_finish(); } bool grpc_call_credentials_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { + grpc_call_credentials *creds, grpc_polling_entity *pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, + grpc_closure *on_request_metadata, grpc_error **error) { if (creds == NULL || creds->vtable->get_request_metadata == NULL) { return true; } - return creds->vtable->get_request_metadata( - exec_ctx, creds, pollent, context, md_array, on_request_metadata, error); + return 
creds->vtable->get_request_metadata(creds, pollent, context, md_array, + on_request_metadata, error); } void grpc_call_credentials_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *creds, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { if (creds == NULL || creds->vtable->cancel_get_request_metadata == NULL) { return; } - creds->vtable->cancel_get_request_metadata(exec_ctx, creds, md_array, error); + creds->vtable->cancel_get_request_metadata(creds, md_array, error); } grpc_security_status grpc_channel_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds, - const char *target, const grpc_channel_args *args, - grpc_channel_security_connector **sc, grpc_channel_args **new_args) { + grpc_channel_credentials *channel_creds, const char *target, + const grpc_channel_args *args, grpc_channel_security_connector **sc, + grpc_channel_args **new_args) { *new_args = NULL; if (channel_creds == NULL) { return GRPC_SECURITY_ERROR; } GPR_ASSERT(channel_creds->vtable->create_security_connector != NULL); return channel_creds->vtable->create_security_connector( - exec_ctx, channel_creds, NULL, target, args, sc, new_args); + channel_creds, NULL, target, args, sc, new_args); } grpc_channel_credentials * @@ -148,8 +145,8 @@ grpc_channel_credentials_duplicate_without_call_credentials( } } -static void credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - grpc_channel_credentials_unref(exec_ctx, (grpc_channel_credentials *)p); +static void credentials_pointer_arg_destroy(void *p) { + grpc_channel_credentials_unref((grpc_channel_credentials *)p); } static void *credentials_pointer_arg_copy(void *p) { @@ -199,12 +196,11 @@ grpc_server_credentials *grpc_server_credentials_ref( return creds; } -void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_server_credentials *creds) 
{ +void grpc_server_credentials_unref(grpc_server_credentials *creds) { if (creds == NULL) return; if (gpr_unref(&creds->refcount)) { if (creds->vtable->destruct != NULL) { - creds->vtable->destruct(exec_ctx, creds); + creds->vtable->destruct(creds); } if (creds->processor.destroy != NULL && creds->processor.state != NULL) { creds->processor.destroy(creds->processor.state); @@ -215,19 +211,18 @@ void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx, void grpc_server_credentials_release(grpc_server_credentials *creds) { GRPC_API_TRACE("grpc_server_credentials_release(creds=%p)", 1, (creds)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_server_credentials_unref(&exec_ctx, creds); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_server_credentials_unref(creds); + grpc_exec_ctx_finish(); } grpc_security_status grpc_server_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, - grpc_server_security_connector **sc) { + grpc_server_credentials *creds, grpc_server_security_connector **sc) { if (creds == NULL || creds->vtable->create_security_connector == NULL) { gpr_log(GPR_ERROR, "Server credentials cannot create security context."); return GRPC_SECURITY_ERROR; } - return creds->vtable->create_security_connector(exec_ctx, creds, sc); + return creds->vtable->create_security_connector(creds, sc); } void grpc_server_credentials_set_auth_metadata_processor( @@ -244,9 +239,8 @@ void grpc_server_credentials_set_auth_metadata_processor( creds->processor = processor; } -static void server_credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, - void *p) { - grpc_server_credentials_unref(exec_ctx, (grpc_server_credentials *)p); +static void server_credentials_pointer_arg_destroy(void *p) { + grpc_server_credentials_unref((grpc_server_credentials *)p); } static void *server_credentials_pointer_arg_copy(void *p) { diff --git a/src/core/lib/security/credentials/credentials.h 
b/src/core/lib/security/credentials/credentials.h index 73e39ae039..8f4b764544 100644 --- a/src/core/lib/security/credentials/credentials.h +++ b/src/core/lib/security/credentials/credentials.h @@ -92,13 +92,12 @@ void grpc_override_well_known_credentials_path_getter( #define GRPC_ARG_CHANNEL_CREDENTIALS "grpc.channel_credentials" typedef struct { - void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c); + void (*destruct)(grpc_channel_credentials *c); grpc_security_status (*create_security_connector)( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args); + grpc_channel_credentials *c, grpc_call_credentials *call_creds, + const char *target, const grpc_channel_args *args, + grpc_channel_security_connector **sc, grpc_channel_args **new_args); grpc_channel_credentials *(*duplicate_without_call_credentials)( grpc_channel_credentials *c); @@ -112,17 +111,16 @@ struct grpc_channel_credentials { grpc_channel_credentials *grpc_channel_credentials_ref( grpc_channel_credentials *creds); -void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds); +void grpc_channel_credentials_unref(grpc_channel_credentials *creds); /* Creates a security connector for the channel. May also create new channel args for the channel to be used in place of the passed in const args if returned non NULL. In that case the caller is responsible for destroying new_args after channel creation. 
*/ grpc_security_status grpc_channel_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds, - const char *target, const grpc_channel_args *args, - grpc_channel_security_connector **sc, grpc_channel_args **new_args); + grpc_channel_credentials *creds, const char *target, + const grpc_channel_args *args, grpc_channel_security_connector **sc, + grpc_channel_args **new_args); /* Creates a version of the channel credentials without any attached call credentials. This can be used in order to open a channel to a non-trusted @@ -157,22 +155,19 @@ void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array *list, void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst, grpc_credentials_mdelem_array *src); -void grpc_credentials_mdelem_array_destroy(grpc_exec_ctx *exec_ctx, - grpc_credentials_mdelem_array *list); +void grpc_credentials_mdelem_array_destroy(grpc_credentials_mdelem_array *list); /* --- grpc_call_credentials. --- */ typedef struct { - void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_call_credentials *c); - bool (*get_request_metadata)(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *c, + void (*destruct)(grpc_call_credentials *c); + bool (*get_request_metadata)(grpc_call_credentials *c, grpc_polling_entity *pollent, grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, grpc_error **error); - void (*cancel_get_request_metadata)(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *c, + void (*cancel_get_request_metadata)(grpc_call_credentials *c, grpc_credentials_mdelem_array *md_array, grpc_error *error); } grpc_call_credentials_vtable; @@ -184,39 +179,35 @@ struct grpc_call_credentials { }; grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds); -void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds); +void grpc_call_credentials_unref(grpc_call_credentials *creds); /// Returns 
true if completed synchronously, in which case \a error will /// be set to indicate the result. Otherwise, \a on_request_metadata will /// be invoked asynchronously when complete. \a md_array will be populated /// with the resulting metadata once complete. bool grpc_call_credentials_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error); + grpc_call_credentials *creds, grpc_polling_entity *pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, + grpc_closure *on_request_metadata, grpc_error **error); /// Cancels a pending asynchronous operation started by /// grpc_call_credentials_get_request_metadata() with the corresponding /// value of \a md_array. void grpc_call_credentials_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error); + grpc_call_credentials *c, grpc_credentials_mdelem_array *md_array, + grpc_error *error); /* Metadata-only credentials with the specified key and value where asynchronicity can be simulated for testing. */ grpc_call_credentials *grpc_md_only_test_credentials_create( - grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value, - bool is_async); + const char *md_key, const char *md_value, bool is_async); /* --- grpc_server_credentials. 
--- */ typedef struct { - void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_server_credentials *c); + void (*destruct)(grpc_server_credentials *c); grpc_security_status (*create_security_connector)( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *c, - grpc_server_security_connector **sc); + grpc_server_credentials *c, grpc_server_security_connector **sc); } grpc_server_credentials_vtable; struct grpc_server_credentials { @@ -227,14 +218,12 @@ struct grpc_server_credentials { }; grpc_security_status grpc_server_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, - grpc_server_security_connector **sc); + grpc_server_credentials *creds, grpc_server_security_connector **sc); grpc_server_credentials *grpc_server_credentials_ref( grpc_server_credentials *creds); -void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_server_credentials *creds); +void grpc_server_credentials_unref(grpc_server_credentials *creds); #define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials" @@ -254,7 +243,7 @@ grpc_credentials_metadata_request *grpc_credentials_metadata_request_create( grpc_call_credentials *creds); void grpc_credentials_metadata_request_destroy( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r); + grpc_credentials_metadata_request *r); #ifdef __cplusplus } diff --git a/src/core/lib/security/credentials/credentials_metadata.cc b/src/core/lib/security/credentials/credentials_metadata.cc index 5ba98bda4e..29b632376b 100644 --- a/src/core/lib/security/credentials/credentials_metadata.cc +++ b/src/core/lib/security/credentials/credentials_metadata.cc @@ -52,9 +52,9 @@ void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst, } void grpc_credentials_mdelem_array_destroy( - grpc_exec_ctx *exec_ctx, grpc_credentials_mdelem_array *list) { + grpc_credentials_mdelem_array *list) { for (size_t i = 0; i < list->size; ++i) { - GRPC_MDELEM_UNREF(exec_ctx, list->md[i]); + 
GRPC_MDELEM_UNREF(list->md[i]); } gpr_free(list->md); } diff --git a/src/core/lib/security/credentials/fake/fake_credentials.cc b/src/core/lib/security/credentials/fake/fake_credentials.cc index cf10bf24c8..42257c7a13 100644 --- a/src/core/lib/security/credentials/fake/fake_credentials.cc +++ b/src/core/lib/security/credentials/fake/fake_credentials.cc @@ -34,10 +34,9 @@ "grpc.fake_security.expected_targets" static grpc_security_status fake_transport_security_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args) { + grpc_channel_credentials *c, grpc_call_credentials *call_creds, + const char *target, const grpc_channel_args *args, + grpc_channel_security_connector **sc, grpc_channel_args **new_args) { *sc = grpc_fake_channel_security_connector_create(c, call_creds, target, args); return GRPC_SECURITY_OK; @@ -45,8 +44,7 @@ static grpc_security_status fake_transport_security_create_security_connector( static grpc_security_status fake_transport_security_server_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *c, - grpc_server_security_connector **sc) { + grpc_server_credentials *c, grpc_server_security_connector **sc) { *sc = grpc_fake_server_security_connector_create(c); return GRPC_SECURITY_OK; } @@ -98,29 +96,27 @@ const char *grpc_fake_transport_get_expected_targets( /* -- Metadata-only test credentials. 
-- */ -static void md_only_test_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void md_only_test_destruct(grpc_call_credentials *creds) { grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds; - GRPC_MDELEM_UNREF(exec_ctx, c->md); + GRPC_MDELEM_UNREF(c->md); } static bool md_only_test_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { + grpc_call_credentials *creds, grpc_polling_entity *pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, + grpc_closure *on_request_metadata, grpc_error **error) { grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds; grpc_credentials_mdelem_array_add(md_array, c->md); if (c->is_async) { - GRPC_CLOSURE_SCHED(exec_ctx, on_request_metadata, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(on_request_metadata, GRPC_ERROR_NONE); return false; } return true; } static void md_only_test_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *c, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { GRPC_ERROR_UNREF(error); } @@ -129,17 +125,15 @@ static grpc_call_credentials_vtable md_only_test_vtable = { md_only_test_cancel_get_request_metadata}; grpc_call_credentials *grpc_md_only_test_credentials_create( - grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value, - bool is_async) { + const char *md_key, const char *md_value, bool is_async) { grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)gpr_zalloc( sizeof(grpc_md_only_test_credentials)); c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2; c->base.vtable = &md_only_test_vtable; gpr_ref_init(&c->base.refcount, 1); - c->md = - 
grpc_mdelem_from_slices(exec_ctx, grpc_slice_from_copied_string(md_key), - grpc_slice_from_copied_string(md_value)); + c->md = grpc_mdelem_from_slices(grpc_slice_from_copied_string(md_key), + grpc_slice_from_copied_string(md_value)); c->is_async = is_async; return &c->base; } diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc index 5b2ddceb4a..719e6d147a 100644 --- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc +++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc @@ -58,8 +58,7 @@ typedef struct { grpc_http_response response; } compute_engine_detector; -static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx, - void *user_data, +static void on_compute_engine_detection_http_response(void *user_data, grpc_error *error) { compute_engine_detector *detector = (compute_engine_detector *)user_data; if (error == GRPC_ERROR_NONE && detector->response.status == 200 && @@ -80,16 +79,15 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx, detector->is_done = 1; GRPC_LOG_IF_ERROR( "Pollset kick", - grpc_pollset_kick(exec_ctx, - grpc_polling_entity_pollset(&detector->pollent), NULL)); + grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL)); gpr_mu_unlock(g_polling_mu); } -static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, grpc_error *e) { - grpc_pollset_destroy(exec_ctx, (grpc_pollset *)p); +static void destroy_pollset(void *p, grpc_error *e) { + grpc_pollset_destroy((grpc_pollset *)p); } -static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { +static int is_stack_running_on_compute_engine() { compute_engine_detector detector; grpc_httpcli_request request; grpc_httpcli_context context; @@ -115,14 +113,14 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { 
grpc_resource_quota *resource_quota = grpc_resource_quota_create("google_default_credentials"); grpc_httpcli_get( - exec_ctx, &context, &detector.pollent, resource_quota, &request, - grpc_exec_ctx_now(exec_ctx) + max_detection_delay, + &context, &detector.pollent, resource_quota, &request, + grpc_exec_ctx_now() + max_detection_delay, GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector, grpc_schedule_on_exec_ctx), &detector.response); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); /* Block until we get the response. This is not ideal but this should only be called once for the lifetime of the process by the default credentials. */ @@ -131,8 +129,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { grpc_pollset_worker *worker = NULL; if (!GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(exec_ctx, - grpc_polling_entity_pollset(&detector.pollent), + grpc_pollset_work(grpc_polling_entity_pollset(&detector.pollent), &worker, GRPC_MILLIS_INF_FUTURE))) { detector.is_done = 1; detector.success = 0; @@ -140,15 +137,14 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { } gpr_mu_unlock(g_polling_mu); - grpc_httpcli_context_destroy(exec_ctx, &context); + grpc_httpcli_context_destroy(&context); GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset, grpc_polling_entity_pollset(&detector.pollent), grpc_schedule_on_exec_ctx); - grpc_pollset_shutdown(exec_ctx, - grpc_polling_entity_pollset(&detector.pollent), + grpc_pollset_shutdown(grpc_polling_entity_pollset(&detector.pollent), &destroy_closure); g_polling_mu = NULL; - grpc_exec_ctx_flush(exec_ctx); + grpc_exec_ctx_flush(); gpr_free(grpc_polling_entity_pollset(&detector.pollent)); grpc_http_response_destroy(&detector.response); @@ -158,7 +154,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { /* Takes 
ownership of creds_path if not NULL. */ static grpc_error *create_default_creds_from_path( - grpc_exec_ctx *exec_ctx, char *creds_path, grpc_call_credentials **creds) { + char *creds_path, grpc_call_credentials **creds) { grpc_json *json = NULL; grpc_auth_json_key key; grpc_auth_refresh_token token; @@ -187,7 +183,7 @@ static grpc_error *create_default_creds_from_path( if (grpc_auth_json_key_is_valid(&key)) { result = grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - exec_ctx, key, grpc_max_auth_token_lifetime()); + key, grpc_max_auth_token_lifetime()); if (result == NULL) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "grpc_service_account_jwt_access_credentials_create_from_auth_json_" @@ -212,7 +208,7 @@ static grpc_error *create_default_creds_from_path( end: GPR_ASSERT((result == NULL) + (error == GRPC_ERROR_NONE) == 1); if (creds_path != NULL) gpr_free(creds_path); - grpc_slice_unref_internal(exec_ctx, creds_data); + grpc_slice_unref_internal(creds_data); if (json != NULL) grpc_json_destroy(json); *creds = result; return error; @@ -224,7 +220,7 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void) { grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Failed to create Google credentials"); grpc_error *err; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE("grpc_google_default_credentials_create(void)", 0, ()); @@ -239,22 +235,20 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void) { /* First, try the environment variable. */ err = create_default_creds_from_path( - &exec_ctx, gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds); + gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds); if (err == GRPC_ERROR_NONE) goto end; error = grpc_error_add_child(error, err); /* Then the well-known file. 
*/ err = create_default_creds_from_path( - &exec_ctx, grpc_get_well_known_google_credentials_file_path(), - &call_creds); + grpc_get_well_known_google_credentials_file_path(), &call_creds); if (err == GRPC_ERROR_NONE) goto end; error = grpc_error_add_child(error, err); /* At last try to see if we're on compute engine (do the detection only once since it requires a network test). */ if (!compute_engine_detection_done) { - int need_compute_engine_creds = - is_stack_running_on_compute_engine(&exec_ctx); + int need_compute_engine_creds = is_stack_running_on_compute_engine(); compute_engine_detection_done = 1; if (need_compute_engine_creds) { call_creds = grpc_google_compute_engine_credentials_create(NULL); @@ -278,8 +272,8 @@ end: grpc_composite_channel_credentials_create(ssl_creds, call_creds, NULL)); GPR_ASSERT(default_credentials != NULL); - grpc_channel_credentials_unref(&exec_ctx, ssl_creds); - grpc_call_credentials_unref(&exec_ctx, call_creds); + grpc_channel_credentials_unref(ssl_creds); + grpc_call_credentials_unref(call_creds); result = default_credentials; } else { gpr_log(GPR_ERROR, "Could not create google default credentials."); @@ -291,21 +285,21 @@ end: } else { GRPC_ERROR_UNREF(error); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return result; } void grpc_flush_cached_google_default_credentials(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; gpr_once_init(&g_once, init_default_credentials); gpr_mu_lock(&g_state_mu); if (default_credentials != NULL) { - grpc_channel_credentials_unref(&exec_ctx, default_credentials); + grpc_channel_credentials_unref(default_credentials); default_credentials = NULL; } compute_engine_detection_done = 0; gpr_mu_unlock(&g_state_mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } /* -- Well known credentials path. 
-- */ diff --git a/src/core/lib/security/credentials/iam/iam_credentials.cc b/src/core/lib/security/credentials/iam/iam_credentials.cc index e9cf208c16..3b91e739f1 100644 --- a/src/core/lib/security/credentials/iam/iam_credentials.cc +++ b/src/core/lib/security/credentials/iam/iam_credentials.cc @@ -27,14 +27,12 @@ #include <grpc/support/string_util.h> #include <grpc/support/sync.h> -static void iam_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void iam_destruct(grpc_call_credentials *creds) { grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds; - grpc_credentials_mdelem_array_destroy(exec_ctx, &c->md_array); + grpc_credentials_mdelem_array_destroy(&c->md_array); } -static bool iam_get_request_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds, +static bool iam_get_request_metadata(grpc_call_credentials *creds, grpc_polling_entity *pollent, grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, @@ -46,8 +44,8 @@ static bool iam_get_request_metadata(grpc_exec_ctx *exec_ctx, } static void iam_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *c, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { GRPC_ERROR_UNREF(error); } @@ -56,7 +54,7 @@ static grpc_call_credentials_vtable iam_vtable = { grpc_call_credentials *grpc_google_iam_credentials_create( const char *token, const char *authority_selector, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE( "grpc_iam_credentials_create(token=%s, authority_selector=%s, " "reserved=%p)", @@ -70,17 +68,15 @@ grpc_call_credentials *grpc_google_iam_credentials_create( c->base.vtable = &iam_vtable; gpr_ref_init(&c->base.refcount, 1); grpc_mdelem md = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_from_static_string(GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY), 
grpc_slice_from_copied_string(token)); grpc_credentials_mdelem_array_add(&c->md_array, md); - GRPC_MDELEM_UNREF(&exec_ctx, md); + GRPC_MDELEM_UNREF(md); md = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_from_static_string(GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY), grpc_slice_from_copied_string(authority_selector)); grpc_credentials_mdelem_array_add(&c->md_array, md); - GRPC_MDELEM_UNREF(&exec_ctx, md); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_MDELEM_UNREF(md); + grpc_exec_ctx_finish(); return &c->base; } diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.cc b/src/core/lib/security/credentials/jwt/jwt_credentials.cc index 835dd677ed..15eeaf6bac 100644 --- a/src/core/lib/security/credentials/jwt/jwt_credentials.cc +++ b/src/core/lib/security/credentials/jwt/jwt_credentials.cc @@ -30,9 +30,8 @@ #include <grpc/support/string_util.h> #include <grpc/support/sync.h> -static void jwt_reset_cache(grpc_exec_ctx *exec_ctx, - grpc_service_account_jwt_access_credentials *c) { - GRPC_MDELEM_UNREF(exec_ctx, c->cached.jwt_md); +static void jwt_reset_cache(grpc_service_account_jwt_access_credentials *c) { + GRPC_MDELEM_UNREF(c->cached.jwt_md); c->cached.jwt_md = GRPC_MDNULL; if (c->cached.service_url != NULL) { gpr_free(c->cached.service_url); @@ -41,17 +40,15 @@ static void jwt_reset_cache(grpc_exec_ctx *exec_ctx, c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); } -static void jwt_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void jwt_destruct(grpc_call_credentials *creds) { grpc_service_account_jwt_access_credentials *c = (grpc_service_account_jwt_access_credentials *)creds; grpc_auth_json_key_destruct(&c->key); - jwt_reset_cache(exec_ctx, c); + jwt_reset_cache(c); gpr_mu_destroy(&c->cache_mu); } -static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds, +static bool jwt_get_request_metadata(grpc_call_credentials *creds, grpc_polling_entity *pollent, grpc_auth_metadata_context 
context, grpc_credentials_mdelem_array *md_array, @@ -81,7 +78,7 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, char *jwt = NULL; /* Generate a new jwt. */ gpr_mu_lock(&c->cache_mu); - jwt_reset_cache(exec_ctx, c); + jwt_reset_cache(c); jwt = grpc_jwt_encode_and_sign(&c->key, context.service_url, c->jwt_lifetime, NULL); if (jwt != NULL) { @@ -92,7 +89,6 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c->jwt_lifetime); c->cached.service_url = gpr_strdup(context.service_url); c->cached.jwt_md = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), grpc_slice_from_copied_string(md_value)); gpr_free(md_value); @@ -103,7 +99,7 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, if (!GRPC_MDISNULL(jwt_md)) { grpc_credentials_mdelem_array_add(md_array, jwt_md); - GRPC_MDELEM_UNREF(exec_ctx, jwt_md); + GRPC_MDELEM_UNREF(jwt_md); } else { *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Could not generate JWT."); } @@ -111,8 +107,8 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, } static void jwt_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *c, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { GRPC_ERROR_UNREF(error); } @@ -121,8 +117,7 @@ static grpc_call_credentials_vtable jwt_vtable = { grpc_call_credentials * grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - grpc_exec_ctx *exec_ctx, grpc_auth_json_key key, - gpr_timespec token_lifetime) { + grpc_auth_json_key key, gpr_timespec token_lifetime) { grpc_service_account_jwt_access_credentials *c; if (!grpc_auth_json_key_is_valid(&key)) { gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation"); @@ -143,7 +138,7 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key( } c->jwt_lifetime = 
token_lifetime; gpr_mu_init(&c->cache_mu); - jwt_reset_cache(exec_ctx, c); + jwt_reset_cache(c); return &c->base; } @@ -186,11 +181,10 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create( gpr_free(clean_json); } GPR_ASSERT(reserved == NULL); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_call_credentials *creds = grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - &exec_ctx, grpc_auth_json_key_create_from_string(json_key), - token_lifetime); - grpc_exec_ctx_finish(&exec_ctx); + grpc_auth_json_key_create_from_string(json_key), token_lifetime); + grpc_exec_ctx_finish(); return creds; } diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.h b/src/core/lib/security/credentials/jwt/jwt_credentials.h index 5cee6ed0da..12b39c30f2 100644 --- a/src/core/lib/security/credentials/jwt/jwt_credentials.h +++ b/src/core/lib/security/credentials/jwt/jwt_credentials.h @@ -46,8 +46,7 @@ typedef struct { // Takes ownership of the key. 
grpc_call_credentials * grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - grpc_exec_ctx *exec_ctx, grpc_auth_json_key key, - gpr_timespec token_lifetime); + grpc_auth_json_key key, gpr_timespec token_lifetime); #ifdef __cplusplus } diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.cc b/src/core/lib/security/credentials/jwt/jwt_verifier.cc index 39e72c195b..7e12d15717 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.cc +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc @@ -74,12 +74,11 @@ static const EVP_MD *evp_md_from_alg(const char *alg) { } } -static grpc_json *parse_json_part_from_jwt(grpc_exec_ctx *exec_ctx, - const char *str, size_t len, +static grpc_json *parse_json_part_from_jwt(const char *str, size_t len, grpc_slice *buffer) { grpc_json *json; - *buffer = grpc_base64_decode_with_len(exec_ctx, str, len, 1); + *buffer = grpc_base64_decode_with_len(str, len, 1); if (GRPC_SLICE_IS_EMPTY(*buffer)) { gpr_log(GPR_ERROR, "Invalid base64."); return NULL; @@ -87,7 +86,7 @@ static grpc_json *parse_json_part_from_jwt(grpc_exec_ctx *exec_ctx, json = grpc_json_parse_string_with_len((char *)GRPC_SLICE_START_PTR(*buffer), GRPC_SLICE_LENGTH(*buffer)); if (json == NULL) { - grpc_slice_unref_internal(exec_ctx, *buffer); + grpc_slice_unref_internal(*buffer); gpr_log(GPR_ERROR, "JSON parsing error."); } return json; @@ -123,14 +122,13 @@ typedef struct { grpc_slice buffer; } jose_header; -static void jose_header_destroy(grpc_exec_ctx *exec_ctx, jose_header *h) { - grpc_slice_unref_internal(exec_ctx, h->buffer); +static void jose_header_destroy(jose_header *h) { + grpc_slice_unref_internal(h->buffer); gpr_free(h); } /* Takes ownership of json and buffer. 
*/ -static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx, - grpc_json *json, grpc_slice buffer) { +static jose_header *jose_header_from_json(grpc_json *json, grpc_slice buffer) { grpc_json *cur; jose_header *h = (jose_header *)gpr_zalloc(sizeof(jose_header)); h->buffer = buffer; @@ -164,7 +162,7 @@ static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx, error: grpc_json_destroy(json); - jose_header_destroy(exec_ctx, h); + jose_header_destroy(h); return NULL; } @@ -184,9 +182,9 @@ struct grpc_jwt_claims { grpc_slice buffer; }; -void grpc_jwt_claims_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *claims) { +void grpc_jwt_claims_destroy(grpc_jwt_claims *claims) { grpc_json_destroy(claims->json); - grpc_slice_unref_internal(exec_ctx, claims->buffer); + grpc_slice_unref_internal(claims->buffer); gpr_free(claims); } @@ -231,8 +229,7 @@ gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) { } /* Takes ownership of json and buffer even in case of failure. 
*/ -grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, - grpc_json *json, grpc_slice buffer) { +grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer) { grpc_json *cur; grpc_jwt_claims *claims = (grpc_jwt_claims *)gpr_malloc(sizeof(grpc_jwt_claims)); @@ -274,7 +271,7 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, return claims; error: - grpc_jwt_claims_destroy(exec_ctx, claims); + grpc_jwt_claims_destroy(claims); return NULL; } @@ -350,7 +347,7 @@ static verifier_cb_ctx *verifier_cb_ctx_create( grpc_jwt_claims *claims, const char *audience, grpc_slice signature, const char *signed_jwt, size_t signed_jwt_len, void *user_data, grpc_jwt_verification_done_cb cb) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; verifier_cb_ctx *ctx = (verifier_cb_ctx *)gpr_zalloc(sizeof(verifier_cb_ctx)); ctx->verifier = verifier; ctx->pollent = grpc_polling_entity_create_from_pollset(pollset); @@ -361,16 +358,16 @@ static verifier_cb_ctx *verifier_cb_ctx_create( ctx->signed_data = grpc_slice_from_copied_buffer(signed_jwt, signed_jwt_len); ctx->user_data = user_data; ctx->user_cb = cb; - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return ctx; } -void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) { +void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) { if (ctx->audience != NULL) gpr_free(ctx->audience); - if (ctx->claims != NULL) grpc_jwt_claims_destroy(exec_ctx, ctx->claims); - grpc_slice_unref_internal(exec_ctx, ctx->signature); - grpc_slice_unref_internal(exec_ctx, ctx->signed_data); - jose_header_destroy(exec_ctx, ctx->header); + if (ctx->claims != NULL) grpc_jwt_claims_destroy(ctx->claims); + grpc_slice_unref_internal(ctx->signature); + grpc_slice_unref_internal(ctx->signed_data); + jose_header_destroy(ctx->header); for (size_t i = 0; i < HTTP_RESPONSE_COUNT; i++) { grpc_http_response_destroy(&ctx->responses[i]); } @@ -450,19 +447,19 @@ end: return 
result; } -static BIGNUM *bignum_from_base64(grpc_exec_ctx *exec_ctx, const char *b64) { +static BIGNUM *bignum_from_base64(const char *b64) { BIGNUM *result = NULL; grpc_slice bin; if (b64 == NULL) return NULL; - bin = grpc_base64_decode(exec_ctx, b64, 1); + bin = grpc_base64_decode(b64, 1); if (GRPC_SLICE_IS_EMPTY(bin)) { gpr_log(GPR_ERROR, "Invalid base64 for big num."); return NULL; } result = BN_bin2bn(GRPC_SLICE_START_PTR(bin), TSI_SIZE_AS_SIZE(GRPC_SLICE_LENGTH(bin)), NULL); - grpc_slice_unref_internal(exec_ctx, bin); + grpc_slice_unref_internal(bin); return result; } @@ -495,8 +492,7 @@ static int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) { } #endif // OPENSSL_VERSION_NUMBER < 0x10100000L -static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json, - const char *kty) { +static EVP_PKEY *pkey_from_jwk(const grpc_json *json, const char *kty) { const grpc_json *key_prop; RSA *rsa = NULL; EVP_PKEY *result = NULL; @@ -515,12 +511,10 @@ static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json, } for (key_prop = json->child; key_prop != NULL; key_prop = key_prop->next) { if (strcmp(key_prop->key, "n") == 0) { - tmp_n = - bignum_from_base64(exec_ctx, validate_string_field(key_prop, "n")); + tmp_n = bignum_from_base64(validate_string_field(key_prop, "n")); if (tmp_n == NULL) goto end; } else if (strcmp(key_prop->key, "e") == 0) { - tmp_e = - bignum_from_base64(exec_ctx, validate_string_field(key_prop, "e")); + tmp_e = bignum_from_base64(validate_string_field(key_prop, "e")); if (tmp_e == NULL) goto end; } } @@ -545,8 +539,7 @@ end: return result; } -static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx, - const grpc_json *json, +static EVP_PKEY *find_verification_key(const grpc_json *json, const char *header_alg, const char *header_kid) { const grpc_json *jkey; @@ -590,7 +583,7 @@ static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx, } if (alg != NULL && kid != NULL && kty != NULL && strcmp(kid, 
header_kid) == 0 && strcmp(alg, header_alg) == 0) { - return pkey_from_jwk(exec_ctx, jkey, kty); + return pkey_from_jwk(jkey, kty); } } gpr_log(GPR_ERROR, @@ -631,8 +624,7 @@ end: return result; } -static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { +static void on_keys_retrieved(void *user_data, grpc_error *error) { verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data; grpc_json *json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]); EVP_PKEY *verification_key = NULL; @@ -644,7 +636,7 @@ static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, goto end; } verification_key = - find_verification_key(exec_ctx, json, ctx->header->alg, ctx->header->kid); + find_verification_key(json, ctx->header->alg, ctx->header->kid); if (verification_key == NULL) { gpr_log(GPR_ERROR, "Could not find verification key with kid %s.", ctx->header->kid); @@ -668,12 +660,11 @@ static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, end: if (json != NULL) grpc_json_destroy(json); EVP_PKEY_free(verification_key); - ctx->user_cb(exec_ctx, ctx->user_data, status, claims); - verifier_cb_ctx_destroy(exec_ctx, ctx); + ctx->user_cb(ctx->user_data, status, claims); + verifier_cb_ctx_destroy(ctx); } -static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { +static void on_openid_config_retrieved(void *user_data, grpc_error *error) { const grpc_json *cur; verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data; const grpc_http_response *response = &ctx->responses[HTTP_RESPONSE_OPENID]; @@ -710,20 +701,19 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, extreme memory pressure. 
*/ resource_quota = grpc_resource_quota_create("jwt_verifier"); grpc_httpcli_get( - exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, - grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay, + &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, + grpc_exec_ctx_now() + grpc_jwt_verifier_max_delay, GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx), &ctx->responses[HTTP_RESPONSE_KEYS]); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); grpc_json_destroy(json); gpr_free(req.host); return; error: if (json != NULL) grpc_json_destroy(json); - ctx->user_cb(exec_ctx, ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, - NULL); - verifier_cb_ctx_destroy(exec_ctx, ctx); + ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, NULL); + verifier_cb_ctx_destroy(ctx); } static email_key_mapping *verifier_get_mapping(grpc_jwt_verifier *v, @@ -771,8 +761,7 @@ const char *grpc_jwt_issuer_email_domain(const char *issuer) { } /* Takes ownership of ctx. */ -static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, - verifier_cb_ctx *ctx) { +static void retrieve_key_and_verify(verifier_cb_ctx *ctx) { const char *email_domain; grpc_closure *http_cb; char *path_prefix = NULL; @@ -838,23 +827,20 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, channel. This would allow us to cancel an authentication query when under extreme memory pressure. 
*/ resource_quota = grpc_resource_quota_create("jwt_verifier"); - grpc_httpcli_get(exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, - resource_quota, &req, - grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay, + grpc_httpcli_get(&ctx->verifier->http_ctx, &ctx->pollent, resource_quota, + &req, grpc_exec_ctx_now() + grpc_jwt_verifier_max_delay, http_cb, &ctx->responses[rsp_idx]); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); gpr_free(req.host); gpr_free(req.http.path); return; error: - ctx->user_cb(exec_ctx, ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, - NULL); - verifier_cb_ctx_destroy(exec_ctx, ctx); + ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, NULL); + verifier_cb_ctx_destroy(ctx); } -void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, - grpc_jwt_verifier *verifier, +void grpc_jwt_verifier_verify(grpc_jwt_verifier *verifier, grpc_pollset *pollset, const char *jwt, const char *audience, grpc_jwt_verification_done_cb cb, @@ -872,35 +858,32 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, GPR_ASSERT(verifier != NULL && jwt != NULL && audience != NULL && cb != NULL); dot = strchr(cur, '.'); if (dot == NULL) goto error; - json = parse_json_part_from_jwt(exec_ctx, cur, (size_t)(dot - cur), - &header_buffer); + json = parse_json_part_from_jwt(cur, (size_t)(dot - cur), &header_buffer); if (json == NULL) goto error; - header = jose_header_from_json(exec_ctx, json, header_buffer); + header = jose_header_from_json(json, header_buffer); if (header == NULL) goto error; cur = dot + 1; dot = strchr(cur, '.'); if (dot == NULL) goto error; - json = parse_json_part_from_jwt(exec_ctx, cur, (size_t)(dot - cur), - &claims_buffer); + json = parse_json_part_from_jwt(cur, (size_t)(dot - cur), &claims_buffer); if (json == NULL) goto error; - claims = grpc_jwt_claims_from_json(exec_ctx, json, claims_buffer); + claims = grpc_jwt_claims_from_json(json, 
claims_buffer); if (claims == NULL) goto error; signed_jwt_len = (size_t)(dot - jwt); cur = dot + 1; - signature = grpc_base64_decode(exec_ctx, cur, 1); + signature = grpc_base64_decode(cur, 1); if (GRPC_SLICE_IS_EMPTY(signature)) goto error; retrieve_key_and_verify( - exec_ctx, verifier_cb_ctx_create(verifier, pollset, header, claims, audience, signature, jwt, signed_jwt_len, user_data, cb)); return; error: - if (header != NULL) jose_header_destroy(exec_ctx, header); - if (claims != NULL) grpc_jwt_claims_destroy(exec_ctx, claims); - cb(exec_ctx, user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, NULL); + if (header != NULL) jose_header_destroy(header); + if (claims != NULL) grpc_jwt_claims_destroy(claims); + cb(user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, NULL); } grpc_jwt_verifier *grpc_jwt_verifier_create( @@ -927,10 +910,10 @@ grpc_jwt_verifier *grpc_jwt_verifier_create( return v; } -void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_verifier *v) { +void grpc_jwt_verifier_destroy(grpc_jwt_verifier *v) { size_t i; if (v == NULL) return; - grpc_httpcli_context_destroy(exec_ctx, &v->http_ctx); + grpc_httpcli_context_destroy(&v->http_ctx); if (v->mappings != NULL) { for (i = 0; i < v->num_mappings; i++) { gpr_free(v->mappings[i].email_domain); diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index 998365e75c..671a52d739 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -55,7 +55,7 @@ const char *grpc_jwt_verifier_status_to_string(grpc_jwt_verifier_status status); typedef struct grpc_jwt_claims grpc_jwt_claims; -void grpc_jwt_claims_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *claims); +void grpc_jwt_claims_destroy(grpc_jwt_claims *claims); /* Returns the whole JSON tree of the claims. 
*/ const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims); @@ -98,21 +98,18 @@ grpc_jwt_verifier *grpc_jwt_verifier_create( size_t num_mappings); /*The verifier must not be destroyed if there are still outstanding callbacks.*/ -void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, - grpc_jwt_verifier *verifier); +void grpc_jwt_verifier_destroy(grpc_jwt_verifier *verifier); /* User provided callback that will be called when the verification of the JWT is done (maybe in another thread). It is the responsibility of the callee to call grpc_jwt_claims_destroy on the claims. */ -typedef void (*grpc_jwt_verification_done_cb)(grpc_exec_ctx *exec_ctx, - void *user_data, +typedef void (*grpc_jwt_verification_done_cb)(void *user_data, grpc_jwt_verifier_status status, grpc_jwt_claims *claims); /* Verifies for the JWT for the given expected audience. */ -void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, - grpc_jwt_verifier *verifier, +void grpc_jwt_verifier_verify(grpc_jwt_verifier *verifier, grpc_pollset *pollset, const char *jwt, const char *audience, grpc_jwt_verification_done_cb cb, @@ -120,8 +117,7 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, /* --- TESTING ONLY exposed functions. 
--- */ -grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, - grpc_json *json, grpc_slice buffer); +grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer); grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, const char *audience); const char *grpc_jwt_issuer_email_domain(const char *issuer); diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc index f52a424e36..7dd75ed4e3 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc @@ -103,21 +103,19 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token) { // Oauth2 Token Fetcher credentials. // -static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void oauth2_token_fetcher_destruct(grpc_call_credentials *creds) { grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)creds; - GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md); + GRPC_MDELEM_UNREF(c->access_token_md); gpr_mu_destroy(&c->mu); - grpc_pollset_set_destroy(exec_ctx, - grpc_polling_entity_pollset_set(&c->pollent)); - grpc_httpcli_context_destroy(exec_ctx, &c->httpcli_context); + grpc_pollset_set_destroy(grpc_polling_entity_pollset_set(&c->pollent)); + grpc_httpcli_context_destroy(&c->httpcli_context); } grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( - grpc_exec_ctx *exec_ctx, const grpc_http_response *response, - grpc_mdelem *token_md, grpc_millis *token_lifetime) { + const grpc_http_response *response, grpc_mdelem *token_md, + grpc_millis *token_lifetime) { char *null_terminated_body = NULL; char *new_access_token = NULL; grpc_credentials_status status = GRPC_CREDENTIALS_OK; @@ -184,9 +182,8 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( 
gpr_asprintf(&new_access_token, "%s %s", token_type->value, access_token->value); *token_lifetime = strtol(expires_in->value, NULL, 10) * GPR_MS_PER_SEC; - if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(exec_ctx, *token_md); + if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(*token_md); *token_md = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), grpc_slice_from_copied_string(new_access_token)); status = GRPC_CREDENTIALS_OK; @@ -194,7 +191,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( end: if (status != GRPC_CREDENTIALS_OK && !GRPC_MDISNULL(*token_md)) { - GRPC_MDELEM_UNREF(exec_ctx, *token_md); + GRPC_MDELEM_UNREF(*token_md); *token_md = GRPC_MDNULL; } if (null_terminated_body != NULL) gpr_free(null_terminated_body); @@ -203,8 +200,7 @@ end: return status; } -static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, - void *user_data, +static void on_oauth2_token_fetcher_http_response(void *user_data, grpc_error *error) { GRPC_LOG_IF_ERROR("oauth_fetch", GRPC_ERROR_REF(error)); grpc_credentials_metadata_request *r = @@ -215,14 +211,13 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, grpc_millis token_lifetime; grpc_credentials_status status = grpc_oauth2_token_fetcher_credentials_parse_server_response( - exec_ctx, &r->response, &access_token_md, &token_lifetime); + &r->response, &access_token_md, &token_lifetime); // Update cache and grab list of pending requests. gpr_mu_lock(&c->mu); c->token_fetch_pending = false; c->access_token_md = GRPC_MDELEM_REF(access_token_md); - c->token_expiration = status == GRPC_CREDENTIALS_OK - ? grpc_exec_ctx_now(exec_ctx) + token_lifetime - : 0; + c->token_expiration = + status == GRPC_CREDENTIALS_OK ? 
grpc_exec_ctx_now() + token_lifetime : 0; grpc_oauth2_pending_get_request_metadata *pending_request = c->pending_requests; c->pending_requests = NULL; @@ -236,24 +231,22 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Error occured when fetching oauth2 token.", &error, 1); } - GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, error); + GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, error); grpc_polling_entity_del_from_pollset_set( - exec_ctx, pending_request->pollent, - grpc_polling_entity_pollset_set(&c->pollent)); + pending_request->pollent, grpc_polling_entity_pollset_set(&c->pollent)); grpc_oauth2_pending_get_request_metadata *prev = pending_request; pending_request = pending_request->next; gpr_free(prev); } - GRPC_MDELEM_UNREF(exec_ctx, access_token_md); - grpc_call_credentials_unref(exec_ctx, r->creds); - grpc_credentials_metadata_request_destroy(exec_ctx, r); + GRPC_MDELEM_UNREF(access_token_md); + grpc_call_credentials_unref(r->creds); + grpc_credentials_metadata_request_destroy(r); } static bool oauth2_token_fetcher_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { + grpc_call_credentials *creds, grpc_polling_entity *pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, + grpc_closure *on_request_metadata, grpc_error **error) { grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)creds; // Check if we can use the cached token. 
@@ -262,13 +255,13 @@ static bool oauth2_token_fetcher_get_request_metadata( grpc_mdelem cached_access_token_md = GRPC_MDNULL; gpr_mu_lock(&c->mu); if (!GRPC_MDISNULL(c->access_token_md) && - (c->token_expiration + grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) { + (c->token_expiration + grpc_exec_ctx_now() > refresh_threshold)) { cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md); } if (!GRPC_MDISNULL(cached_access_token_md)) { gpr_mu_unlock(&c->mu); grpc_credentials_mdelem_array_add(md_array, cached_access_token_md); - GRPC_MDELEM_UNREF(exec_ctx, cached_access_token_md); + GRPC_MDELEM_UNREF(cached_access_token_md); return true; } // Couldn't get the token from the cache. @@ -280,7 +273,7 @@ static bool oauth2_token_fetcher_get_request_metadata( pending_request->on_request_metadata = on_request_metadata; pending_request->pollent = pollent; grpc_polling_entity_add_to_pollset_set( - exec_ctx, pollent, grpc_polling_entity_pollset_set(&c->pollent)); + pollent, grpc_polling_entity_pollset_set(&c->pollent)); pending_request->next = c->pending_requests; c->pending_requests = pending_request; bool start_fetch = false; @@ -291,17 +284,17 @@ static bool oauth2_token_fetcher_get_request_metadata( gpr_mu_unlock(&c->mu); if (start_fetch) { grpc_call_credentials_ref(creds); - c->fetch_func(exec_ctx, grpc_credentials_metadata_request_create(creds), + c->fetch_func(grpc_credentials_metadata_request_create(creds), &c->httpcli_context, &c->pollent, on_oauth2_token_fetcher_http_response, - grpc_exec_ctx_now(exec_ctx) + refresh_threshold); + grpc_exec_ctx_now() + refresh_threshold); } return false; } static void oauth2_token_fetcher_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *creds, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)creds; 
gpr_mu_lock(&c->mu); @@ -317,7 +310,7 @@ static void oauth2_token_fetcher_cancel_get_request_metadata( c->pending_requests = pending_request->next; } // Invoke the callback immediately with an error. - GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, + GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, GRPC_ERROR_REF(error)); gpr_free(pending_request); break; @@ -351,7 +344,7 @@ static grpc_call_credentials_vtable compute_engine_vtable = { oauth2_token_fetcher_cancel_get_request_metadata}; static void compute_engine_fetch_oauth2( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req, + grpc_credentials_metadata_request *metadata_req, grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent, grpc_iomgr_cb_func response_cb, grpc_millis deadline) { grpc_http_header header = {(char *)"Metadata-Flavor", (char *)"Google"}; @@ -367,10 +360,10 @@ static void compute_engine_fetch_oauth2( grpc_resource_quota *resource_quota = grpc_resource_quota_create("oauth2_credentials"); grpc_httpcli_get( - exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline, + httpcli_context, pollent, resource_quota, &request, deadline, GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx), &metadata_req->response); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); } grpc_call_credentials *grpc_google_compute_engine_credentials_create( @@ -390,12 +383,11 @@ grpc_call_credentials *grpc_google_compute_engine_credentials_create( // Google Refresh Token credentials. 
// -static void refresh_token_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void refresh_token_destruct(grpc_call_credentials *creds) { grpc_google_refresh_token_credentials *c = (grpc_google_refresh_token_credentials *)creds; grpc_auth_refresh_token_destruct(&c->refresh_token); - oauth2_token_fetcher_destruct(exec_ctx, &c->base.base); + oauth2_token_fetcher_destruct(&c->base.base); } static grpc_call_credentials_vtable refresh_token_vtable = { @@ -403,7 +395,7 @@ static grpc_call_credentials_vtable refresh_token_vtable = { oauth2_token_fetcher_cancel_get_request_metadata}; static void refresh_token_fetch_oauth2( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req, + grpc_credentials_metadata_request *metadata_req, grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent, grpc_iomgr_cb_func response_cb, grpc_millis deadline) { grpc_google_refresh_token_credentials *c = @@ -427,11 +419,11 @@ static void refresh_token_fetch_oauth2( grpc_resource_quota *resource_quota = grpc_resource_quota_create("oauth2_credentials_refresh"); grpc_httpcli_post( - exec_ctx, httpcli_context, pollent, resource_quota, &request, body, - strlen(body), deadline, + httpcli_context, pollent, resource_quota, &request, body, strlen(body), + deadline, GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx), &metadata_req->response); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); gpr_free(body); } @@ -483,25 +475,23 @@ grpc_call_credentials *grpc_google_refresh_token_credentials_create( // Oauth2 Access Token credentials. 
// -static void access_token_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void access_token_destruct(grpc_call_credentials *creds) { grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds; - GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md); + GRPC_MDELEM_UNREF(c->access_token_md); } static bool access_token_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { + grpc_call_credentials *creds, grpc_polling_entity *pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, + grpc_closure *on_request_metadata, grpc_error **error) { grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds; grpc_credentials_mdelem_array_add(md_array, c->access_token_md); return true; } static void access_token_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *c, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { GRPC_ERROR_UNREF(error); } @@ -524,11 +514,11 @@ grpc_call_credentials *grpc_access_token_credentials_create( gpr_ref_init(&c->base.refcount, 1); char *token_md_value; gpr_asprintf(&token_md_value, "Bearer %s", access_token); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; c->access_token_md = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), + grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), grpc_slice_from_copied_string(token_md_value)); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); gpr_free(token_md_value); return &c->base; } diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h 
b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h index c12db896f3..b7b5f58746 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h @@ -56,8 +56,7 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token); // This object is a base for credentials that need to acquire an oauth2 token // from an http service. -typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx, - grpc_credentials_metadata_request *req, +typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request *req, grpc_httpcli_context *http_context, grpc_polling_entity *pollent, grpc_iomgr_cb_func cb, @@ -103,8 +102,8 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token( // Exposed for testing only. grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( - grpc_exec_ctx *exec_ctx, const struct grpc_http_response *response, - grpc_mdelem *token_md, grpc_millis *token_lifetime); + const struct grpc_http_response *response, grpc_mdelem *token_md, + grpc_millis *token_lifetime); #ifdef __cplusplus } diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.cc b/src/core/lib/security/credentials/plugin/plugin_credentials.cc index 8106a730fe..c91d147ea5 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.cc +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.cc @@ -34,8 +34,7 @@ grpc_tracer_flag grpc_plugin_credentials_trace = GRPC_TRACER_INITIALIZER(false, "plugin_credentials"); -static void plugin_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { +static void plugin_destruct(grpc_call_credentials *creds) { grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; gpr_mu_destroy(&c->mu); if (c->plugin.state != NULL && c->plugin.destroy != NULL) { @@ -62,18 +61,17 @@ static void pending_request_remove_locked( // When this returns, r->cancelled indicates 
whether the request was // cancelled before completion. static void pending_request_complete( - grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) { + grpc_plugin_credentials_pending_request *r) { gpr_mu_lock(&r->creds->mu); if (!r->cancelled) pending_request_remove_locked(r->creds, r); gpr_mu_unlock(&r->creds->mu); // Ref to credentials not needed anymore. - grpc_call_credentials_unref(exec_ctx, &r->creds->base); + grpc_call_credentials_unref(&r->creds->base); } static grpc_error *process_plugin_result( - grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r, - const grpc_metadata *md, size_t num_md, grpc_status_code status, - const char *error_details) { + grpc_plugin_credentials_pending_request *r, const grpc_metadata *md, + size_t num_md, grpc_status_code status, const char *error_details) { grpc_error *error = GRPC_ERROR_NONE; if (status != GRPC_STATUS_OK) { char *msg; @@ -101,11 +99,11 @@ static grpc_error *process_plugin_result( error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"); } else { for (size_t i = 0; i < num_md; ++i) { - grpc_mdelem mdelem = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_ref_internal(md[i].key), - grpc_slice_ref_internal(md[i].value)); + grpc_mdelem mdelem = + grpc_mdelem_from_slices(grpc_slice_ref_internal(md[i].key), + grpc_slice_ref_internal(md[i].value)); grpc_credentials_mdelem_array_add(r->md_array, mdelem); - GRPC_MDELEM_UNREF(exec_ctx, mdelem); + GRPC_MDELEM_UNREF(mdelem); } } } @@ -118,7 +116,7 @@ static void plugin_md_request_metadata_ready(void *request, grpc_status_code status, const char *error_details) { /* called from application code */ - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER( + ExecCtx _local_exec_ctx( GRPC_EXEC_CTX_FLAG_IS_FINISHED | GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP, NULL, NULL); grpc_plugin_credentials_pending_request *r = @@ -130,12 +128,12 @@ static void plugin_md_request_metadata_ready(void *request, r->creds, r); } // Remove request from pending 
list if not previously cancelled. - pending_request_complete(&exec_ctx, r); + pending_request_complete(r); // If it has not been cancelled, process it. if (!r->cancelled) { grpc_error *error = - process_plugin_result(&exec_ctx, r, md, num_md, status, error_details); - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error); + process_plugin_result(r, md, num_md, status, error_details); + GRPC_CLOSURE_SCHED(r->on_request_metadata, error); } else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: plugin was previously " @@ -143,11 +141,10 @@ static void plugin_md_request_metadata_ready(void *request, r->creds, r); } gpr_free(r); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } -static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds, +static bool plugin_get_request_metadata(grpc_call_credentials *creds, grpc_polling_entity *pollent, grpc_auth_metadata_context context, grpc_credentials_mdelem_array *md_array, @@ -195,7 +192,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, } // Returned synchronously. // Remove request from pending list if not previously cancelled. - pending_request_complete(exec_ctx, pending_request); + pending_request_complete(pending_request); // If the request was cancelled, the error will have been returned // asynchronously by plugin_cancel_get_request_metadata(), so return // false. Otherwise, process the result. @@ -214,13 +211,13 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, "synchronously", c, pending_request); } - *error = process_plugin_result(exec_ctx, pending_request, creds_md, - num_creds_md, status, error_details); + *error = process_plugin_result(pending_request, creds_md, num_creds_md, + status, error_details); } // Clean up. 
for (size_t i = 0; i < num_creds_md; ++i) { - grpc_slice_unref_internal(exec_ctx, creds_md[i].key); - grpc_slice_unref_internal(exec_ctx, creds_md[i].value); + grpc_slice_unref_internal(creds_md[i].key); + grpc_slice_unref_internal(creds_md[i].value); } gpr_free((void *)error_details); gpr_free(pending_request); @@ -229,8 +226,8 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, } static void plugin_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials *creds, grpc_credentials_mdelem_array *md_array, + grpc_error *error) { grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; gpr_mu_lock(&c->mu); for (grpc_plugin_credentials_pending_request *pending_request = @@ -242,7 +239,7 @@ static void plugin_cancel_get_request_metadata( pending_request); } pending_request->cancelled = true; - GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, + GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, GRPC_ERROR_REF(error)); pending_request_remove_locked(c, pending_request); break; diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.cc b/src/core/lib/security/credentials/ssl/ssl_credentials.cc index 290336adc0..e78c20d34a 100644 --- a/src/core/lib/security/credentials/ssl/ssl_credentials.cc +++ b/src/core/lib/security/credentials/ssl/ssl_credentials.cc @@ -38,18 +38,16 @@ static void ssl_config_pem_key_cert_pair_destroy( gpr_free((void *)kp->cert_chain); } -static void ssl_destruct(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds) { +static void ssl_destruct(grpc_channel_credentials *creds) { grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds; gpr_free(c->config.pem_root_certs); ssl_config_pem_key_cert_pair_destroy(&c->config.pem_key_cert_pair); } static grpc_security_status ssl_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds, - grpc_call_credentials 
*call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args) { + grpc_channel_credentials *creds, grpc_call_credentials *call_creds, + const char *target, const grpc_channel_args *args, + grpc_channel_security_connector **sc, grpc_channel_args **new_args) { grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds; grpc_security_status status = GRPC_SECURITY_OK; const char *overridden_target_name = NULL; @@ -62,7 +60,8 @@ static grpc_security_status ssl_create_security_connector( } } status = grpc_ssl_channel_security_connector_create( - exec_ctx, creds, call_creds, &c->config, target, overridden_target_name, + creds, call_creds, &c->config, target, overridden_target_name, + sc); if (status != GRPC_SECURITY_OK) { return status; @@ -114,8 +113,7 @@ grpc_channel_credentials *grpc_ssl_credentials_create( // SSL Server Credentials. // -static void ssl_server_destruct(grpc_exec_ctx *exec_ctx, - grpc_server_credentials *creds) { +static void ssl_server_destruct(grpc_server_credentials *creds) { grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds; size_t i; for (i = 0; i < c->config.num_key_cert_pairs; i++) { @@ -126,11 +124,9 @@ static void ssl_server_destruct(grpc_exec_ctx *exec_ctx, } static grpc_security_status ssl_server_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, - grpc_server_security_connector **sc) { + grpc_server_credentials *creds, grpc_server_security_connector **sc) { grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds; - return grpc_ssl_server_security_connector_create(exec_ctx, creds, &c->config, - sc); + return grpc_ssl_server_security_connector_create(creds, &c->config, sc); } static grpc_server_credentials_vtable ssl_server_vtable = { diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc index a8464dbf9e..8e1853b9ec 100644 --- 
a/src/core/lib/security/transport/client_auth_filter.cc +++ b/src/core/lib/security/transport/client_auth_filter.cc @@ -90,8 +90,7 @@ static void add_error(grpc_error **combined, grpc_error *error) { *combined = grpc_error_add_child(*combined, error); } -static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *input_error) { +static void on_credentials_metadata(void *arg, grpc_error *input_error) { grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call_element *elem = (grpc_call_element *)batch->handler_private.extra_arg; @@ -105,16 +104,16 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg, batch->payload->send_initial_metadata.send_initial_metadata; for (size_t i = 0; i < calld->md_array.size; ++i) { add_error(&error, grpc_metadata_batch_add_tail( - exec_ctx, mdb, &calld->md_links[i], + mdb, &calld->md_links[i], GRPC_MDELEM_REF(calld->md_array.md[i]))); } } if (error == GRPC_ERROR_NONE) { - grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); } else { error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED); - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, + grpc_transport_stream_op_batch_finish_with_failure(batch, error, calld->call_combiner); } } @@ -149,20 +148,17 @@ void build_auth_metadata_context(grpc_security_connector *sc, gpr_free(host); } -static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void cancel_get_request_metadata(void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; if (error != GRPC_ERROR_NONE) { grpc_call_credentials_cancel_get_request_metadata( - exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); + calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, - 
"cancel_get_request_metadata"); + GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_get_request_metadata"); } -static void send_security_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static void send_security_metadata(grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; @@ -176,7 +172,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, if (channel_call_creds == NULL && !call_creds_has_md) { /* Skip sending metadata altogether. */ - grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); return; } @@ -185,11 +181,10 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, ctx->creds, NULL); if (calld->creds == NULL) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, - grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Incompatible credentials set on channel and call."), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), + batch, grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Incompatible credentials set on channel and call."), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), calld->call_combiner); return; } @@ -207,30 +202,29 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, batch, grpc_schedule_on_exec_ctx); grpc_error *error = GRPC_ERROR_NONE; if (grpc_call_credentials_get_request_metadata( - exec_ctx, calld->creds, calld->pollent, calld->auth_md_context, + calld->creds, calld->pollent, calld->auth_md_context, &calld->md_array, &calld->async_result_closure, &error)) { // Synchronous return; invoke on_credentials_metadata() directly. - on_credentials_metadata(exec_ctx, batch, error); + on_credentials_metadata(batch, error); GRPC_ERROR_UNREF(error); } else { // Async return; register cancellation closure with call combiner. 
GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata"); grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, + calld->call_combiner, GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure, cancel_get_request_metadata, elem, grpc_schedule_on_exec_ctx)); } } -static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_host_checked(void *arg, grpc_error *error) { grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call_element *elem = (grpc_call_element *)batch->handler_private.extra_arg; call_data *calld = (call_data *)elem->call_data; if (error == GRPC_ERROR_NONE) { - send_security_metadata(exec_ctx, elem, batch); + send_security_metadata(elem, batch); } else { char *error_msg; char *host = grpc_slice_to_c_string(calld->host); @@ -238,31 +232,28 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg, host); gpr_free(host); grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, - grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg), - GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_UNAUTHENTICATED), + batch, grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), calld->call_combiner); gpr_free(error_msg); } } -static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void cancel_check_call_host(void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; if (error != GRPC_ERROR_NONE) { grpc_channel_security_connector_cancel_check_call_host( - exec_ctx, chand->security_connector, &calld->async_result_closure, + chand->security_connector, &calld->async_result_closure, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, 
"cancel_check_call_host"); + GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_check_call_host"); } static void auth_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { + grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { GPR_TIMER_BEGIN("auth_start_transport_stream_op_batch", 0); /* grab pointers to our data from the call element */ @@ -295,13 +286,13 @@ static void auth_start_transport_stream_op_batch( */ if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_AUTHORITY)) { if (calld->have_host) { - grpc_slice_unref_internal(exec_ctx, calld->host); + grpc_slice_unref_internal(calld->host); } calld->host = grpc_slice_ref_internal(GRPC_MDVALUE(md)); calld->have_host = true; } else if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_PATH)) { if (calld->have_method) { - grpc_slice_unref_internal(exec_ctx, calld->method); + grpc_slice_unref_internal(calld->method); } calld->method = grpc_slice_ref_internal(GRPC_MDVALUE(md)); calld->have_method = true; @@ -314,16 +305,16 @@ static void auth_start_transport_stream_op_batch( char *call_host = grpc_slice_to_c_string(calld->host); grpc_error *error = GRPC_ERROR_NONE; if (grpc_channel_security_connector_check_call_host( - exec_ctx, chand->security_connector, call_host, - chand->auth_context, &calld->async_result_closure, &error)) { + chand->security_connector, call_host, chand->auth_context, + &calld->async_result_closure, &error)) { // Synchronous return; invoke on_host_checked() directly. - on_host_checked(exec_ctx, batch, error); + on_host_checked(batch, error); GRPC_ERROR_UNREF(error); } else { // Async return; register cancellation closure with call combiner. 
GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host"); grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, + calld->call_combiner, GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure, cancel_check_call_host, elem, grpc_schedule_on_exec_ctx)); @@ -335,13 +326,12 @@ static void auth_start_transport_stream_op_batch( } /* pass control down the stack */ - grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); GPR_TIMER_END("auth_start_transport_stream_op_batch", 0); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static grpc_error *init_call_elem(grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = (call_data *)elem->call_data; calld->owning_call = args->call_stack; @@ -349,32 +339,30 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static void set_pollset_or_pollset_set(grpc_call_element *elem, grpc_polling_entity *pollent) { call_data *calld = (call_data *)elem->call_data; calld->pollent = pollent; } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +static void destroy_call_elem(grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { call_data *calld = (call_data *)elem->call_data; - grpc_credentials_mdelem_array_destroy(exec_ctx, &calld->md_array); - grpc_call_credentials_unref(exec_ctx, calld->creds); + grpc_credentials_mdelem_array_destroy(&calld->md_array); + grpc_call_credentials_unref(calld->creds); if (calld->have_host) { - grpc_slice_unref_internal(exec_ctx, calld->host); + grpc_slice_unref_internal(calld->host); } if (calld->have_method) { - grpc_slice_unref_internal(exec_ctx, calld->method); + grpc_slice_unref_internal(calld->method); } 
reset_auth_metadata_context(&calld->auth_md_context); } /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static grpc_error *init_channel_elem(grpc_channel_element *elem, grpc_channel_element_args *args) { grpc_security_connector *sc = grpc_security_connector_find_in_args(args->channel_args); @@ -407,13 +395,12 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { +static void destroy_channel_elem(grpc_channel_element *elem) { /* grab pointers to our data from the channel element */ channel_data *chand = (channel_data *)elem->channel_data; grpc_channel_security_connector *sc = chand->security_connector; if (sc != NULL) { - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "client_auth_filter"); + GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "client_auth_filter"); } GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter"); } diff --git a/src/core/lib/security/transport/lb_targets_info.cc b/src/core/lib/security/transport/lb_targets_info.cc index 947fc1addf..aa01e911d9 100644 --- a/src/core/lib/security/transport/lb_targets_info.cc +++ b/src/core/lib/security/transport/lb_targets_info.cc @@ -28,8 +28,8 @@ static void *targets_info_copy(void *p) { return grpc_slice_hash_table_ref((grpc_slice_hash_table *)p); } -static void targets_info_destroy(grpc_exec_ctx *exec_ctx, void *p) { - grpc_slice_hash_table_unref(exec_ctx, (grpc_slice_hash_table *)p); +static void targets_info_destroy(void *p) { + grpc_slice_hash_table_unref((grpc_slice_hash_table *)p); } static int targets_info_cmp(void *a, void *b) { return grpc_slice_hash_table_cmp((const grpc_slice_hash_table *)a, diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc index ae5633b82c..c784f2390f 100644 --- 
a/src/core/lib/security/transport/secure_endpoint.cc +++ b/src/core/lib/security/transport/secure_endpoint.cc @@ -64,28 +64,27 @@ typedef struct { grpc_tracer_flag grpc_trace_secure_endpoint = GRPC_TRACER_INITIALIZER(false, "secure_endpoint"); -static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) { +static void destroy(secure_endpoint *secure_ep) { secure_endpoint *ep = secure_ep; - grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep); + grpc_endpoint_destroy(ep->wrapped_ep); tsi_frame_protector_destroy(ep->protector); - tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector); - grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes); - grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer); - grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &ep->output_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &ep->source_buffer); + tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector); + grpc_slice_buffer_destroy_internal(&ep->leftover_bytes); + grpc_slice_unref_internal(ep->read_staging_buffer); + grpc_slice_unref_internal(ep->write_staging_buffer); + grpc_slice_buffer_destroy_internal(&ep->output_buffer); + grpc_slice_buffer_destroy_internal(&ep->source_buffer); gpr_mu_destroy(&ep->protector_mu); gpr_free(ep); } #ifndef NDEBUG -#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \ - secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__) +#define SECURE_ENDPOINT_UNREF(ep, reason) \ + secure_endpoint_unref((ep), (reason), __FILE__, __LINE__) #define SECURE_ENDPOINT_REF(ep, reason) \ secure_endpoint_ref((ep), (reason), __FILE__, __LINE__) -static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, - const char *reason, const char *file, - int line) { +static void secure_endpoint_unref(secure_endpoint *ep, const char *reason, + const char *file, int line) { if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { gpr_atm val 
= gpr_atm_no_barrier_load(&ep->ref.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, @@ -93,7 +92,7 @@ static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, val - 1); } if (gpr_unref(&ep->ref)) { - destroy(exec_ctx, ep); + destroy(ep); } } @@ -108,13 +107,11 @@ static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, gpr_ref(&ep->ref); } #else -#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \ - secure_endpoint_unref((exec_ctx), (ep)) +#define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep)) #define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep)) -static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, - secure_endpoint *ep) { +static void secure_endpoint_unref(secure_endpoint *ep) { if (gpr_unref(&ep->ref)) { - destroy(exec_ctx, ep); + destroy(ep); } } @@ -129,8 +126,7 @@ static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur, *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer); } -static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, - grpc_error *error) { +static void call_read_cb(secure_endpoint *ep, grpc_error *error) { if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { size_t i; for (i = 0; i < ep->read_buffer->count; i++) { @@ -141,12 +137,11 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, } } ep->read_buffer = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, ep->read_cb, error); - SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read"); + GRPC_CLOSURE_SCHED(ep->read_cb, error); + SECURE_ENDPOINT_UNREF(ep, "read"); } -static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { +static void on_read(void *user_data, grpc_error *error) { unsigned i; uint8_t keep_looping = 0; tsi_result result = TSI_OK; @@ -155,16 +150,16 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, uint8_t *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer); if (error != GRPC_ERROR_NONE) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, 
ep->read_buffer); - call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Secure read failed", &error, 1)); + grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer); + call_read_cb(ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Secure read failed", &error, 1)); return; } if (ep->zero_copy_protector != NULL) { // Use zero-copy grpc protector to unprotect. result = tsi_zero_copy_grpc_protector_unprotect( - exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); + ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); } else { // Use frame protector to unprotect. /* TODO(yangg) check error, maybe bail out early */ @@ -217,37 +212,35 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, /* TODO(yangg) experiment with moving this block after read_cb to see if it helps latency */ - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->source_buffer); + grpc_slice_buffer_reset_and_unref_internal(&ep->source_buffer); if (result != TSI_OK) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer); + grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer); call_read_cb( - exec_ctx, ep, - grpc_set_tsi_error_result( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result)); + ep, grpc_set_tsi_error_result( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result)); return; } - call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE); + call_read_cb(ep, GRPC_ERROR_NONE); } -static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, - grpc_slice_buffer *slices, grpc_closure *cb) { +static void endpoint_read(grpc_endpoint *secure_ep, grpc_slice_buffer *slices, + grpc_closure *cb) { secure_endpoint *ep = (secure_endpoint *)secure_ep; ep->read_cb = cb; ep->read_buffer = slices; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer); + grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer); SECURE_ENDPOINT_REF(ep, "read"); if 
(ep->leftover_bytes.count) { grpc_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer); GPR_ASSERT(ep->leftover_bytes.count == 0); - on_read(exec_ctx, ep, GRPC_ERROR_NONE); + on_read(ep, GRPC_ERROR_NONE); return; } - grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer, - &ep->on_read); + grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read); } static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur, @@ -258,8 +251,8 @@ static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur, *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer); } -static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, - grpc_slice_buffer *slices, grpc_closure *cb) { +static void endpoint_write(grpc_endpoint *secure_ep, grpc_slice_buffer *slices, + grpc_closure *cb) { GPR_TIMER_BEGIN("secure_endpoint.endpoint_write", 0); unsigned i; @@ -268,7 +261,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, uint8_t *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer); uint8_t *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer); + grpc_slice_buffer_reset_and_unref_internal(&ep->output_buffer); if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { for (i = 0; i < slices->count; i++) { @@ -281,8 +274,8 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, if (ep->zero_copy_protector != NULL) { // Use zero-copy grpc protector to protect. - result = tsi_zero_copy_grpc_protector_protect( - exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer); + result = tsi_zero_copy_grpc_protector_protect(ep->zero_copy_protector, + slices, &ep->output_buffer); } else { // Use frame protector to protect. 
for (i = 0; i < slices->count; i++) { @@ -340,43 +333,38 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, if (result != TSI_OK) { /* TODO(yangg) do different things according to the error type? */ - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer); + grpc_slice_buffer_reset_and_unref_internal(&ep->output_buffer); GRPC_CLOSURE_SCHED( - exec_ctx, cb, - grpc_set_tsi_error_result( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result)); + cb, grpc_set_tsi_error_result( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result)); GPR_TIMER_END("secure_endpoint.endpoint_write", 0); return; } - grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb); + grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb); GPR_TIMER_END("secure_endpoint.endpoint_write", 0); } -static void endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, - grpc_error *why) { +static void endpoint_shutdown(grpc_endpoint *secure_ep, grpc_error *why) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep, why); + grpc_endpoint_shutdown(ep->wrapped_ep, why); } -static void endpoint_destroy(grpc_exec_ctx *exec_ctx, - grpc_endpoint *secure_ep) { +static void endpoint_destroy(grpc_endpoint *secure_ep) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy"); + SECURE_ENDPOINT_UNREF(ep, "destroy"); } -static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, - grpc_endpoint *secure_ep, +static void endpoint_add_to_pollset(grpc_endpoint *secure_ep, grpc_pollset *pollset) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset); + grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset); } -static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_endpoint *secure_ep, +static void endpoint_add_to_pollset_set(grpc_endpoint *secure_ep, 
grpc_pollset_set *pollset_set) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set); + grpc_endpoint_add_to_pollset_set(ep->wrapped_ep, pollset_set); } static char *endpoint_get_peer(grpc_endpoint *secure_ep) { diff --git a/src/core/lib/security/transport/security_connector.cc b/src/core/lib/security/transport/security_connector.cc index 80d9a7b77f..d8259dd130 100644 --- a/src/core/lib/security/transport/security_connector.cc +++ b/src/core/lib/security/transport/security_connector.cc @@ -106,33 +106,32 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, } void grpc_channel_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, + grpc_channel_security_connector *connector, grpc_handshake_manager *handshake_mgr) { if (connector != NULL) { - connector->add_handshakers(exec_ctx, connector, handshake_mgr); + connector->add_handshakers(connector, handshake_mgr); } } void grpc_server_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector, + grpc_server_security_connector *connector, grpc_handshake_manager *handshake_mgr) { if (connector != NULL) { - connector->add_handshakers(exec_ctx, connector, handshake_mgr); + connector->add_handshakers(connector, handshake_mgr); } } -void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, +void grpc_security_connector_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { if (sc == NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, + GRPC_CLOSURE_SCHED(on_peer_checked, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "cannot check peer -- no security connector")); tsi_peer_destruct(&peer); } else { - sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); + sc->vtable->check_peer(sc, peer, auth_context, 
on_peer_checked); } } @@ -170,26 +169,26 @@ int grpc_server_security_connector_cmp(grpc_server_security_connector *sc1, } bool grpc_channel_security_connector_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - const char *host, grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, grpc_error **error) { + grpc_channel_security_connector *sc, const char *host, + grpc_auth_context *auth_context, grpc_closure *on_call_host_checked, + grpc_error **error) { if (sc == NULL || sc->check_call_host == NULL) { *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "cannot check call host -- no security connector"); return true; } - return sc->check_call_host(exec_ctx, sc, host, auth_context, - on_call_host_checked, error); + return sc->check_call_host(sc, host, auth_context, on_call_host_checked, + error); } void grpc_channel_security_connector_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error) { + grpc_channel_security_connector *sc, grpc_closure *on_call_host_checked, + grpc_error *error) { if (sc == NULL || sc->cancel_check_call_host == NULL) { GRPC_ERROR_UNREF(error); return; } - sc->cancel_check_call_host(exec_ctx, sc, on_call_host_checked, error); + sc->cancel_check_call_host(sc, on_call_host_checked, error); } #ifndef NDEBUG @@ -213,8 +212,7 @@ grpc_security_connector *grpc_security_connector_ref( } #ifndef NDEBUG -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, +void grpc_security_connector_unref(grpc_security_connector *sc, const char *file, int line, const char *reason) { if (sc == NULL) return; @@ -225,15 +223,14 @@ void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, val, val - 1, reason); } #else -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { +void grpc_security_connector_unref(grpc_security_connector *sc) { if (sc == NULL) return; 
#endif - if (gpr_unref(&sc->refcount)) sc->vtable->destroy(exec_ctx, sc); + if (gpr_unref(&sc->refcount)) sc->vtable->destroy(sc); } -static void connector_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, (grpc_security_connector *)p, +static void connector_arg_destroy(void *p) { + GRPC_SECURITY_CONNECTOR_UNREF((grpc_security_connector *)p, "connector_arg_destroy"); } @@ -286,20 +283,16 @@ typedef struct { bool is_lb_channel; } grpc_fake_channel_security_connector; -static void fake_channel_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { +static void fake_channel_destroy(grpc_security_connector *sc) { grpc_fake_channel_security_connector *c = (grpc_fake_channel_security_connector *)sc; - grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds); + grpc_call_credentials_unref(c->base.request_metadata_creds); gpr_free(c->target); gpr_free(c->expected_targets); gpr_free(c); } -static void fake_server_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - gpr_free(sc); -} +static void fake_server_destroy(grpc_security_connector *sc) { gpr_free(sc); } static bool fake_check_target(const char *target_type, const char *target, const char *set_str) { @@ -363,8 +356,7 @@ done: if (!success) abort(); } -static void fake_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, +static void fake_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { const char *prop_name; @@ -396,25 +388,23 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx, *auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, GRPC_FAKE_TRANSPORT_SECURITY_TYPE); end: - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); + GRPC_CLOSURE_SCHED(on_peer_checked, error); tsi_peer_destruct(&peer); } -static void fake_channel_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, +static void 
fake_channel_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { - fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); + fake_check_peer(sc, peer, auth_context, on_peer_checked); grpc_fake_channel_security_connector *c = (grpc_fake_channel_security_connector *)sc; fake_secure_name_check(c->target, c->expected_targets, c->is_lb_channel); } -static void fake_server_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, +static void fake_server_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { - fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); + fake_check_peer(sc, peer, auth_context, on_peer_checked); } static int fake_channel_cmp(grpc_security_connector *sc1, @@ -443,8 +433,7 @@ static int fake_server_cmp(grpc_security_connector *sc1, (grpc_server_security_connector *)sc2); } -static bool fake_channel_check_call_host(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, +static bool fake_channel_check_call_host(grpc_channel_security_connector *sc, const char *host, grpc_auth_context *auth_context, grpc_closure *on_call_host_checked, @@ -453,29 +442,26 @@ static bool fake_channel_check_call_host(grpc_exec_ctx *exec_ctx, } static void fake_channel_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error) { + grpc_channel_security_connector *sc, grpc_closure *on_call_host_checked, + grpc_error *error) { GRPC_ERROR_UNREF(error); } static void fake_channel_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, + grpc_channel_security_connector *sc, grpc_handshake_manager *handshake_mgr) { grpc_handshake_manager_add( handshake_mgr, grpc_security_handshaker_create( - exec_ctx, tsi_create_fake_handshaker(true /* is_client */), - &sc->base)); + 
tsi_create_fake_handshaker(true /* is_client */), &sc->base)); } -static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, +static void fake_server_add_handshakers(grpc_server_security_connector *sc, grpc_handshake_manager *handshake_mgr) { grpc_handshake_manager_add( handshake_mgr, grpc_security_handshaker_create( - exec_ctx, tsi_create_fake_handshaker(false /* is_client */), - &sc->base)); + tsi_create_fake_handshaker(false /* is_client */), &sc->base)); } static grpc_security_connector_vtable fake_channel_vtable = { @@ -533,12 +519,11 @@ typedef struct { tsi_ssl_server_handshaker_factory *server_handshaker_factory; } grpc_ssl_server_security_connector; -static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { +static void ssl_channel_destroy(grpc_security_connector *sc) { grpc_ssl_channel_security_connector *c = (grpc_ssl_channel_security_connector *)sc; - grpc_channel_credentials_unref(exec_ctx, c->base.channel_creds); - grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds); + grpc_channel_credentials_unref(c->base.channel_creds); + grpc_call_credentials_unref(c->base.request_metadata_creds); tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory); c->client_handshaker_factory = NULL; if (c->target_name != NULL) gpr_free(c->target_name); @@ -546,18 +531,16 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, gpr_free(sc); } -static void ssl_server_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { +static void ssl_server_destroy(grpc_security_connector *sc) { grpc_ssl_server_security_connector *c = (grpc_ssl_server_security_connector *)sc; - grpc_server_credentials_unref(exec_ctx, c->base.server_creds); + grpc_server_credentials_unref(c->base.server_creds); tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory); c->server_handshaker_factory = NULL; gpr_free(sc); } -static void ssl_channel_add_handshakers(grpc_exec_ctx 
*exec_ctx, - grpc_channel_security_connector *sc, +static void ssl_channel_add_handshakers(grpc_channel_security_connector *sc, grpc_handshake_manager *handshake_mgr) { grpc_ssl_channel_security_connector *c = (grpc_ssl_channel_security_connector *)sc; @@ -576,13 +559,11 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx, // Create handshakers. grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); + handshake_mgr, grpc_security_handshaker_create( + tsi_create_adapter_handshaker(tsi_hs), &sc->base)); } -static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, +static void ssl_server_add_handshakers(grpc_server_security_connector *sc, grpc_handshake_manager *handshake_mgr) { grpc_ssl_server_security_connector *c = (grpc_ssl_server_security_connector *)sc; @@ -598,9 +579,8 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, // Create handshakers. grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); + handshake_mgr, grpc_security_handshaker_create( + tsi_create_adapter_handshaker(tsi_hs), &sc->base)); } static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) { @@ -684,8 +664,7 @@ static grpc_error *ssl_check_peer(grpc_security_connector *sc, return GRPC_ERROR_NONE; } -static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, +static void ssl_channel_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { grpc_ssl_channel_security_connector *c = @@ -694,17 +673,16 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx, ? 
c->overridden_target_name : c->target_name, &peer, auth_context); - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); + GRPC_CLOSURE_SCHED(on_peer_checked, error); tsi_peer_destruct(&peer); } -static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, +static void ssl_server_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked) { grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context); tsi_peer_destruct(&peer); - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); + GRPC_CLOSURE_SCHED(on_peer_checked, error); } static int ssl_channel_cmp(grpc_security_connector *sc1, @@ -774,8 +752,7 @@ void tsi_shallow_peer_destruct(tsi_peer *peer) { if (peer->properties != NULL) gpr_free(peer->properties); } -static bool ssl_channel_check_call_host(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, +static bool ssl_channel_check_call_host(grpc_channel_security_connector *sc, const char *host, grpc_auth_context *auth_context, grpc_closure *on_call_host_checked, @@ -800,8 +777,8 @@ static bool ssl_channel_check_call_host(grpc_exec_ctx *exec_ctx, } static void ssl_channel_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error) { + grpc_channel_security_connector *sc, grpc_closure *on_call_host_checked, + grpc_error *error) { GRPC_ERROR_UNREF(error); } @@ -893,7 +870,7 @@ const char *grpc_get_default_ssl_roots(void) { } grpc_security_status grpc_ssl_channel_security_connector_create( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds, + grpc_channel_credentials *channel_creds, grpc_call_credentials *request_metadata_creds, const grpc_ssl_config *config, const char *target_name, const char *overridden_target_name, grpc_channel_security_connector **sc) { @@ -951,7 +928,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create( if 
(result != TSI_OK) { gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", tsi_result_to_string(result)); - ssl_channel_destroy(exec_ctx, &c->base.base); + ssl_channel_destroy(&c->base.base); *sc = NULL; goto error; } @@ -965,8 +942,8 @@ error: } grpc_security_status grpc_ssl_server_security_connector_create( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_creds, - const grpc_ssl_server_config *config, grpc_server_security_connector **sc) { + grpc_server_credentials *server_creds, const grpc_ssl_server_config *config, + grpc_server_security_connector **sc) { size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions(); const char **alpn_protocol_strings = (const char **)gpr_malloc(sizeof(const char *) * num_alpn_protocols); @@ -998,7 +975,7 @@ grpc_security_status grpc_ssl_server_security_connector_create( if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", tsi_result_to_string(result)); - ssl_server_destroy(exec_ctx, &c->base.base); + ssl_server_destroy(&c->base.base); *sc = NULL; goto error; } diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h index 216bb35e81..7834045772 100644 --- a/src/core/lib/security/transport/security_connector.h +++ b/src/core/lib/security/transport/security_connector.h @@ -56,9 +56,9 @@ typedef struct grpc_security_connector grpc_security_connector; #define GRPC_ARG_SECURITY_CONNECTOR "grpc.security_connector" typedef struct { - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc); - void (*check_peer)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc, - tsi_peer peer, grpc_auth_context **auth_context, + void (*destroy)(grpc_security_connector *sc); + void (*check_peer)(grpc_security_connector *sc, tsi_peer peer, + grpc_auth_context **auth_context, grpc_closure *on_peer_checked); int (*cmp)(grpc_security_connector *sc, grpc_security_connector *other); } grpc_security_connector_vtable; @@ 
-73,29 +73,25 @@ struct grpc_security_connector { #ifndef NDEBUG #define GRPC_SECURITY_CONNECTOR_REF(p, r) \ grpc_security_connector_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \ - grpc_security_connector_unref((exec_ctx), (p), __FILE__, __LINE__, (r)) +#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) \ + grpc_security_connector_unref((p), __FILE__, __LINE__, (r)) grpc_security_connector *grpc_security_connector_ref( grpc_security_connector *policy, const char *file, int line, const char *reason); -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *policy, +void grpc_security_connector_unref(grpc_security_connector *policy, const char *file, int line, const char *reason); #else #define GRPC_SECURITY_CONNECTOR_REF(p, r) grpc_security_connector_ref((p)) -#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \ - grpc_security_connector_unref((exec_ctx), (p)) +#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) grpc_security_connector_unref((p)) grpc_security_connector *grpc_security_connector_ref( grpc_security_connector *policy); -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *policy); +void grpc_security_connector_unref(grpc_security_connector *policy); #endif /* Check the peer. Callee takes ownership of the peer object. When done, sets *auth_context and invokes on_peer_checked. 
*/ -void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, +void grpc_security_connector_check_peer(grpc_security_connector *sc, tsi_peer peer, grpc_auth_context **auth_context, grpc_closure *on_peer_checked); @@ -125,17 +121,14 @@ struct grpc_channel_security_connector { grpc_security_connector base; grpc_channel_credentials *channel_creds; grpc_call_credentials *request_metadata_creds; - bool (*check_call_host)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, const char *host, + bool (*check_call_host)(grpc_channel_security_connector *sc, const char *host, grpc_auth_context *auth_context, grpc_closure *on_call_host_checked, grpc_error **error); - void (*cancel_check_call_host)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, + void (*cancel_check_call_host)(grpc_channel_security_connector *sc, grpc_closure *on_call_host_checked, grpc_error *error); - void (*add_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, + void (*add_handshakers)(grpc_channel_security_connector *sc, grpc_handshake_manager *handshake_mgr); }; @@ -148,20 +141,20 @@ int grpc_channel_security_connector_cmp(grpc_channel_security_connector *sc1, /// be set to indicate the result. Otherwise, \a on_call_host_checked /// will be invoked when complete. bool grpc_channel_security_connector_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - const char *host, grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, grpc_error **error); + grpc_channel_security_connector *sc, const char *host, + grpc_auth_context *auth_context, grpc_closure *on_call_host_checked, + grpc_error **error); /// Cancels a pending asychronous call to /// grpc_channel_security_connector_check_call_host() with /// \a on_call_host_checked as its callback. 
void grpc_channel_security_connector_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error); + grpc_channel_security_connector *sc, grpc_closure *on_call_host_checked, + grpc_error *error); /* Registers handshakers with \a handshake_mgr. */ void grpc_channel_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, + grpc_channel_security_connector *connector, grpc_handshake_manager *handshake_mgr); /* --- server_security_connector object. --- @@ -174,8 +167,7 @@ typedef struct grpc_server_security_connector grpc_server_security_connector; struct grpc_server_security_connector { grpc_security_connector base; grpc_server_credentials *server_creds; - void (*add_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, + void (*add_handshakers)(grpc_server_security_connector *sc, grpc_handshake_manager *handshake_mgr); }; @@ -184,7 +176,8 @@ int grpc_server_security_connector_cmp(grpc_server_security_connector *sc1, grpc_server_security_connector *sc2); void grpc_server_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr); /* --- Creation security connectors. --- */ @@ -222,7 +215,7 @@ typedef struct { specific error code otherwise. */ grpc_security_status grpc_ssl_channel_security_connector_create( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds, + grpc_channel_credentials *channel_creds, grpc_call_credentials *request_metadata_creds, const grpc_ssl_config *config, const char *target_name, const char *overridden_target_name, grpc_channel_security_connector **sc); @@ -248,8 +241,8 @@ typedef struct { specific error code otherwise. 
*/ grpc_security_status grpc_ssl_server_security_connector_create( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_creds, - const grpc_ssl_server_config *config, grpc_server_security_connector **sc); + grpc_server_credentials *server_creds, const grpc_ssl_server_config *config, + grpc_server_security_connector **sc); /* Util. */ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, diff --git a/src/core/lib/security/transport/security_handshaker.cc b/src/core/lib/security/transport/security_handshaker.cc index 3d19605617..ff0fc09817 100644 --- a/src/core/lib/security/transport/security_handshaker.cc +++ b/src/core/lib/security/transport/security_handshaker.cc @@ -65,43 +65,40 @@ typedef struct { tsi_handshaker_result *handshaker_result; } security_handshaker; -static void security_handshaker_unref(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { +static void security_handshaker_unref(security_handshaker *h) { if (gpr_unref(&h->refs)) { gpr_mu_destroy(&h->mu); tsi_handshaker_destroy(h->handshaker); tsi_handshaker_result_destroy(h->handshaker_result); if (h->endpoint_to_destroy != NULL) { - grpc_endpoint_destroy(exec_ctx, h->endpoint_to_destroy); + grpc_endpoint_destroy(h->endpoint_to_destroy); } if (h->read_buffer_to_destroy != NULL) { - grpc_slice_buffer_destroy_internal(exec_ctx, h->read_buffer_to_destroy); + grpc_slice_buffer_destroy_internal(h->read_buffer_to_destroy); gpr_free(h->read_buffer_to_destroy); } gpr_free(h->handshake_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &h->outgoing); + grpc_slice_buffer_destroy_internal(&h->outgoing); GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake"); - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, h->connector, "handshake"); + GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake"); gpr_free(h); } } // Set args fields to NULL, saving the endpoint and read buffer for // later destruction. 
-static void cleanup_args_for_failure_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { +static void cleanup_args_for_failure_locked(security_handshaker *h) { h->endpoint_to_destroy = h->args->endpoint; h->args->endpoint = NULL; h->read_buffer_to_destroy = h->args->read_buffer; h->args->read_buffer = NULL; - grpc_channel_args_destroy(exec_ctx, h->args->args); + grpc_channel_args_destroy(h->args->args); h->args->args = NULL; } // If the handshake failed or we're shutting down, clean up and invoke the // callback with the error. -static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h, +static void security_handshake_failed_locked(security_handshaker *h, grpc_error *error) { if (error == GRPC_ERROR_NONE) { // If we were shut down after the handshake succeeded but before an @@ -116,34 +113,33 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx, // before destroying them, even if we know that there are no // pending read/write callbacks. This should be fixed, at which // point this can be removed. - grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(error)); + grpc_endpoint_shutdown(h->args->endpoint, GRPC_ERROR_REF(error)); // Not shutting down, so the write failed. Clean up before // invoking the callback. - cleanup_args_for_failure_locked(exec_ctx, h); + cleanup_args_for_failure_locked(h); // Set shutdown to true so that subsequent calls to // security_handshaker_shutdown() do nothing. h->shutdown = true; } // Invoke callback. 
- GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error); + GRPC_CLOSURE_SCHED(h->on_handshake_done, error); } -static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx, - security_handshaker *h, grpc_error *error) { +static void on_peer_checked_inner(security_handshaker *h, grpc_error *error) { if (error != GRPC_ERROR_NONE || h->shutdown) { - security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error)); + security_handshake_failed_locked(h, GRPC_ERROR_REF(error)); return; } // Create zero-copy frame protector, if implemented. tsi_zero_copy_grpc_protector *zero_copy_protector = NULL; tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector( - exec_ctx, h->handshaker_result, NULL, &zero_copy_protector); + h->handshaker_result, NULL, &zero_copy_protector); if (result != TSI_OK && result != TSI_UNIMPLEMENTED) { error = grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Zero-copy frame protector creation failed"), result); - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); return; } // Create frame protector if zero-copy frame protector is NULL. 
@@ -155,7 +151,7 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx, error = grpc_set_tsi_error_result(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Frame protector creation failed"), result); - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); return; } } @@ -170,7 +166,7 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx, grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size); h->args->endpoint = grpc_secure_endpoint_create( protector, zero_copy_protector, h->args->endpoint, &slice, 1); - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); } else { h->args->endpoint = grpc_secure_endpoint_create( protector, zero_copy_protector, h->args->endpoint, NULL, 0); @@ -178,31 +174,29 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx, tsi_handshaker_result_destroy(h->handshaker_result); h->handshaker_result = NULL; // Clear out the read buffer before it gets passed to the transport. - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, h->args->read_buffer); + grpc_slice_buffer_reset_and_unref_internal(h->args->read_buffer); // Add auth context to channel args. grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context); grpc_channel_args *tmp_args = h->args->args; h->args->args = grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1); - grpc_channel_args_destroy(exec_ctx, tmp_args); + grpc_channel_args_destroy(tmp_args); // Invoke callback. - GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(h->on_handshake_done, GRPC_ERROR_NONE); // Set shutdown to true so that subsequent calls to // security_handshaker_shutdown() do nothing. 
h->shutdown = true; } -static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_peer_checked(void *arg, grpc_error *error) { security_handshaker *h = (security_handshaker *)arg; gpr_mu_lock(&h->mu); - on_peer_checked_inner(exec_ctx, h, error); + on_peer_checked_inner(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); } -static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { +static grpc_error *check_peer_locked(security_handshaker *h) { tsi_peer peer; tsi_result result = tsi_handshaker_result_extract_peer(h->handshaker_result, &peer); @@ -210,20 +204,20 @@ static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx, return grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Peer extraction failed"), result); } - grpc_security_connector_check_peer(exec_ctx, h->connector, peer, - &h->auth_context, &h->on_peer_checked); + grpc_security_connector_check_peer(h->connector, peer, &h->auth_context, + &h->on_peer_checked); return GRPC_ERROR_NONE; } static grpc_error *on_handshake_next_done_locked( - grpc_exec_ctx *exec_ctx, security_handshaker *h, tsi_result result, + security_handshaker *h, tsi_result result, const unsigned char *bytes_to_send, size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) { grpc_error *error = GRPC_ERROR_NONE; // Read more if we need to. if (result == TSI_INCOMPLETE_DATA) { GPR_ASSERT(bytes_to_send_size == 0); - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + grpc_endpoint_read(h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); return error; } @@ -240,17 +234,17 @@ static grpc_error *on_handshake_next_done_locked( // Send data to peer, if needed. 
grpc_slice to_send = grpc_slice_from_copied_buffer( (const char *)bytes_to_send, bytes_to_send_size); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing); + grpc_slice_buffer_reset_and_unref_internal(&h->outgoing); grpc_slice_buffer_add(&h->outgoing, to_send); - grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing, + grpc_endpoint_write(h->args->endpoint, &h->outgoing, &h->on_handshake_data_sent_to_peer); } else if (handshaker_result == NULL) { // There is nothing to send, but need to read from peer. - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + grpc_endpoint_read(h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); } else { // Handshake has finished, check peer and so on. - error = check_peer_locked(exec_ctx, h); + error = check_peer_locked(h); } return error; } @@ -261,24 +255,23 @@ static void on_handshake_next_done_grpc_wrapper( security_handshaker *h = (security_handshaker *)user_data; // This callback will be invoked by TSI in a non-grpc thread, so it's // safe to create our own exec_ctx here. 
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; gpr_mu_lock(&h->mu); - grpc_error *error = - on_handshake_next_done_locked(&exec_ctx, h, result, bytes_to_send, - bytes_to_send_size, handshaker_result); + grpc_error *error = on_handshake_next_done_locked( + h, result, bytes_to_send, bytes_to_send_size, handshaker_result); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(&exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(&exec_ctx, h); + security_handshaker_unref(h); } else { gpr_mu_unlock(&h->mu); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } static grpc_error *do_handshaker_next_locked( - grpc_exec_ctx *exec_ctx, security_handshaker *h, - const unsigned char *bytes_received, size_t bytes_received_size) { + security_handshaker *h, const unsigned char *bytes_received, + size_t bytes_received_size) { // Invoke TSI handshaker. const unsigned char *bytes_to_send = NULL; size_t bytes_to_send_size = 0; @@ -293,21 +286,20 @@ static grpc_error *do_handshaker_next_locked( return GRPC_ERROR_NONE; } // Handshaker returned synchronously. Invoke callback directly in - // this thread with our existing exec_ctx. 
- return on_handshake_next_done_locked(exec_ctx, h, result, bytes_to_send, + // this thread with our existing exec_ctx-> + return on_handshake_next_done_locked(h, result, bytes_to_send, bytes_to_send_size, handshaker_result); } -static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { +static void on_handshake_data_received_from_peer(void *arg, grpc_error *error) { security_handshaker *h = (security_handshaker *)arg; gpr_mu_lock(&h->mu); if (error != GRPC_ERROR_NONE || h->shutdown) { security_handshake_failed_locked( - exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Handshake read failed", &error, 1)); + h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Handshake read failed", &error, 1)); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } // Copy all slices received. @@ -329,40 +321,39 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx, offset += slice_size; } // Call TSI handshaker. 
- error = do_handshaker_next_locked(exec_ctx, h, h->handshake_buffer, - bytes_received_size); + error = + do_handshaker_next_locked(h, h->handshake_buffer, bytes_received_size); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); } else { gpr_mu_unlock(&h->mu); } } -static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_handshake_data_sent_to_peer(void *arg, grpc_error *error) { security_handshaker *h = (security_handshaker *)arg; gpr_mu_lock(&h->mu); if (error != GRPC_ERROR_NONE || h->shutdown) { security_handshake_failed_locked( - exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Handshake write failed", &error, 1)); + h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Handshake write failed", &error, 1)); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } // We may be done. 
if (h->handshaker_result == NULL) { - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + grpc_endpoint_read(h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); } else { - error = check_peer_locked(exec_ctx, h); + error = check_peer_locked(h); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } } @@ -373,28 +364,25 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg, // public handshaker API // -static void security_handshaker_destroy(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker) { +static void security_handshaker_destroy(grpc_handshaker *handshaker) { security_handshaker *h = (security_handshaker *)handshaker; - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); } -static void security_handshaker_shutdown(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, +static void security_handshaker_shutdown(grpc_handshaker *handshaker, grpc_error *why) { security_handshaker *h = (security_handshaker *)handshaker; gpr_mu_lock(&h->mu); if (!h->shutdown) { h->shutdown = true; - grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(why)); - cleanup_args_for_failure_locked(exec_ctx, h); + grpc_endpoint_shutdown(h->args->endpoint, GRPC_ERROR_REF(why)); + cleanup_args_for_failure_locked(h); } gpr_mu_unlock(&h->mu); GRPC_ERROR_UNREF(why); } -static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, +static void security_handshaker_do_handshake(grpc_handshaker *handshaker, grpc_tcp_server_acceptor *acceptor, grpc_closure *on_handshake_done, grpc_handshaker_args *args) { @@ -403,11 +391,11 @@ static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, h->args = args; h->on_handshake_done = on_handshake_done; 
gpr_ref(&h->refs); - grpc_error *error = do_handshaker_next_locked(exec_ctx, h, NULL, 0); + grpc_error *error = do_handshaker_next_locked(h, NULL, 0); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } gpr_mu_unlock(&h->mu); @@ -418,8 +406,7 @@ static const grpc_handshaker_vtable security_handshaker_vtable = { security_handshaker_do_handshake}; static grpc_handshaker *security_handshaker_create( - grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, - grpc_security_connector *connector) { + tsi_handshaker *handshaker, grpc_security_connector *connector) { security_handshaker *h = (security_handshaker *)gpr_zalloc(sizeof(security_handshaker)); grpc_handshaker_init(&security_handshaker_vtable, &h->base); @@ -445,23 +432,20 @@ static grpc_handshaker *security_handshaker_create( // fail_handshaker // -static void fail_handshaker_destroy(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker) { +static void fail_handshaker_destroy(grpc_handshaker *handshaker) { gpr_free(handshaker); } -static void fail_handshaker_shutdown(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, +static void fail_handshaker_shutdown(grpc_handshaker *handshaker, grpc_error *why) { GRPC_ERROR_UNREF(why); } -static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, +static void fail_handshaker_do_handshake(grpc_handshaker *handshaker, grpc_tcp_server_acceptor *acceptor, grpc_closure *on_handshake_done, grpc_handshaker_args *args) { - GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done, + GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Failed to create security handshaker")); } @@ -481,27 +465,27 @@ static grpc_handshaker *fail_handshaker_create() { // static void client_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, 
grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) { + grpc_handshaker_factory *handshaker_factory, const grpc_channel_args *args, + grpc_handshake_manager *handshake_mgr) { grpc_channel_security_connector *security_connector = (grpc_channel_security_connector *)grpc_security_connector_find_in_args( args); - grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector, + grpc_channel_security_connector_add_handshakers(security_connector, handshake_mgr); } static void server_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *hf, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) { + grpc_handshaker_factory *hf, const grpc_channel_args *args, + grpc_handshake_manager *handshake_mgr) { grpc_server_security_connector *security_connector = (grpc_server_security_connector *)grpc_security_connector_find_in_args( args); - grpc_server_security_connector_add_handshakers(exec_ctx, security_connector, + grpc_server_security_connector_add_handshakers(security_connector, handshake_mgr); } static void handshaker_factory_destroy( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory) {} + grpc_handshaker_factory *handshaker_factory) {} static const grpc_handshaker_factory_vtable client_handshaker_factory_vtable = { client_handshaker_factory_add_handshakers, handshaker_factory_destroy}; @@ -520,14 +504,13 @@ static grpc_handshaker_factory server_handshaker_factory = { // grpc_handshaker *grpc_security_handshaker_create( - grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, - grpc_security_connector *connector) { + tsi_handshaker *handshaker, grpc_security_connector *connector) { // If no TSI handshaker was created, return a handshaker that always fails. // Otherwise, return a real security handshaker. 
if (handshaker == NULL) { return fail_handshaker_create(); } else { - return security_handshaker_create(exec_ctx, handshaker, connector); + return security_handshaker_create(handshaker, connector); } } diff --git a/src/core/lib/security/transport/security_handshaker.h b/src/core/lib/security/transport/security_handshaker.h index 178099bb94..b555a72a65 100644 --- a/src/core/lib/security/transport/security_handshaker.h +++ b/src/core/lib/security/transport/security_handshaker.h @@ -29,8 +29,7 @@ extern "C" { /// Creates a security handshaker using \a handshaker. grpc_handshaker *grpc_security_handshaker_create( - grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, - grpc_security_connector *connector); + tsi_handshaker *handshaker, grpc_security_connector *connector); /// Registers security handshaker factories. void grpc_security_register_handshaker_factories(); diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc index f5e02f42fe..36d0b7612b 100644 --- a/src/core/lib/security/transport/server_auth_filter.cc +++ b/src/core/lib/security/transport/server_auth_filter.cc @@ -73,8 +73,7 @@ static grpc_metadata_array metadata_batch_to_md_array( return result; } -static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx, - void *user_data, +static grpc_filtered_mdelem remove_consumed_md(void *user_data, grpc_mdelem md) { grpc_call_element *elem = (grpc_call_element *)user_data; call_data *calld = (call_data *)elem->call_data; @@ -88,8 +87,7 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx, return GRPC_FILTERED_MDELEM(md); } -static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static void on_md_processing_done_inner(grpc_call_element *elem, const grpc_metadata *consumed_md, size_t num_consumed_md, const grpc_metadata *response_md, @@ -107,11 +105,10 @@ static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, 
calld->consumed_md = consumed_md; calld->num_consumed_md = num_consumed_md; error = grpc_metadata_batch_filter( - exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata, + batch->payload->recv_initial_metadata.recv_initial_metadata, remove_consumed_md, elem, "Response metadata filtering error"); } - GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready, - error); + GRPC_CLOSURE_SCHED(calld->original_recv_initial_metadata_ready, error); } // Called from application code. @@ -121,7 +118,7 @@ static void on_md_processing_done( grpc_status_code status, const char *error_details) { grpc_call_element *elem = (grpc_call_element *)user_data; call_data *calld = (call_data *)elem->call_data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; // If the call was not cancelled while we were in flight, process the result. if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, (gpr_atm)STATE_DONE)) { @@ -134,34 +131,32 @@ static void on_md_processing_done( GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details), GRPC_ERROR_INT_GRPC_STATUS, status); } - on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md, - response_md, num_response_md, error); + on_md_processing_done_inner(elem, consumed_md, num_consumed_md, response_md, + num_response_md, error); } // Clean up. 
for (size_t i = 0; i < calld->md.count; i++) { - grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key); - grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value); + grpc_slice_unref_internal(calld->md.metadata[i].key); + grpc_slice_unref_internal(calld->md.metadata[i].value); } grpc_metadata_array_destroy(&calld->md); - GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata"); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CALL_STACK_UNREF(calld->owning_call, "server_auth_metadata"); + grpc_exec_ctx_finish(); } -static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void cancel_call(void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; // If the result was not already processed, invoke the callback now. if (error != GRPC_ERROR_NONE && gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, (gpr_atm)STATE_CANCELLED)) { - on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0, - GRPC_ERROR_REF(error)); + on_md_processing_done_inner(elem, NULL, 0, NULL, 0, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call"); + GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_call"); } -static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void recv_initial_metadata_ready(void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; channel_data *chand = (channel_data *)elem->channel_data; call_data *calld = (call_data *)elem->call_data; @@ -173,7 +168,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call"); GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem, grpc_schedule_on_exec_ctx); - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + grpc_call_combiner_set_notify_on_cancel(calld->call_combiner, 
&calld->cancel_closure); GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata"); calld->md = metadata_batch_to_md_array( @@ -184,13 +179,12 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, return; } } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready, + GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, GRPC_ERROR_REF(error)); } static void auth_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { + grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { call_data *calld = (call_data *)elem->call_data; if (batch->recv_initial_metadata) { // Inject our callback. @@ -200,12 +194,11 @@ static void auth_start_transport_stream_op_batch( batch->payload->recv_initial_metadata.recv_initial_metadata_ready = &calld->recv_initial_metadata_ready; } - grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static grpc_error *init_call_elem(grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; @@ -231,13 +224,12 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +static void destroy_call_elem(grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) {} /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static grpc_error *init_channel_elem(grpc_channel_element *elem, grpc_channel_element_args *args) { GPR_ASSERT(!args->is_last); channel_data *chand = (channel_data *)elem->channel_data; @@ -253,11 +245,10 @@ static grpc_error 
*init_channel_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { +static void destroy_channel_elem(grpc_channel_element *elem) { channel_data *chand = (channel_data *)elem->channel_data; GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter"); - grpc_server_credentials_unref(exec_ctx, chand->creds); + grpc_server_credentials_unref(chand->creds); } const grpc_channel_filter grpc_server_auth_filter = { diff --git a/src/core/lib/slice/b64.cc b/src/core/lib/slice/b64.cc index 50264719a4..69c6498923 100644 --- a/src/core/lib/slice/b64.cc +++ b/src/core/lib/slice/b64.cc @@ -122,9 +122,8 @@ void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size, result[current - result] = '\0'; } -grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64, - int url_safe) { - return grpc_base64_decode_with_len(exec_ctx, b64, strlen(b64), url_safe); +grpc_slice grpc_base64_decode(const char *b64, int url_safe) { + return grpc_base64_decode_with_len(b64, strlen(b64), url_safe); } static void decode_one_char(const unsigned char *codes, unsigned char *result, @@ -185,8 +184,8 @@ static int decode_group(const unsigned char *codes, size_t num_codes, return 1; } -grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, - size_t b64_len, int url_safe) { +grpc_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len, + int url_safe) { grpc_slice result = GRPC_SLICE_MALLOC(b64_len); unsigned char *current = GRPC_SLICE_START_PTR(result); size_t result_size = 0; @@ -231,6 +230,6 @@ grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, return result; fail: - grpc_slice_unref_internal(exec_ctx, result); + grpc_slice_unref_internal(result); return grpc_empty_slice(); } diff --git a/src/core/lib/slice/b64.h b/src/core/lib/slice/b64.h index 9b4dc65dbb..0a60f5a351 100644 --- 
a/src/core/lib/slice/b64.h +++ b/src/core/lib/slice/b64.h @@ -44,12 +44,11 @@ void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size, /* Decodes data according to the base64 specification. Returns an empty slice in case of failure. */ -grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64, - int url_safe); +grpc_slice grpc_base64_decode(const char *b64, int url_safe); /* Same as above except that the length is provided by the caller. */ -grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, - size_t b64_len, int url_safe); +grpc_slice grpc_base64_decode_with_len(const char *b64, size_t b64_len, + int url_safe); #ifdef __cplusplus } diff --git a/src/core/lib/slice/slice.cc b/src/core/lib/slice/slice.cc index 0764eda052..07fc49e189 100644 --- a/src/core/lib/slice/slice.cc +++ b/src/core/lib/slice/slice.cc @@ -54,9 +54,9 @@ grpc_slice grpc_slice_ref_internal(grpc_slice slice) { return slice; } -void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice) { +void grpc_slice_unref_internal(grpc_slice slice) { if (slice.refcount) { - slice.refcount->vtable->unref(exec_ctx, slice.refcount); + slice.refcount->vtable->unref(slice.refcount); } } @@ -67,15 +67,15 @@ grpc_slice grpc_slice_ref(grpc_slice slice) { /* Public API */ void grpc_slice_unref(grpc_slice slice) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, slice); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_slice_unref_internal(slice); + grpc_exec_ctx_finish(); } /* grpc_slice_from_static_string support structure - a refcount that does nothing */ static void noop_ref(void *unused) {} -static void noop_unref(grpc_exec_ctx *exec_ctx, void *unused) {} +static void noop_unref(void *unused) {} static const grpc_slice_refcount_vtable noop_refcount_vtable = { noop_ref, noop_unref, grpc_slice_default_eq_impl, @@ -109,7 +109,7 @@ static void new_slice_ref(void *p) { 
gpr_ref(&r->refs); } -static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { +static void new_slice_unref(void *p) { new_slice_refcount *r = (new_slice_refcount *)p; if (gpr_unref(&r->refs)) { r->user_destroy(r->user_data); @@ -159,7 +159,7 @@ static void new_with_len_ref(void *p) { gpr_ref(&r->refs); } -static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) { +static void new_with_len_unref(void *p) { new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p; if (gpr_unref(&r->refs)) { r->user_destroy(r->user_data, r->user_length); @@ -210,7 +210,7 @@ static void malloc_ref(void *p) { gpr_ref(&r->refs); } -static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) { +static void malloc_unref(void *p) { malloc_refcount *r = (malloc_refcount *)p; if (gpr_unref(&r->refs)) { gpr_free(r); diff --git a/src/core/lib/slice/slice_buffer.cc b/src/core/lib/slice/slice_buffer.cc index 63ffc0b00d..c76d91bd1c 100644 --- a/src/core/lib/slice/slice_buffer.cc +++ b/src/core/lib/slice/slice_buffer.cc @@ -65,18 +65,17 @@ void grpc_slice_buffer_init(grpc_slice_buffer *sb) { sb->base_slices = sb->slices = sb->inlined; } -void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, sb); +void grpc_slice_buffer_destroy_internal(grpc_slice_buffer *sb) { + grpc_slice_buffer_reset_and_unref_internal(sb); if (sb->base_slices != sb->inlined) { gpr_free(sb->base_slices); } } void grpc_slice_buffer_destroy(grpc_slice_buffer *sb) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_buffer_destroy_internal(&exec_ctx, sb); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_slice_buffer_destroy_internal(sb); + grpc_exec_ctx_finish(); } uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t n) { @@ -163,11 +162,10 @@ void grpc_slice_buffer_pop(grpc_slice_buffer *sb) { } } -void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx, - 
grpc_slice_buffer *sb) { +void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer *sb) { size_t i; for (i = 0; i < sb->count; i++) { - grpc_slice_unref_internal(exec_ctx, sb->slices[i]); + grpc_slice_unref_internal(sb->slices[i]); } sb->count = 0; @@ -175,9 +173,9 @@ void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx, } void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer *sb) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, sb); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_slice_buffer_reset_and_unref_internal(sb); + grpc_exec_ctx_finish(); } void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) { @@ -289,8 +287,7 @@ void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n, slice_buffer_move_first_maybe_ref(src, n, dst, false); } -void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *src, size_t n, +void grpc_slice_buffer_move_first_into_buffer(grpc_slice_buffer *src, size_t n, void *dst) { char *dstp = (char *)dst; GPR_ASSERT(src->length >= n); @@ -305,13 +302,13 @@ void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx, n = 0; } else if (slice_len == n) { memcpy(dstp, GRPC_SLICE_START_PTR(slice), n); - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); n = 0; } else { memcpy(dstp, GRPC_SLICE_START_PTR(slice), slice_len); dstp += slice_len; n -= slice_len; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); } } } diff --git a/src/core/lib/slice/slice_hash_table.cc b/src/core/lib/slice/slice_hash_table.cc index 6c2c9c201c..c122c9f4ef 100644 --- a/src/core/lib/slice/slice_hash_table.cc +++ b/src/core/lib/slice/slice_hash_table.cc @@ -27,7 +27,7 @@ struct grpc_slice_hash_table { gpr_refcount refs; - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value); + void (*destroy_value)(void* value); int 
(*value_cmp)(void* a, void* b); size_t size; size_t max_num_probes; @@ -58,8 +58,7 @@ static void grpc_slice_hash_table_add(grpc_slice_hash_table* table, grpc_slice_hash_table* grpc_slice_hash_table_create( size_t num_entries, grpc_slice_hash_table_entry* entries, - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value), - int (*value_cmp)(void* a, void* b)) { + void (*destroy_value)(void* value), int (*value_cmp)(void* a, void* b)) { grpc_slice_hash_table* table = (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table)); gpr_ref_init(&table->refs, 1); @@ -81,14 +80,13 @@ grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table) { return table; } -void grpc_slice_hash_table_unref(grpc_exec_ctx* exec_ctx, - grpc_slice_hash_table* table) { +void grpc_slice_hash_table_unref(grpc_slice_hash_table* table) { if (table != NULL && gpr_unref(&table->refs)) { for (size_t i = 0; i < table->size; ++i) { grpc_slice_hash_table_entry* entry = &table->entries[i]; if (!is_empty(entry)) { - grpc_slice_unref_internal(exec_ctx, entry->key); - table->destroy_value(exec_ctx, entry->value); + grpc_slice_unref_internal(entry->key); + table->destroy_value(entry->value); } } gpr_free(table->entries); diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h index 41250df738..a3df9c92e5 100644 --- a/src/core/lib/slice/slice_hash_table.h +++ b/src/core/lib/slice/slice_hash_table.h @@ -50,12 +50,10 @@ typedef struct grpc_slice_hash_table_entry { will be used. 
*/ grpc_slice_hash_table *grpc_slice_hash_table_create( size_t num_entries, grpc_slice_hash_table_entry *entries, - void (*destroy_value)(grpc_exec_ctx *exec_ctx, void *value), - int (*value_cmp)(void *a, void *b)); + void (*destroy_value)(void *value), int (*value_cmp)(void *a, void *b)); grpc_slice_hash_table *grpc_slice_hash_table_ref(grpc_slice_hash_table *table); -void grpc_slice_hash_table_unref(grpc_exec_ctx *exec_ctx, - grpc_slice_hash_table *table); +void grpc_slice_hash_table_unref(grpc_slice_hash_table *table); /** Returns the value from \a table associated with \a key. Returns NULL if \a key is not found. */ diff --git a/src/core/lib/slice/slice_intern.cc b/src/core/lib/slice/slice_intern.cc index 1ea9a2aa67..2503ee8177 100644 --- a/src/core/lib/slice/slice_intern.cc +++ b/src/core/lib/slice/slice_intern.cc @@ -90,7 +90,7 @@ static void interned_slice_destroy(interned_slice_refcount *s) { gpr_mu_unlock(&shard->mu); } -static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { +static void interned_slice_unref(void *p) { interned_slice_refcount *s = (interned_slice_refcount *)p; if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) { interned_slice_destroy(s); @@ -101,9 +101,8 @@ static void interned_slice_sub_ref(void *p) { interned_slice_ref(((char *)p) - offsetof(interned_slice_refcount, sub)); } -static void interned_slice_sub_unref(grpc_exec_ctx *exec_ctx, void *p) { - interned_slice_unref(exec_ctx, - ((char *)p) - offsetof(interned_slice_refcount, sub)); +static void interned_slice_sub_unref(void *p) { + interned_slice_unref(((char *)p) - offsetof(interned_slice_refcount, sub)); } static uint32_t interned_slice_hash(grpc_slice slice) { diff --git a/src/core/lib/slice/slice_internal.h b/src/core/lib/slice/slice_internal.h index fcf70a0e55..5156f3fc50 100644 --- a/src/core/lib/slice/slice_internal.h +++ b/src/core/lib/slice/slice_internal.h @@ -29,11 +29,9 @@ extern "C" { #endif grpc_slice grpc_slice_ref_internal(grpc_slice slice); -void 
grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice); -void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb); -void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb); +void grpc_slice_unref_internal(grpc_slice slice); +void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer *sb); +void grpc_slice_buffer_destroy_internal(grpc_slice_buffer *sb); /* Check if a slice is interned */ bool grpc_slice_is_interned(grpc_slice slice); diff --git a/src/core/lib/surface/alarm.cc b/src/core/lib/surface/alarm.cc index 16a16bfd93..4f3bc5172d 100644 --- a/src/core/lib/surface/alarm.cc +++ b/src/core/lib/surface/alarm.cc @@ -47,11 +47,11 @@ static void alarm_ref(grpc_alarm *alarm) { gpr_ref(&alarm->refs); } static void alarm_unref(grpc_alarm *alarm) { if (gpr_unref(&alarm->refs)) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; if (alarm->cq != NULL) { - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm"); + GRPC_CQ_INTERNAL_UNREF(alarm->cq, "alarm"); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); gpr_free(alarm); } } @@ -82,20 +82,19 @@ static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason, } #endif -static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg, - grpc_cq_completion *c) { +static void alarm_end_completion(void *arg, grpc_cq_completion *c) { grpc_alarm *alarm = (grpc_alarm *)arg; GRPC_ALARM_UNREF(alarm, "dequeue-end-op"); } -static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void alarm_cb(void *arg, grpc_error *error) { grpc_alarm *alarm = (grpc_alarm *)arg; /* We are queuing an op on completion queue. This means, the alarm's structure cannot be destroyed until the op is dequeued. 
Adding an extra ref here and unref'ing when the op is dequeued will achieve this */ GRPC_ALARM_REF(alarm, "queue-end-op"); - grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, error, alarm_end_completion, + grpc_cq_end_op(alarm->cq, alarm->tag, error, alarm_end_completion, (void *)alarm, &alarm->completion); } @@ -118,22 +117,22 @@ grpc_alarm *grpc_alarm_create(void *reserved) { void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq, gpr_timespec deadline, void *tag, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_CQ_INTERNAL_REF(cq, "alarm"); alarm->cq = cq; alarm->tag = tag; GPR_ASSERT(grpc_cq_begin_op(cq, tag)); - grpc_timer_init(&exec_ctx, &alarm->alarm, - grpc_timespec_to_millis_round_up(deadline), &alarm->on_alarm); - grpc_exec_ctx_finish(&exec_ctx); + grpc_timer_init(&alarm->alarm, grpc_timespec_to_millis_round_up(deadline), + &alarm->on_alarm); + grpc_exec_ctx_finish(); } void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_timer_cancel(&exec_ctx, &alarm->alarm); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_timer_cancel(&alarm->alarm); + grpc_exec_ctx_finish(); } void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved) { diff --git a/src/core/lib/surface/byte_buffer.cc b/src/core/lib/surface/byte_buffer.cc index 7ed550ef87..5123189671 100644 --- a/src/core/lib/surface/byte_buffer.cc +++ b/src/core/lib/surface/byte_buffer.cc @@ -71,14 +71,14 @@ grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) { void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) { if (!bb) return; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; switch (bb->type) { case GRPC_BB_RAW: - grpc_slice_buffer_destroy_internal(&exec_ctx, &bb->data.raw.slice_buffer); + grpc_slice_buffer_destroy_internal(&bb->data.raw.slice_buffer); break; } gpr_free(bb); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } size_t 
grpc_byte_buffer_length(grpc_byte_buffer *bb) { diff --git a/src/core/lib/surface/byte_buffer_reader.cc b/src/core/lib/surface/byte_buffer_reader.cc index 87bd3239c0..5f5596c803 100644 --- a/src/core/lib/surface/byte_buffer_reader.cc +++ b/src/core/lib/surface/byte_buffer_reader.cc @@ -42,15 +42,14 @@ static int is_compressed(grpc_byte_buffer *buffer) { int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_slice_buffer decompressed_slices_buffer; reader->buffer_in = buffer; switch (reader->buffer_in->type) { case GRPC_BB_RAW: grpc_slice_buffer_init(&decompressed_slices_buffer); if (is_compressed(reader->buffer_in)) { - if (grpc_msg_decompress(&exec_ctx, - reader->buffer_in->data.raw.compression, + if (grpc_msg_decompress(reader->buffer_in->data.raw.compression, &reader->buffer_in->data.raw.slice_buffer, &decompressed_slices_buffer) == 0) { gpr_log(GPR_ERROR, @@ -64,15 +63,14 @@ int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices, decompressed_slices_buffer.count); } - grpc_slice_buffer_destroy_internal(&exec_ctx, - &decompressed_slices_buffer); + grpc_slice_buffer_destroy_internal(&decompressed_slices_buffer); } else { /* not compressed, use the input buffer as output */ reader->buffer_out = reader->buffer_in; } reader->current.index = 0; break; } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return 1; } @@ -112,14 +110,14 @@ grpc_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) { grpc_slice out_slice = GRPC_SLICE_MALLOC(input_size); uint8_t *const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */ - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) { const size_t slice_length = GRPC_SLICE_LENGTH(in_slice); memcpy(&(outbuf[bytes_read]), 
GRPC_SLICE_START_PTR(in_slice), slice_length); bytes_read += slice_length; - grpc_slice_unref_internal(&exec_ctx, in_slice); + grpc_slice_unref_internal(in_slice); GPR_ASSERT(bytes_read <= input_size); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return out_slice; } diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc index 8216aa0ec8..26002056ea 100644 --- a/src/core/lib/surface/call.cc +++ b/src/core/lib/surface/call.cc @@ -271,29 +271,26 @@ grpc_tracer_flag grpc_compression_trace = #define CALL_FROM_TOP_ELEM(top_elem) \ CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem)) -static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_transport_stream_op_batch *op, +static void execute_batch(grpc_call *call, grpc_transport_stream_op_batch *op, grpc_closure *start_batch_closure); -static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_status_code status, +static void cancel_with_status(grpc_call *c, status_source source, + grpc_status_code status, const char *description); -static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_error *error); -static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack, - grpc_error *error); -static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error); -static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, +static void cancel_with_error(grpc_call *c, status_source source, + grpc_error *error); +static void destroy_call(void *call_stack, grpc_error *error); +static void receiving_slice_ready(void *bctlp, grpc_error *error); +static void get_final_status(grpc_call *call, void (*set_value)(grpc_status_code code, void *user_data), void *set_value_user_data, grpc_slice *details); static void set_status_value_directly(grpc_status_code status, void *dest); -static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call, - 
status_source source, grpc_error *error); -static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl); -static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl); -static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl, - grpc_error *error, bool has_cancelled); +static void set_status_from_error(grpc_call *call, status_source source, + grpc_error *error); +static void process_data_after_md(batch_control *bctl); +static void post_batch_completion(batch_control *bctl); +static void add_batch_error(batch_control *bctl, grpc_error *error, + bool has_cancelled); static void add_init_error(grpc_error **composite, grpc_error *new_err) { if (new_err == GRPC_ERROR_NONE) return; @@ -323,8 +320,7 @@ static parent_call *get_parent_call(grpc_call *call) { return (parent_call *)gpr_atm_acq_load(&call->parent_call_atm); } -grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, - const grpc_call_create_args *args, +grpc_error *grpc_call_create(const grpc_call_create_args *args, grpc_call **out_call) { size_t i, j; grpc_error *error = GRPC_ERROR_NONE; @@ -333,7 +329,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, grpc_call *call; GPR_TIMER_BEGIN("grpc_call_create", 0); size_t initial_size = grpc_channel_get_call_size_estimate(args->channel); - GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size); + GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size); gpr_arena *arena = gpr_arena_create(initial_size); call = (grpc_call *)gpr_arena_alloc( arena, sizeof(grpc_call) + channel_stack->call_stack_size); @@ -348,9 +344,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE); call->is_client = args->server_transport_data == NULL; if (call->is_client) { - GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx); + GRPC_STATS_INC_CLIENT_CALLS_CREATED(); } else { - GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx); + GRPC_STATS_INC_SERVER_CALLS_CREATED(); } 
call->stream_op_payload.context = call->context; grpc_slice path = grpc_empty_slice(); @@ -445,15 +441,13 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, send_deadline, call->arena, &call->call_combiner}; - add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1, - destroy_call, call, &call_args)); + add_init_error(&error, grpc_call_stack_init(channel_stack, 1, destroy_call, + call, &call_args)); if (error != GRPC_ERROR_NONE) { - cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_ERROR_REF(error)); + cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error)); } if (immediately_cancel) { - cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE, - GRPC_ERROR_CANCELLED); + cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); } if (args->cq != NULL) { GPR_ASSERT( @@ -468,17 +462,17 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, args->pollset_set_alternative); } if (!grpc_polling_entity_is_empty(&call->pollent)) { - grpc_call_stack_set_pollset_or_pollset_set( - exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent); + grpc_call_stack_set_pollset_or_pollset_set(CALL_STACK_FROM_CALL(call), + &call->pollent); } - grpc_slice_unref_internal(exec_ctx, path); + grpc_slice_unref_internal(path); GPR_TIMER_END("grpc_call_create", 0); return error; } -void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call, +void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq) { GPR_ASSERT(cq); @@ -489,8 +483,8 @@ void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call, call->cq = cq; GRPC_CQ_INTERNAL_REF(cq, "bind"); call->pollent = grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)); - grpc_call_stack_set_pollset_or_pollset_set( - exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent); + grpc_call_stack_set_pollset_or_pollset_set(CALL_STACK_FROM_CALL(call), + &call->pollent); } #ifndef NDEBUG @@ -503,40 +497,38 @@ void 
grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call, void grpc_call_internal_ref(grpc_call *c REF_ARG) { GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON); } -void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) { - GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON); +void grpc_call_internal_unref(grpc_call *c REF_ARG) { + GRPC_CALL_STACK_UNREF(CALL_STACK_FROM_CALL(c), REF_REASON); } -static void release_call(grpc_exec_ctx *exec_ctx, void *call, - grpc_error *error) { +static void release_call(void *call, grpc_error *error) { grpc_call *c = (grpc_call *)call; grpc_channel *channel = c->channel; grpc_call_combiner_destroy(&c->call_combiner); gpr_free((char *)c->peer_string); grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena)); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call"); + GRPC_CHANNEL_INTERNAL_UNREF(channel, "call"); } static void set_status_value_directly(grpc_status_code status, void *dest); -static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, - grpc_error *error) { +static void destroy_call(void *call, grpc_error *error) { size_t i; int ii; grpc_call *c = (grpc_call *)call; GPR_TIMER_BEGIN("destroy_call", 0); for (i = 0; i < 2; i++) { grpc_metadata_batch_destroy( - exec_ctx, &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]); + &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]); } if (c->receiving_stream != NULL) { - grpc_byte_stream_destroy(exec_ctx, c->receiving_stream); + grpc_byte_stream_destroy(c->receiving_stream); } parent_call *pc = get_parent_call(c); if (pc != NULL) { gpr_mu_destroy(&pc->child_list_mu); } for (ii = 0; ii < c->send_extra_metadata_count; ii++) { - GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md); + GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md); } for (i = 0; i < GRPC_CONTEXT_COUNT; i++) { if (c->context[i].destroy) { @@ -544,11 +536,11 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, 
void *call, } } if (c->cq) { - GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind"); + GRPC_CQ_INTERNAL_UNREF(c->cq, "bind"); } - get_final_status(exec_ctx, c, set_status_value_directly, - &c->final_info.final_status, NULL); + get_final_status(c, set_status_value_directly, &c->final_info.final_status, + NULL); c->final_info.stats.latency = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time); @@ -557,7 +549,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, unpack_received_status(gpr_atm_acq_load(&c->status[i])).error); } - grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, + grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c), &c->final_info, GRPC_CLOSURE_INIT(&c->release_call, release_call, c, grpc_schedule_on_exec_ctx)); GPR_TIMER_END("destroy_call", 0); @@ -569,7 +561,7 @@ void grpc_call_unref(grpc_call *c) { if (!gpr_unref(&c->ext_ref)) return; child_call *cc = c->child; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GPR_TIMER_BEGIN("grpc_call_unref", 0); GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c)); @@ -586,7 +578,7 @@ void grpc_call_unref(grpc_call *c) { cc->sibling_prev->child->sibling_next = cc->sibling_next; cc->sibling_next->child->sibling_prev = cc->sibling_prev; gpr_mu_unlock(&pc->child_list_mu); - GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child"); + GRPC_CALL_INTERNAL_UNREF(cc->parent, "child"); } GPR_ASSERT(!c->destroy_called); @@ -594,52 +586,49 @@ void grpc_call_unref(grpc_call *c) { bool cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) != 0 && gpr_atm_acq_load(&c->received_final_op_atm) == 0; if (cancel) { - cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, - GRPC_ERROR_CANCELLED); + cancel_with_error(c, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); } else { // Unset the call combiner cancellation closure. 
This has the // effect of scheduling the previously set cancellation closure, if // any, so that it can release any internal references it may be // holding to the call stack. - grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner, NULL); + grpc_call_combiner_set_notify_on_cancel(&c->call_combiner, NULL); } - GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CALL_INTERNAL_UNREF(c, "destroy"); + grpc_exec_ctx_finish(); GPR_TIMER_END("grpc_call_unref", 0); } grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) { GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved)); GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - cancel_with_error(&exec_ctx, call, STATUS_FROM_API_OVERRIDE, - GRPC_ERROR_CANCELLED); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); + grpc_exec_ctx_finish(); return GRPC_CALL_OK; } // This is called via the call combiner to start sending a batch down // the filter stack. -static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *ignored) { +static void execute_batch_in_call_combiner(void *arg, grpc_error *ignored) { grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call *call = (grpc_call *)batch->handler_private.extra_arg; GPR_TIMER_BEGIN("execute_batch", 0); grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0); GRPC_CALL_LOG_OP(GPR_INFO, elem, batch); - elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch); + elem->filter->start_transport_stream_op_batch(elem, batch); GPR_TIMER_END("execute_batch", 0); } // start_batch_closure points to a caller-allocated closure to be used // for entering the call combiner. 
-static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, +static void execute_batch(grpc_call *call, grpc_transport_stream_op_batch *batch, grpc_closure *start_batch_closure) { batch->handler_private.extra_arg = call; GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch, grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure, + GRPC_CALL_COMBINER_START(&call->call_combiner, start_batch_closure, GRPC_ERROR_NONE, "executing batch"); } @@ -663,15 +652,14 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c, grpc_status_code status, const char *description, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE( "grpc_call_cancel_with_status(" "c=%p, status=%d, description=%s, reserved=%p)", 4, (c, (int)status, description, reserved)); GPR_ASSERT(reserved == NULL); - cancel_with_status(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, status, - description); - grpc_exec_ctx_finish(&exec_ctx); + cancel_with_status(c, STATUS_FROM_API_OVERRIDE, status, description); + grpc_exec_ctx_finish(); return GRPC_CALL_OK; } @@ -683,24 +671,23 @@ typedef struct { // The on_complete callback used when sending a cancel_stream batch down // the filter stack. Yields the call combiner when the batch is done. 
-static void done_termination(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void done_termination(void *arg, grpc_error *error) { cancel_state *state = (cancel_state *)arg; - GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner, + GRPC_CALL_COMBINER_STOP(&state->call->call_combiner, "on_complete for cancel_stream op"); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination"); + GRPC_CALL_INTERNAL_UNREF(state->call, "termination"); gpr_free(state); } -static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_error *error) { +static void cancel_with_error(grpc_call *c, status_source source, + grpc_error *error) { GRPC_CALL_INTERNAL_REF(c, "termination"); // Inform the call combiner of the cancellation, so that it can cancel // any in-flight asynchronous actions that may be holding the call // combiner. This ensures that the cancel_stream batch can be sent // down the filter stack in a timely manner. - grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error)); - set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error)); + grpc_call_combiner_cancel(&c->call_combiner, GRPC_ERROR_REF(error)); + set_status_from_error(c, source, GRPC_ERROR_REF(error)); cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state)); state->call = c; GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state, @@ -709,7 +696,7 @@ static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, grpc_make_transport_stream_op(&state->finish_batch); op->cancel_stream = true; op->payload->cancel_stream.cancel_error = error; - execute_batch(exec_ctx, c, op, &state->start_batch); + execute_batch(c, op, &state->start_batch); } static grpc_error *error_from_status(grpc_status_code status, @@ -723,27 +710,23 @@ static grpc_error *error_from_status(grpc_status_code status, GRPC_ERROR_INT_GRPC_STATUS, status); } -static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, - 
status_source source, grpc_status_code status, +static void cancel_with_status(grpc_call *c, status_source source, + grpc_status_code status, const char *description) { - cancel_with_error(exec_ctx, c, source, - error_from_status(status, description)); + cancel_with_error(c, source, error_from_status(status, description)); } /******************************************************************************* * FINAL STATUS CODE MANIPULATION */ -static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_error *error, bool allow_ok_status, - void (*set_value)(grpc_status_code code, - void *user_data), - void *set_value_user_data, - grpc_slice *details) { +static bool get_final_status_from( + grpc_call *call, grpc_error *error, bool allow_ok_status, + void (*set_value)(grpc_status_code code, void *user_data), + void *set_value_user_data, grpc_slice *details) { grpc_status_code code; grpc_slice slice = grpc_empty_slice(); - grpc_error_get_status(exec_ctx, error, call->send_deadline, &code, &slice, - NULL); + grpc_error_get_status(error, call->send_deadline, &code, &slice, NULL); if (code == GRPC_STATUS_OK && !allow_ok_status) { return false; } @@ -755,7 +738,7 @@ static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call, return true; } -static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, +static void get_final_status(grpc_call *call, void (*set_value)(grpc_status_code code, void *user_data), void *set_value_user_data, grpc_slice *details) { @@ -780,9 +763,8 @@ static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (status[i].is_set && grpc_error_has_clear_grpc_status(status[i].error)) { - if (get_final_status_from(exec_ctx, call, status[i].error, - allow_ok_status != 0, set_value, - set_value_user_data, details)) { + if (get_final_status_from(call, status[i].error, allow_ok_status != 0, + set_value, set_value_user_data, details)) { return; } } @@ 
-790,9 +772,8 @@ static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, /* If no clearly defined status exists, search for 'anything' */ for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (status[i].is_set) { - if (get_final_status_from(exec_ctx, call, status[i].error, - allow_ok_status != 0, set_value, - set_value_user_data, details)) { + if (get_final_status_from(call, status[i].error, allow_ok_status != 0, + set_value, set_value_user_data, details)) { return; } } @@ -806,8 +787,8 @@ static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call, } } -static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call, - status_source source, grpc_error *error) { +static void set_status_from_error(grpc_call *call, status_source source, + grpc_error *error) { if (!gpr_atm_rel_cas(&call->status[source], pack_received_status({false, GRPC_ERROR_NONE}), pack_received_status({true, error}))) { @@ -859,8 +840,7 @@ uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) { static void destroy_encodings_accepted_by_peer(void *p) { return; } -static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, - grpc_call *call, grpc_mdelem mdel) { +static void set_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem mdel) { size_t i; grpc_compression_algorithm algorithm; grpc_slice_buffer accept_encoding_parts; @@ -898,15 +878,14 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, } } - grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts); + grpc_slice_buffer_destroy_internal(&accept_encoding_parts); grpc_mdelem_set_user_data( mdel, destroy_encodings_accepted_by_peer, (void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1)); } -static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, - grpc_call *call, +static void set_stream_encodings_accepted_by_peer(grpc_call *call, grpc_mdelem mdel) { size_t i; grpc_stream_compression_algorithm algorithm; @@ -944,7 +923,7 @@ static void 
set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, } } - grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts); + grpc_slice_buffer_destroy_internal(&accept_encoding_parts); grpc_mdelem_set_user_data( mdel, destroy_encodings_accepted_by_peer, @@ -982,10 +961,12 @@ static grpc_metadata *get_md_elem(grpc_metadata *metadata, return res; } -static int prepare_application_metadata( - grpc_exec_ctx *exec_ctx, grpc_call *call, int count, - grpc_metadata *metadata, int is_trailing, int prepend_extra_metadata, - grpc_metadata *additional_metadata, int additional_metadata_count) { +static int prepare_application_metadata(grpc_call *call, int count, + grpc_metadata *metadata, + int is_trailing, + int prepend_extra_metadata, + grpc_metadata *additional_metadata, + int additional_metadata_count) { int total_count = count + additional_metadata_count; int i; grpc_metadata_batch *batch = @@ -1004,14 +985,14 @@ static int prepare_application_metadata( grpc_validate_header_nonbin_value_is_legal(md->value))) { break; } - l->md = grpc_mdelem_from_grpc_metadata(exec_ctx, (grpc_metadata *)md); + l->md = grpc_mdelem_from_grpc_metadata((grpc_metadata *)md); } if (i != total_count) { for (int j = 0; j < i; j++) { const grpc_metadata *md = get_md_elem(metadata, additional_metadata, j, count); grpc_linked_mdelem *l = linked_from_md(md); - GRPC_MDELEM_UNREF(exec_ctx, l->md); + GRPC_MDELEM_UNREF(l->md); } return 0; } @@ -1022,16 +1003,16 @@ static int prepare_application_metadata( for (i = 0; i < call->send_extra_metadata_count; i++) { GRPC_LOG_IF_ERROR("prepare_application_metadata", grpc_metadata_batch_link_tail( - exec_ctx, batch, &call->send_extra_metadata[i])); + batch, &call->send_extra_metadata[i])); } } } for (i = 0; i < total_count; i++) { grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count); grpc_linked_mdelem *l = linked_from_md(md); - grpc_error *error = grpc_metadata_batch_link_tail(exec_ctx, batch, l); + grpc_error *error = 
grpc_metadata_batch_link_tail(batch, l); if (error != GRPC_ERROR_NONE) { - GRPC_MDELEM_UNREF(exec_ctx, l->md); + GRPC_MDELEM_UNREF(l->md); } GRPC_LOG_IF_ERROR("prepare_application_metadata", error); } @@ -1118,46 +1099,43 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b, GPR_TIMER_END("publish_app_metadata", 0); } -static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_metadata_batch *b) { +static void recv_initial_filter(grpc_call *call, grpc_metadata_batch *b) { if (b->idx.named.content_encoding != NULL) { if (b->idx.named.grpc_encoding != NULL) { gpr_log(GPR_ERROR, "Received both content-encoding and grpc-encoding header. " "Ignoring grpc-encoding."); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding); + grpc_metadata_batch_remove(b, b->idx.named.grpc_encoding); } GPR_TIMER_BEGIN("incoming_stream_compression_algorithm", 0); set_incoming_stream_compression_algorithm( call, decode_stream_compression(b->idx.named.content_encoding->md)); GPR_TIMER_END("incoming_stream_compression_algorithm", 0); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_encoding); + grpc_metadata_batch_remove(b, b->idx.named.content_encoding); } else if (b->idx.named.grpc_encoding != NULL) { GPR_TIMER_BEGIN("incoming_compression_algorithm", 0); set_incoming_compression_algorithm( call, decode_compression(b->idx.named.grpc_encoding->md)); GPR_TIMER_END("incoming_compression_algorithm", 0); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding); + grpc_metadata_batch_remove(b, b->idx.named.grpc_encoding); } if (b->idx.named.grpc_accept_encoding != NULL) { GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0); - set_encodings_accepted_by_peer(exec_ctx, call, - b->idx.named.grpc_accept_encoding->md); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding); + set_encodings_accepted_by_peer(call, b->idx.named.grpc_accept_encoding->md); + grpc_metadata_batch_remove(b, 
b->idx.named.grpc_accept_encoding); GPR_TIMER_END("encodings_accepted_by_peer", 0); } if (b->idx.named.accept_encoding != NULL) { GPR_TIMER_BEGIN("stream_encodings_accepted_by_peer", 0); - set_stream_encodings_accepted_by_peer(exec_ctx, call, + set_stream_encodings_accepted_by_peer(call, b->idx.named.accept_encoding->md); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.accept_encoding); + grpc_metadata_batch_remove(b, b->idx.named.accept_encoding); GPR_TIMER_END("stream_encodings_accepted_by_peer", 0); } publish_app_metadata(call, b, false); } -static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args, - grpc_metadata_batch *b) { +static void recv_trailing_filter(void *args, grpc_metadata_batch *b) { grpc_call *call = (grpc_call *)args; if (b->idx.named.grpc_status != NULL) { uint32_t status_code = decode_status(b->idx.named.grpc_status->md); @@ -1172,13 +1150,13 @@ static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args, error = grpc_error_set_str( error, GRPC_ERROR_STR_GRPC_MESSAGE, grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md))); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message); + grpc_metadata_batch_remove(b, b->idx.named.grpc_message); } else if (error != GRPC_ERROR_NONE) { error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE, grpc_empty_slice()); } - set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status); + set_status_from_error(call, STATUS_FROM_WIRE, error); + grpc_metadata_batch_remove(b, b->idx.named.grpc_status); } publish_app_metadata(call, b, true); } @@ -1255,12 +1233,12 @@ static batch_control *allocate_batch_control(grpc_call *call, return bctl; } -static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data, +static void finish_batch_completion(void *user_data, grpc_cq_completion *storage) { batch_control *bctl = (batch_control *)user_data; grpc_call *call = bctl->call; 
bctl->call = NULL; - GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); + GRPC_CALL_INTERNAL_UNREF(call, "completion"); } static grpc_error *consolidate_batch_errors(batch_control *bctl) { @@ -1284,15 +1262,13 @@ static grpc_error *consolidate_batch_errors(batch_control *bctl) { } } -static void post_batch_completion(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { +static void post_batch_completion(batch_control *bctl) { grpc_call *next_child_call; grpc_call *call = bctl->call; grpc_error *error = consolidate_batch_errors(bctl); if (bctl->op.send_initial_metadata) { grpc_metadata_batch_destroy( - exec_ctx, &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]); } if (bctl->op.send_message) { @@ -1300,13 +1276,12 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, } if (bctl->op.send_trailing_metadata) { grpc_metadata_batch_destroy( - exec_ctx, &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]); } if (bctl->op.recv_trailing_metadata) { grpc_metadata_batch *md = &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */]; - recv_trailing_filter(exec_ctx, call, md); + recv_trailing_filter(call, md); /* propagate cancellation to any interested children */ gpr_atm_rel_store(&call->received_final_op_atm, 1); @@ -1320,9 +1295,9 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, next_child_call = child->child->sibling_next; if (child->cancellation_is_inherited) { GRPC_CALL_INTERNAL_REF(child, "propagate_cancel"); - cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE, + cancel_with_error(child, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, child, "propagate_cancel"); + GRPC_CALL_INTERNAL_UNREF(child, "propagate_cancel"); } child = next_child_call; } while (child != pc->first_child); @@ -1331,11 +1306,11 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, } if (call->is_client) { - get_final_status(exec_ctx, call, set_status_value_directly, + 
get_final_status(call, set_status_value_directly, call->final_op.client.status, call->final_op.client.status_details); } else { - get_final_status(exec_ctx, call, set_cancelled_value, + get_final_status(call, set_cancelled_value, call->final_op.server.cancelled, NULL); } @@ -1351,25 +1326,24 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, if (bctl->completion_data.notify_tag.is_closure) { /* unrefs bctl->error */ bctl->call = NULL; - GRPC_CLOSURE_RUN( - exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); + GRPC_CLOSURE_RUN((grpc_closure *)bctl->completion_data.notify_tag.tag, + error); + GRPC_CALL_INTERNAL_UNREF(call, "completion"); } else { /* unrefs bctl->error */ - grpc_cq_end_op( - exec_ctx, bctl->call->cq, bctl->completion_data.notify_tag.tag, error, - finish_batch_completion, bctl, &bctl->completion_data.cq_completion); + grpc_cq_end_op(bctl->call->cq, bctl->completion_data.notify_tag.tag, error, + finish_batch_completion, bctl, + &bctl->completion_data.cq_completion); } } -static void finish_batch_step(grpc_exec_ctx *exec_ctx, batch_control *bctl) { +static void finish_batch_step(batch_control *bctl) { if (gpr_unref(&bctl->steps_to_complete)) { - post_batch_completion(exec_ctx, bctl); + post_batch_completion(bctl); } } -static void continue_receiving_slices(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { +static void continue_receiving_slices(batch_control *bctl) { grpc_error *error; grpc_call *call = bctl->call; for (;;) { @@ -1377,25 +1351,25 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx, (*call->receiving_buffer)->data.raw.slice_buffer.length; if (remaining == 0) { call->receiving_message = 0; - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); + grpc_byte_stream_destroy(call->receiving_stream); call->receiving_stream = NULL; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); return; } - if 
(grpc_byte_stream_next(exec_ctx, call->receiving_stream, remaining, + if (grpc_byte_stream_next(call->receiving_stream, remaining, &call->receiving_slice_ready)) { - error = grpc_byte_stream_pull(exec_ctx, call->receiving_stream, - &call->receiving_slice); + error = + grpc_byte_stream_pull(call->receiving_stream, &call->receiving_slice); if (error == GRPC_ERROR_NONE) { grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer, call->receiving_slice); } else { - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); + grpc_byte_stream_destroy(call->receiving_stream); call->receiving_stream = NULL; grpc_byte_buffer_destroy(*call->receiving_buffer); *call->receiving_buffer = NULL; call->receiving_message = 0; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); return; } } else { @@ -1404,8 +1378,7 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx, } } -static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error) { +static void receiving_slice_ready(void *bctlp, grpc_error *error) { batch_control *bctl = (batch_control *)bctlp; grpc_call *call = bctl->call; grpc_byte_stream *bs = call->receiving_stream; @@ -1413,11 +1386,11 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, if (error == GRPC_ERROR_NONE) { grpc_slice slice; - error = grpc_byte_stream_pull(exec_ctx, bs, &slice); + error = grpc_byte_stream_pull(bs, &slice); if (error == GRPC_ERROR_NONE) { grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer, slice); - continue_receiving_slices(exec_ctx, bctl); + continue_receiving_slices(bctl); } else { /* Error returned by grpc_byte_stream_pull needs to be released manually */ @@ -1429,25 +1402,24 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, if (GRPC_TRACER_ON(grpc_trace_operation_failures)) { GRPC_LOG_IF_ERROR("receiving_slice_ready", GRPC_ERROR_REF(error)); } - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); 
+ grpc_byte_stream_destroy(call->receiving_stream); call->receiving_stream = NULL; grpc_byte_buffer_destroy(*call->receiving_buffer); *call->receiving_buffer = NULL; call->receiving_message = 0; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); if (release_error) { GRPC_ERROR_UNREF(error); } } } -static void process_data_after_md(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { +static void process_data_after_md(batch_control *bctl) { grpc_call *call = bctl->call; if (call->receiving_stream == NULL) { *call->receiving_buffer = NULL; call->receiving_message = 0; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); } else { call->test_only_last_message_flags = call->receiving_stream->flags; if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) && @@ -1459,46 +1431,42 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx, } GRPC_CLOSURE_INIT(&call->receiving_slice_ready, receiving_slice_ready, bctl, grpc_schedule_on_exec_ctx); - continue_receiving_slices(exec_ctx, bctl); + continue_receiving_slices(bctl); } } -static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error) { +static void receiving_stream_ready(void *bctlp, grpc_error *error) { batch_control *bctl = (batch_control *)bctlp; grpc_call *call = bctl->call; if (error != GRPC_ERROR_NONE) { if (call->receiving_stream != NULL) { - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); + grpc_byte_stream_destroy(call->receiving_stream); call->receiving_stream = NULL; } - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), true); - cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_ERROR_REF(error)); + add_batch_error(bctl, GRPC_ERROR_REF(error), true); + cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error)); } /* If recv_state is RECV_NONE, we will save the batch_control * object with rel_cas, and will not use it after the cas. 
Its corresponding * acq_load is in receiving_initial_metadata_ready() */ if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL || !gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) { - process_data_after_md(exec_ctx, bctl); + process_data_after_md(bctl); } } // The recv_message_ready callback used when sending a batch containing // a recv_message op down the filter stack. Yields the call combiner // before processing the received message. -static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *bctlp, +static void receiving_stream_ready_in_call_combiner(void *bctlp, grpc_error *error) { batch_control *bctl = (batch_control *)bctlp; grpc_call *call = bctl->call; - GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready"); - receiving_stream_ready(exec_ctx, bctlp, error); + GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_message_ready"); + receiving_stream_ready(bctlp, error); } -static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { +static void validate_filtered_metadata(batch_control *bctl) { grpc_call *call = bctl->call; /* validate compression algorithms */ if (call->incoming_stream_compression_algorithm != @@ -1512,8 +1480,8 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, gpr_asprintf(&error_msg, "Invalid stream compression algorithm value '%d'.", algo); gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED, + error_msg); } else if (grpc_compression_options_is_stream_compression_algorithm_enabled( &compression_options, algo) == 0) { /* check if algorithm is supported by current channel config */ @@ -1522,8 +1490,8 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.", algo_name); 
gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED, + error_msg); } gpr_free(error_msg); @@ -1553,8 +1521,8 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, gpr_asprintf(&error_msg, "Invalid compression algorithm value '%d'.", algo); gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED, + error_msg); } else if (grpc_compression_options_is_algorithm_enabled( &compression_options, algo) == 0) { /* check if algorithm is supported by current channel config */ @@ -1563,8 +1531,8 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.", algo_name); gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED, + error_msg); } else { call->incoming_compression_algorithm = algo; } @@ -1587,34 +1555,31 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, } } -static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl, - grpc_error *error, bool has_cancelled) { +static void add_batch_error(batch_control *bctl, grpc_error *error, + bool has_cancelled) { if (error == GRPC_ERROR_NONE) return; int idx = (int)gpr_atm_full_fetch_add(&bctl->num_errors, 1); if (idx == 0 && !has_cancelled) { - cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE, - GRPC_ERROR_REF(error)); + cancel_with_error(bctl->call, STATUS_FROM_CORE, GRPC_ERROR_REF(error)); } bctl->errors[idx] = error; } -static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, - void *bctlp, grpc_error *error) { +static void 
receiving_initial_metadata_ready(void *bctlp, grpc_error *error) { batch_control *bctl = (batch_control *)bctlp; grpc_call *call = bctl->call; - GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, - "recv_initial_metadata_ready"); + GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_initial_metadata_ready"); - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); + add_batch_error(bctl, GRPC_ERROR_REF(error), false); if (error == GRPC_ERROR_NONE) { grpc_metadata_batch *md = &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */]; - recv_initial_filter(exec_ctx, call, md); + recv_initial_filter(call, md); /* TODO(ctiller): this could be moved into recv_initial_filter now */ GPR_TIMER_BEGIN("validate_filtered_metadata", 0); - validate_filtered_metadata(exec_ctx, bctl); + validate_filtered_metadata(bctl); GPR_TIMER_END("validate_filtered_metadata", 0); if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) { @@ -1647,28 +1612,25 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, } } if (saved_rsr_closure != NULL) { - GRPC_CLOSURE_RUN(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_RUN(saved_rsr_closure, GRPC_ERROR_REF(error)); } - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); } -static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error) { +static void finish_batch(void *bctlp, grpc_error *error) { batch_control *bctl = (batch_control *)bctlp; grpc_call *call = bctl->call; - GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete"); - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); - finish_batch_step(exec_ctx, bctl); + GRPC_CALL_COMBINER_STOP(&call->call_combiner, "on_complete"); + add_batch_error(bctl, GRPC_ERROR_REF(error), false); + finish_batch_step(bctl); } -static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p, - grpc_cq_completion *completion) { +static void free_no_op_completion(void *p, grpc_cq_completion 
*completion) { gpr_free(completion); } -static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, - grpc_call *call, const grpc_op *ops, +static grpc_call_error call_start_batch(grpc_call *call, const grpc_op *ops, size_t nops, void *notify_tag, int is_notify_tag_closure) { size_t i; @@ -1686,11 +1648,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, if (!is_notify_tag_closure) { GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag)); grpc_cq_end_op( - exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE, - free_no_op_completion, NULL, + call->cq, notify_tag, GRPC_ERROR_NONE, free_no_op_completion, NULL, (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion))); } else { - GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED((grpc_closure *)notify_tag, GRPC_ERROR_NONE); } error = GRPC_CALL_OK; goto done; @@ -1790,7 +1751,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, stream_op->send_initial_metadata = true; call->sent_initial_metadata = true; if (!prepare_application_metadata( - exec_ctx, call, (int)op->data.send_initial_metadata.count, + call, (int)op->data.send_initial_metadata.count, op->data.send_initial_metadata.metadata, 0, call->is_client, &call->compression_md, (int)additional_metadata_count)) { error = GRPC_CALL_ERROR_INVALID_METADATA; @@ -1884,7 +1845,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, GPR_ASSERT(call->send_extra_metadata_count == 0); call->send_extra_metadata_count = 1; call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem( - exec_ctx, call->channel, op->data.send_status_from_server.status); + call->channel, op->data.send_status_from_server.status); { grpc_error *override_error = GRPC_ERROR_NONE; if (op->data.send_status_from_server.status != GRPC_STATUS_OK) { @@ -1893,7 +1854,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, } if (op->data.send_status_from_server.status_details != NULL) { 
call->send_extra_metadata[1].md = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, + GRPC_MDSTR_GRPC_MESSAGE, grpc_slice_ref_internal( *op->data.send_status_from_server.status_details)); call->send_extra_metadata_count++; @@ -1904,16 +1865,15 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, grpc_slice_from_copied_string(msg)); gpr_free(msg); } - set_status_from_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE, - override_error); + set_status_from_error(call, STATUS_FROM_API_OVERRIDE, override_error); } if (!prepare_application_metadata( - exec_ctx, call, + call, (int)op->data.send_status_from_server.trailing_metadata_count, op->data.send_status_from_server.trailing_metadata, 1, 1, NULL, 0)) { for (int n = 0; n < call->send_extra_metadata_count; n++) { - GRPC_MDELEM_UNREF(exec_ctx, call->send_extra_metadata[n].md); + GRPC_MDELEM_UNREF(call->send_extra_metadata[n].md); } call->send_extra_metadata_count = 0; error = GRPC_CALL_ERROR_INVALID_METADATA; @@ -2040,7 +2000,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, stream_op->on_complete = &bctl->finish_batch; gpr_atm_rel_store(&call->any_ops_sent_atm, 1); - execute_batch(exec_ctx, call, stream_op, &bctl->start_batch); + execute_batch(call, stream_op, &bctl->start_batch); done: GPR_TIMER_END("grpc_call_start_batch", 0); @@ -2050,15 +2010,15 @@ done_with_error: /* reverse any mutations that occured */ if (stream_op->send_initial_metadata) { call->sent_initial_metadata = false; - grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][0]); + grpc_metadata_batch_clear(&call->metadata_batch[0][0]); } if (stream_op->send_message) { call->sending_message = false; - grpc_byte_stream_destroy(exec_ctx, &call->sending_stream.base); + grpc_byte_stream_destroy(&call->sending_stream.base); } if (stream_op->send_trailing_metadata) { call->sent_final_op = false; - grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][1]); + 
grpc_metadata_batch_clear(&call->metadata_batch[0][1]); } if (stream_op->recv_initial_metadata) { call->received_initial_metadata = false; @@ -2074,7 +2034,7 @@ done_with_error: grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, size_t nops, void *tag, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_call_error err; GRPC_API_TRACE( @@ -2085,19 +2045,18 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, if (reserved != NULL) { err = GRPC_CALL_ERROR; } else { - err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0); + err = call_start_batch(call, ops, nops, tag, 0); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return err; } -grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx, - grpc_call *call, +grpc_call_error grpc_call_start_batch_and_execute(grpc_call *call, const grpc_op *ops, size_t nops, grpc_closure *closure) { - return call_start_batch(exec_ctx, call, ops, nops, closure, 1); + return call_start_batch(call, ops, nops, closure, 1); } void grpc_call_context_set(grpc_call *call, grpc_context_index elem, diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h index 27c2f5243c..eba96e04b9 100644 --- a/src/core/lib/surface/call.h +++ b/src/core/lib/surface/call.h @@ -30,8 +30,7 @@ extern "C" { #include <grpc/grpc.h> #include <grpc/impl/codegen/compression_types.h> -typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx, - grpc_call *call, int success, +typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success, void *user_data); typedef struct grpc_call_create_args { @@ -55,33 +54,28 @@ typedef struct grpc_call_create_args { /* Create a new call based on \a args. 
Regardless of success or failure, always returns a valid new call into *call */ -grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, - const grpc_call_create_args *args, +grpc_error *grpc_call_create(const grpc_call_create_args *args, grpc_call **call); -void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_completion_queue *cq); +void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq); #ifndef NDEBUG void grpc_call_internal_ref(grpc_call *call, const char *reason); -void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call, - const char *reason); +void grpc_call_internal_unref(grpc_call *call, const char *reason); #define GRPC_CALL_INTERNAL_REF(call, reason) \ grpc_call_internal_ref(call, reason) -#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \ - grpc_call_internal_unref(exec_ctx, call, reason) +#define GRPC_CALL_INTERNAL_UNREF(call, reason) \ + grpc_call_internal_unref(call, reason) #else void grpc_call_internal_ref(grpc_call *call); -void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call); +void grpc_call_internal_unref(grpc_call *call); #define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call) -#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \ - grpc_call_internal_unref(exec_ctx, call) +#define GRPC_CALL_INTERNAL_UNREF(call, reason) grpc_call_internal_unref(call) #endif grpc_call_stack *grpc_call_get_call_stack(grpc_call *call); -grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx, - grpc_call *call, +grpc_call_error grpc_call_start_batch_and_execute(grpc_call *call, const grpc_op *ops, size_t nops, grpc_closure *closure); diff --git a/src/core/lib/surface/call_details.cc b/src/core/lib/surface/call_details.cc index ea9208c7e3..01b19abefb 100644 --- a/src/core/lib/surface/call_details.cc +++ b/src/core/lib/surface/call_details.cc @@ -34,8 +34,8 @@ void grpc_call_details_init(grpc_call_details* cd) { void 
grpc_call_details_destroy(grpc_call_details* cd) { GRPC_API_TRACE("grpc_call_details_destroy(cd=%p)", 1, (cd)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, cd->method); - grpc_slice_unref_internal(&exec_ctx, cd->host); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + grpc_slice_unref_internal(cd->method); + grpc_slice_unref_internal(cd->host); + grpc_exec_ctx_finish(); } diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc index 860dcc82db..0511f561ac 100644 --- a/src/core/lib/surface/channel.cc +++ b/src/core/lib/surface/channel.cc @@ -69,23 +69,22 @@ struct grpc_channel { #define CHANNEL_FROM_TOP_ELEM(top_elem) \ CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem)) -static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); +static void destroy_channel(void *arg, grpc_error *error); grpc_channel *grpc_channel_create_with_builder( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, + grpc_channel_stack_builder *builder, grpc_channel_stack_type channel_stack_type) { char *target = gpr_strdup(grpc_channel_stack_builder_get_target(builder)); grpc_channel_args *args = grpc_channel_args_copy( grpc_channel_stack_builder_get_channel_arguments(builder)); grpc_channel *channel; if (channel_stack_type == GRPC_SERVER_CHANNEL) { - GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx); + GRPC_STATS_INC_SERVER_CHANNELS_CREATED(); } else { - GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx); + GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(); } grpc_error *error = grpc_channel_stack_builder_finish( - exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL, + builder, sizeof(grpc_channel), 1, destroy_channel, NULL, (void **)&channel); if (error != GRPC_ERROR_NONE) { gpr_log(GPR_ERROR, "channel stack builder failed: %s", @@ -114,10 +113,10 @@ grpc_channel *grpc_channel_create_with_builder( } else { if 
(!GRPC_MDISNULL(channel->default_authority)) { /* setting this takes precedence over anything else */ - GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority); + GRPC_MDELEM_UNREF(channel->default_authority); } channel->default_authority = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_AUTHORITY, + GRPC_MDSTR_AUTHORITY, grpc_slice_intern( grpc_slice_from_static_string(args->args[i].value.string))); } @@ -134,7 +133,7 @@ grpc_channel *grpc_channel_create_with_builder( GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); } else { channel->default_authority = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_AUTHORITY, + GRPC_MDSTR_AUTHORITY, grpc_slice_intern( grpc_slice_from_static_string(args->args[i].value.string))); } @@ -191,25 +190,23 @@ grpc_channel *grpc_channel_create_with_builder( } done: - grpc_channel_args_destroy(exec_ctx, args); + grpc_channel_args_destroy(args); return channel; } -grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, +grpc_channel *grpc_channel_create(const char *target, const grpc_channel_args *input_args, grpc_channel_stack_type channel_stack_type, grpc_transport *optional_transport) { grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create(); - grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder, - input_args); + grpc_channel_stack_builder_set_channel_arguments(builder, input_args); grpc_channel_stack_builder_set_target(builder, target); grpc_channel_stack_builder_set_transport(builder, optional_transport); - if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) { - grpc_channel_stack_builder_destroy(exec_ctx, builder); + if (!grpc_channel_init_create_stack(builder, channel_stack_type)) { + grpc_channel_stack_builder_destroy(builder); return NULL; } - return grpc_channel_create_with_builder(exec_ctx, builder, - channel_stack_type); + return grpc_channel_create_with_builder(builder, channel_stack_type); } size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) { 
@@ -251,18 +248,18 @@ char *grpc_channel_get_target(grpc_channel *channel) { void grpc_channel_get_info(grpc_channel *channel, const grpc_channel_info *channel_info) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_channel_element *elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0); - elem->filter->get_channel_info(&exec_ctx, elem, channel_info); - grpc_exec_ctx_finish(&exec_ctx); + elem->filter->get_channel_info(elem, channel_info); + grpc_exec_ctx_finish(); } static grpc_call *grpc_channel_create_call_internal( - grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, - uint32_t propagation_mask, grpc_completion_queue *cq, - grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem, - grpc_mdelem authority_mdelem, grpc_millis deadline) { + grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, + grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative, + grpc_mdelem path_mdelem, grpc_mdelem authority_mdelem, + grpc_millis deadline) { grpc_mdelem send_metadata[2]; size_t num_metadata = 0; @@ -289,7 +286,7 @@ static grpc_call *grpc_channel_create_call_internal( args.send_deadline = deadline; grpc_call *call; - GRPC_LOG_IF_ERROR("call_create", grpc_call_create(exec_ctx, &args, &call)); + GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call)); return call; } @@ -300,29 +297,27 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel, grpc_slice method, const grpc_slice *host, gpr_timespec deadline, void *reserved) { GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_call *call = grpc_channel_create_call_internal( - &exec_ctx, channel, parent_call, propagation_mask, cq, NULL, - grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_PATH, - grpc_slice_ref_internal(method)), - host != NULL ? 
grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY, + channel, parent_call, propagation_mask, cq, NULL, + grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)), + host != NULL ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY, grpc_slice_ref_internal(*host)) : GRPC_MDNULL, grpc_timespec_to_millis_round_up(deadline)); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return call; } grpc_call *grpc_channel_create_pollset_set_call( - grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, - uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method, - const grpc_slice *host, grpc_millis deadline, void *reserved) { + grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, + grpc_pollset_set *pollset_set, grpc_slice method, const grpc_slice *host, + grpc_millis deadline, void *reserved) { GPR_ASSERT(!reserved); return grpc_channel_create_call_internal( - exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set, - grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, - grpc_slice_ref_internal(method)), - host != NULL ? grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY, + channel, parent_call, propagation_mask, NULL, pollset_set, + grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)), + host != NULL ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY, grpc_slice_ref_internal(*host)) : GRPC_MDNULL, deadline); @@ -335,21 +330,21 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method, "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)", 4, (channel, method, host, reserved)); GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; rc->path = grpc_mdelem_from_slices( - &exec_ctx, GRPC_MDSTR_PATH, + GRPC_MDSTR_PATH, grpc_slice_intern(grpc_slice_from_static_string(method))); rc->authority = host ? 
grpc_mdelem_from_slices( - &exec_ctx, GRPC_MDSTR_AUTHORITY, + GRPC_MDSTR_AUTHORITY, grpc_slice_intern(grpc_slice_from_static_string(host))) : GRPC_MDNULL; gpr_mu_lock(&channel->registered_call_mu); rc->next = channel->registered_calls; channel->registered_calls = rc; gpr_mu_unlock(&channel->registered_call_mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return rc; } @@ -369,12 +364,12 @@ grpc_call *grpc_channel_create_registered_call( registered_call_handle, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type, reserved)); GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_call *call = grpc_channel_create_call_internal( - &exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL, + channel, parent_call, propagation_mask, completion_queue, NULL, GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), grpc_timespec_to_millis_round_up(deadline)); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return call; } @@ -389,23 +384,21 @@ void grpc_channel_internal_ref(grpc_channel *c REF_ARG) { GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); } -void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, - grpc_channel *c REF_ARG) { - GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); +void grpc_channel_internal_unref(grpc_channel *c REF_ARG) { + GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); } -static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void destroy_channel(void *arg, grpc_error *error) { grpc_channel *channel = (grpc_channel *)arg; - grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel)); + grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel)); while (channel->registered_calls) { registered_call *rc = channel->registered_calls; channel->registered_calls = rc->next; - GRPC_MDELEM_UNREF(exec_ctx, rc->path); - 
GRPC_MDELEM_UNREF(exec_ctx, rc->authority); + GRPC_MDELEM_UNREF(rc->path); + GRPC_MDELEM_UNREF(rc->authority); gpr_free(rc); } - GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority); + GRPC_MDELEM_UNREF(channel->default_authority); gpr_mu_destroy(&channel->registered_call_mu); gpr_free(channel->target); gpr_free(channel); @@ -414,16 +407,16 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, void grpc_channel_destroy(grpc_channel *channel) { grpc_transport_op *op = grpc_make_transport_op(NULL); grpc_channel_element *elem; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel)); op->disconnect_with_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed"); elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0); - elem->filter->start_transport_op(&exec_ctx, elem, op); + elem->filter->start_transport_op(elem, op); - GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "channel"); + GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel"); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) { @@ -435,8 +428,7 @@ grpc_compression_options grpc_channel_compression_options( return channel->compression_options; } -grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx, - grpc_channel *channel, int i) { +grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) { char tmp[GPR_LTOA_MIN_BUFSIZE]; switch (i) { case 0: @@ -447,6 +439,6 @@ grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx, return GRPC_MDELEM_GRPC_STATUS_2; } gpr_ltoa(i, tmp); - return grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS, + return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp)); } diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h index 4d1c7e369f..dd8b8983ba 100644 --- 
a/src/core/lib/surface/channel.h +++ b/src/core/lib/surface/channel.h @@ -27,13 +27,13 @@ extern "C" { #endif -grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, +grpc_channel *grpc_channel_create(const char *target, const grpc_channel_args *args, grpc_channel_stack_type channel_stack_type, grpc_transport *optional_transport); grpc_channel *grpc_channel_create_with_builder( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, + grpc_channel_stack_builder *builder, grpc_channel_stack_type channel_stack_type); /** Create a call given a grpc_channel, in order to call \a method. @@ -45,9 +45,9 @@ grpc_channel *grpc_channel_create_with_builder( properties from the server call to this new client call, depending on the value of \a propagation_mask (see propagation_bits.h for possible values) */ grpc_call *grpc_channel_create_pollset_set_call( - grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, - uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method, - const grpc_slice *host, grpc_millis deadline, void *reserved); + grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, + grpc_pollset_set *pollset_set, grpc_slice method, const grpc_slice *host, + grpc_millis deadline, void *reserved); /** Get a (borrowed) pointer to this channels underlying channel stack */ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel); @@ -56,8 +56,7 @@ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel); status_code. The returned elem is owned by the caller. 
*/ -grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx, - grpc_channel *channel, +grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel *channel, int status_code); size_t grpc_channel_get_call_size_estimate(grpc_channel *channel); @@ -65,20 +64,18 @@ void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size); #ifndef NDEBUG void grpc_channel_internal_ref(grpc_channel *channel, const char *reason); -void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel, - const char *reason); +void grpc_channel_internal_unref(grpc_channel *channel, const char *reason); #define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \ grpc_channel_internal_ref(channel, reason) -#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \ - grpc_channel_internal_unref(exec_ctx, channel, reason) +#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \ + grpc_channel_internal_unref(channel, reason) #else void grpc_channel_internal_ref(grpc_channel *channel); -void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, - grpc_channel *channel); +void grpc_channel_internal_unref(grpc_channel *channel); #define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \ grpc_channel_internal_ref(channel) -#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \ - grpc_channel_internal_unref(exec_ctx, channel) +#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \ + grpc_channel_internal_unref(channel) #endif /** Return the channel's compression options. 
*/ diff --git a/src/core/lib/surface/channel_init.cc b/src/core/lib/surface/channel_init.cc index 33f444b89e..a2ea3160c0 100644 --- a/src/core/lib/surface/channel_init.cc +++ b/src/core/lib/surface/channel_init.cc @@ -89,8 +89,7 @@ void grpc_channel_init_shutdown(void) { } } -bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +bool grpc_channel_init_create_stack(grpc_channel_stack_builder *builder, grpc_channel_stack_type type) { GPR_ASSERT(g_finalized); @@ -99,7 +98,7 @@ bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx, for (size_t i = 0; i < g_slots[type].num_slots; i++) { const stage_slot *slot = &g_slots[type].slots[i]; - if (!slot->fn(exec_ctx, builder, slot->arg)) { + if (!slot->fn(builder, slot->arg)) { return false; } } diff --git a/src/core/lib/surface/channel_init.h b/src/core/lib/surface/channel_init.h index 5f109332ad..1c5e0a2976 100644 --- a/src/core/lib/surface/channel_init.h +++ b/src/core/lib/surface/channel_init.h @@ -36,8 +36,7 @@ extern "C" { /// One stage of mutation: call functions against \a builder to influence the /// finally constructed channel stack -typedef bool (*grpc_channel_init_stage)(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +typedef bool (*grpc_channel_init_stage)(grpc_channel_stack_builder *builder, void *arg); /// Global initialization of the system @@ -70,8 +69,7 @@ void grpc_channel_init_shutdown(void); /// \a optional_transport is either NULL or a constructed transport object /// Returns a pointer to the base of the memory allocated (the actual channel /// stack object will be prefix_bytes past that pointer) -bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +bool grpc_channel_init_create_stack(grpc_channel_stack_builder *builder, grpc_channel_stack_type type); #ifdef __cplusplus diff --git a/src/core/lib/surface/channel_ping.cc b/src/core/lib/surface/channel_ping.cc index f45b568958..5d41b8e9e4 
100644 --- a/src/core/lib/surface/channel_ping.cc +++ b/src/core/lib/surface/channel_ping.cc @@ -33,15 +33,14 @@ typedef struct { grpc_cq_completion completion_storage; } ping_result; -static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg, - grpc_cq_completion *storage) { +static void ping_destroy(void *arg, grpc_cq_completion *storage) { gpr_free(arg); } -static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { +static void ping_done(void *arg, grpc_error *error) { ping_result *pr = (ping_result *)arg; - grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy, - pr, &pr->completion_storage); + grpc_cq_end_op(pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy, pr, + &pr->completion_storage); } void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq, @@ -52,7 +51,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq, ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr)); grpc_channel_element *top_elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GPR_ASSERT(reserved == NULL); pr->tag = tag; pr->cq = cq; @@ -60,6 +59,6 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq, op->send_ping = &pr->closure; op->bind_pollset = grpc_cq_pollset(cq); GPR_ASSERT(grpc_cq_begin_op(cq, tag)); - top_elem->filter->start_transport_op(&exec_ctx, top_elem, op); - grpc_exec_ctx_finish(&exec_ctx); + top_elem->filter->start_transport_op(top_elem, op); + grpc_exec_ctx_finish(); } diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc index 21664f03c8..8abcb70d25 100644 --- a/src/core/lib/surface/completion_queue.cc +++ b/src/core/lib/surface/completion_queue.cc @@ -58,13 +58,12 @@ typedef struct { bool can_listen; size_t (*size)(void); void (*init)(grpc_pollset *pollset, gpr_mu **mu); - grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, 
grpc_pollset *pollset, + grpc_error *(*kick)(grpc_pollset *pollset, grpc_pollset_worker *specific_worker); - grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, grpc_millis deadline); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); + grpc_error *(*work)(grpc_pollset *pollset, grpc_pollset_worker **worker, + grpc_millis deadline); + void (*shutdown)(grpc_pollset *pollset, grpc_closure *closure); + void (*destroy)(grpc_pollset *pollset); } cq_poller_vtable; typedef struct non_polling_worker { @@ -90,14 +89,12 @@ static void non_polling_poller_init(grpc_pollset *pollset, gpr_mu **mu) { *mu = &npp->mu; } -static void non_polling_poller_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static void non_polling_poller_destroy(grpc_pollset *pollset) { non_polling_poller *npp = (non_polling_poller *)pollset; gpr_mu_destroy(&npp->mu); } -static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, +static grpc_error *non_polling_poller_work(grpc_pollset *pollset, grpc_pollset_worker **worker, grpc_millis deadline) { non_polling_poller *npp = (non_polling_poller *)pollset; @@ -122,7 +119,7 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx, npp->root = w.next; if (&w == npp->root) { if (npp->shutdown) { - GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(npp->shutdown, GRPC_ERROR_NONE); } npp->root = NULL; } @@ -135,8 +132,7 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx, } static grpc_error *non_polling_poller_kick( - grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) { + grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { non_polling_poller *p = (non_polling_poller *)pollset; if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root; 
if (specific_worker != NULL) { @@ -149,14 +145,13 @@ static grpc_error *non_polling_poller_kick( return GRPC_ERROR_NONE; } -static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, +static void non_polling_poller_shutdown(grpc_pollset *pollset, grpc_closure *closure) { non_polling_poller *p = (non_polling_poller *)pollset; GPR_ASSERT(closure != NULL); p->shutdown = closure; if (p->root == NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } else { non_polling_worker *w = p->root; do { @@ -183,13 +178,11 @@ typedef struct cq_vtable { grpc_cq_completion_type cq_completion_type; size_t data_size; void (*init)(void *data); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq); + void (*shutdown)(grpc_completion_queue *cq); void (*destroy)(void *data); bool (*begin_op)(grpc_completion_queue *cq, void *tag); - void (*end_op)(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, void *tag, - grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage), + void (*end_op)(grpc_completion_queue *cq, void *tag, grpc_error *error, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage); grpc_event (*next)(grpc_completion_queue *cq, gpr_timespec deadline, void *reserved); @@ -274,31 +267,23 @@ struct grpc_completion_queue { }; /* Forward declarations */ -static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); -static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); -static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); -static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); +static void cq_finish_shutdown_next(grpc_completion_queue *cq); +static void cq_finish_shutdown_pluck(grpc_completion_queue *cq); +static void 
cq_shutdown_next(grpc_completion_queue *cq); +static void cq_shutdown_pluck(grpc_completion_queue *cq); static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag); static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag); -static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, +static void cq_end_op_for_next(grpc_completion_queue *cq, void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage); -static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, +static void cq_end_op_for_pluck(grpc_completion_queue *cq, void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage); @@ -342,8 +327,7 @@ grpc_tracer_flag grpc_cq_event_timeout_trace = gpr_free(_ev); \ } -static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cq, - grpc_error *error); +static void on_pollset_shutdown_done(void *cq, grpc_error *error); static void cq_event_queue_init(grpc_cq_event_queue *q) { gpr_mpscq_init(&q->queue); @@ -362,23 +346,23 @@ static bool cq_event_queue_push(grpc_cq_event_queue *q, grpc_cq_completion *c) { static grpc_cq_completion *cq_event_queue_pop(grpc_cq_event_queue *q) { grpc_cq_completion *c = NULL; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; if (gpr_spinlock_trylock(&q->queue_lock)) { - GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(&exec_ctx); + GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(); bool is_empty = false; c = (grpc_cq_completion *)gpr_mpscq_pop_and_check_end(&q->queue, &is_empty); gpr_spinlock_unlock(&q->queue_lock); if (c == NULL && !is_empty) { - GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(&exec_ctx); + 
GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(); } } else { - GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(&exec_ctx); + GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(); } - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); if (c) { gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1); @@ -409,9 +393,9 @@ grpc_completion_queue *grpc_completion_queue_create_internal( const cq_poller_vtable *poller_vtable = &g_poller_vtable_by_poller_type[polling_type]; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_STATS_INC_CQS_CREATED(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + GRPC_STATS_INC_CQS_CREATED(); + grpc_exec_ctx_finish(); cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size + @@ -493,15 +477,14 @@ void grpc_cq_internal_ref(grpc_completion_queue *cq) { gpr_ref(&cq->owning_refs); } -static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_pollset_shutdown_done(void *arg, grpc_error *error) { grpc_completion_queue *cq = (grpc_completion_queue *)arg; - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy"); + GRPC_CQ_INTERNAL_UNREF(cq, "pollset_destroy"); } #ifndef NDEBUG -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, - const char *reason, const char *file, int line) { +void grpc_cq_internal_unref(grpc_completion_queue *cq, const char *reason, + const char *file, int line) { if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) { gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, @@ -509,12 +492,11 @@ void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, reason); } #else -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { +void grpc_cq_internal_unref(grpc_completion_queue *cq) { #endif if (gpr_unref(&cq->owning_refs)) { cq->vtable->destroy(DATA_FROM_CQ(cq)); - cq->poller_vtable->destroy(exec_ctx, 
POLLSET_FROM_CQ(cq)); + cq->poller_vtable->destroy(POLLSET_FROM_CQ(cq)); #ifndef NDEBUG gpr_free(cq->outstanding_tags); #endif @@ -595,11 +577,9 @@ bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) { /* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a * completion * type of GRPC_CQ_NEXT) */ -static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, +static void cq_end_op_for_next(grpc_completion_queue *cq, void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage) { GPR_TIMER_BEGIN("cq_end_op_for_next", 0); @@ -609,9 +589,9 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, error != GRPC_ERROR_NONE)) { const char *errmsg = grpc_error_string(error); GRPC_API_TRACE( - "cq_end_op_for_next(exec_ctx=%p, cq=%p, tag=%p, error=%s, " + "cq_end_op_for_next(=%p, cq=%p, tag=%p, error=%s, " "done=%p, done_arg=%p, storage=%p)", - 7, (exec_ctx, cq, tag, errmsg, done, done_arg, storage)); + 7, (&exec_ctx, cq, tag, errmsg, done, done_arg, storage)); if (GRPC_TRACER_ON(grpc_trace_operation_failures) && error != GRPC_ERROR_NONE) { gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg); @@ -643,7 +623,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, if (is_first) { gpr_mu_lock(cq->mu); grpc_error *kick_error = - cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL); + cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL); gpr_mu_unlock(cq->mu); if (kick_error != GRPC_ERROR_NONE) { @@ -655,17 +635,17 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); gpr_mu_lock(cq->mu); - cq_finish_shutdown_next(exec_ctx, cq); + cq_finish_shutdown_next(cq); gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); + 
GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); } } else { GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); gpr_atm_rel_store(&cqd->pending_events, 0); gpr_mu_lock(cq->mu); - cq_finish_shutdown_next(exec_ctx, cq); + cq_finish_shutdown_next(cq); gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); } GPR_TIMER_END("cq_end_op_for_next", 0); @@ -676,11 +656,9 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, /* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a * completion * type of GRPC_CQ_PLUCK) */ -static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, +static void cq_end_op_for_pluck(grpc_completion_queue *cq, void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage) { cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); @@ -693,9 +671,9 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, error != GRPC_ERROR_NONE)) { const char *errmsg = grpc_error_string(error); GRPC_API_TRACE( - "cq_end_op_for_pluck(exec_ctx=%p, cq=%p, tag=%p, error=%s, " + "cq_end_op_for_pluck(=%p, cq=%p, tag=%p, error=%s, " "done=%p, done_arg=%p, storage=%p)", - 7, (exec_ctx, cq, tag, errmsg, done, done_arg, storage)); + 7, (&exec_ctx, cq, tag, errmsg, done, done_arg, storage)); if (GRPC_TRACER_ON(grpc_trace_operation_failures) && error != GRPC_ERROR_NONE) { gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg); @@ -717,7 +695,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, cqd->completed_tail = storage; if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - cq_finish_shutdown_pluck(exec_ctx, cq); + cq_finish_shutdown_pluck(cq); gpr_mu_unlock(cq->mu); } else { grpc_pollset_worker *pluck_worker = NULL; @@ -729,7 +707,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, } 
grpc_error *kick_error = - cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker); + cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker); gpr_mu_unlock(cq->mu); @@ -746,12 +724,10 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(error); } -void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, - void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage), +void grpc_cq_end_op(grpc_completion_queue *cq, void *tag, grpc_error *error, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage) { - cq->vtable->end_op(exec_ctx, cq, tag, error, done, done_arg, storage); + cq->vtable->end_op(cq, tag, error, done, done_arg, storage); } typedef struct { @@ -763,7 +739,7 @@ typedef struct { bool first_loop; } cq_is_finished_arg; -static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) { +static bool cq_is_next_finished(void *arg) { cq_is_finished_arg *a = (cq_is_finished_arg *)arg; grpc_completion_queue *cq = a->cq; cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); @@ -786,7 +762,7 @@ static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) { return true; } } - return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx); + return !a->first_loop && a->deadline < grpc_exec_ctx_now(); } #ifndef NDEBUG @@ -841,8 +817,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, NULL, NULL, true}; - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg); + ExecCtx _local_exec_ctx(0, cq_is_next_finished, &is_finished_arg); for (;;) { grpc_millis iteration_deadline = deadline_millis; @@ -852,7 +827,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, ret.type = GRPC_OP_COMPLETE; ret.success = c->next & 1u; ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); + c->done(c->done_arg, c); break; } 
@@ -862,7 +837,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, ret.type = GRPC_OP_COMPLETE; ret.success = c->next & 1u; ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); + c->done(c->done_arg, c); break; } else { /* If c == NULL it means either the queue is empty OR in an transient @@ -892,8 +867,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, break; } - if (!is_finished_arg.first_loop && - grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) { + if (!is_finished_arg.first_loop && grpc_exec_ctx_now() >= deadline_millis) { memset(&ret, 0, sizeof(ret)); ret.type = GRPC_QUEUE_TIMEOUT; dump_pending_tags(cq); @@ -903,8 +877,8 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, /* The main polling work happens in grpc_pollset_work */ gpr_mu_lock(cq->mu); cq->num_polls++; - grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq), - NULL, iteration_deadline); + grpc_error *err = + cq->poller_vtable->work(POLLSET_FROM_CQ(cq), NULL, iteration_deadline); gpr_mu_unlock(cq->mu); if (err != GRPC_ERROR_NONE) { @@ -923,13 +897,13 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, if (cq_event_queue_num_items(&cqd->queue) > 0 && gpr_atm_acq_load(&cqd->pending_events) > 0) { gpr_mu_lock(cq->mu); - cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL); + cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL); gpr_mu_unlock(cq->mu); } GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret); - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "next"); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CQ_INTERNAL_UNREF(cq, "next"); + grpc_exec_ctx_finish(); GPR_ASSERT(is_finished_arg.stolen_completion == NULL); GPR_TIMER_END("grpc_completion_queue_next", 0); @@ -943,19 +917,16 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, - Must be called only once in completion queue's lifetime - grpc_completion_queue_shutdown() MUST have been called before 
calling this function */ -static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { +static void cq_finish_shutdown_next(grpc_completion_queue *cq) { cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); GPR_ASSERT(cqd->shutdown_called); GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0); - cq->poller_vtable->shutdown(exec_ctx, POLLSET_FROM_CQ(cq), - &cq->pollset_shutdown_done); + cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done); } -static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { +static void cq_shutdown_next(grpc_completion_queue *cq) { cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); /* Need an extra ref for cq here because: @@ -968,7 +939,7 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, gpr_mu_lock(cq->mu); if (cqd->shutdown_called) { gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); return; } cqd->shutdown_called = true; @@ -976,10 +947,10 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, * cq_begin_op_for_next and and cq_end_op_for_next functions which read/write * on this counter without necessarily holding a lock on cq */ if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - cq_finish_shutdown_next(exec_ctx, cq); + cq_finish_shutdown_next(cq); } gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); } grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, @@ -1012,7 +983,7 @@ static void del_plucker(grpc_completion_queue *cq, void *tag, GPR_UNREACHABLE_CODE(return ); } -static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) { +static bool cq_is_pluck_finished(void *arg) { cq_is_finished_arg *a = (cq_is_finished_arg *)arg; grpc_completion_queue *cq = a->cq; cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); @@ -1041,7 +1012,7 @@ static bool 
cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) { } gpr_mu_unlock(cq->mu); } - return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx); + return !a->first_loop && a->deadline < grpc_exec_ctx_now(); } static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, @@ -1078,8 +1049,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, NULL, tag, true}; - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, cq_is_pluck_finished, &is_finished_arg); + ExecCtx _local_exec_ctx(0, cq_is_pluck_finished, &is_finished_arg); for (;;) { if (is_finished_arg.stolen_completion != NULL) { gpr_mu_unlock(cq->mu); @@ -1088,7 +1058,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, ret.type = GRPC_OP_COMPLETE; ret.success = c->next & 1u; ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); + c->done(c->done_arg, c); break; } prev = &cqd->completed_head; @@ -1103,7 +1073,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, ret.type = GRPC_OP_COMPLETE; ret.success = c->next & 1u; ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); + c->done(c->done_arg, c); goto done; } prev = c; @@ -1126,8 +1096,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, dump_pending_tags(cq); break; } - if (!is_finished_arg.first_loop && - grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) { + if (!is_finished_arg.first_loop && grpc_exec_ctx_now() >= deadline_millis) { del_plucker(cq, tag, &worker); gpr_mu_unlock(cq->mu); memset(&ret, 0, sizeof(ret)); @@ -1136,8 +1105,8 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, break; } cq->num_polls++; - grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq), - &worker, deadline_millis); + grpc_error *err = + cq->poller_vtable->work(POLLSET_FROM_CQ(cq), &worker, deadline_millis); if (err != GRPC_ERROR_NONE) { del_plucker(cq, tag, &worker); gpr_mu_unlock(cq->mu); @@ -1155,8 +1124,8 @@ static grpc_event cq_pluck(grpc_completion_queue 
*cq, void *tag, } done: GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret); - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "pluck"); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CQ_INTERNAL_UNREF(cq, "pluck"); + grpc_exec_ctx_finish(); GPR_ASSERT(is_finished_arg.stolen_completion == NULL); GPR_TIMER_END("grpc_completion_queue_pluck", 0); @@ -1169,22 +1138,19 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag, return cq->vtable->pluck(cq, tag, deadline, reserved); } -static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { +static void cq_finish_shutdown_pluck(grpc_completion_queue *cq) { cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); GPR_ASSERT(cqd->shutdown_called); GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown)); gpr_atm_no_barrier_store(&cqd->shutdown, 1); - cq->poller_vtable->shutdown(exec_ctx, POLLSET_FROM_CQ(cq), - &cq->pollset_shutdown_done); + cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done); } /* NOTE: This function is almost exactly identical to cq_shutdown_next() but * merging them is a bit tricky and probably not worth it */ -static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { +static void cq_shutdown_pluck(grpc_completion_queue *cq) { cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); /* Need an extra ref for cq here because: @@ -1197,25 +1163,25 @@ static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx, gpr_mu_lock(cq->mu); if (cqd->shutdown_called) { gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)"); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)"); return; } cqd->shutdown_called = true; if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - cq_finish_shutdown_pluck(exec_ctx, cq); + cq_finish_shutdown_pluck(cq); } gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)"); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)"); } /* 
Shutdown simply drops a ref that we reserved at creation time; if we drop to zero here, then enter shutdown mode and wake up any waiters */ void grpc_completion_queue_shutdown(grpc_completion_queue *cq) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0); GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq)); - cq->vtable->shutdown(&exec_ctx, cq); - grpc_exec_ctx_finish(&exec_ctx); + cq->vtable->shutdown(cq); + grpc_exec_ctx_finish(); GPR_TIMER_END("grpc_completion_queue_shutdown", 0); } @@ -1224,9 +1190,9 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cq) { GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0); grpc_completion_queue_shutdown(cq); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); + ExecCtx _local_exec_ctx; + GRPC_CQ_INTERNAL_UNREF(cq, "destroy"); + grpc_exec_ctx_finish(); GPR_TIMER_END("grpc_completion_queue_destroy", 0); } diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h index 69d144bd95..304afb17f7 100644 --- a/src/core/lib/surface/completion_queue.h +++ b/src/core/lib/surface/completion_queue.h @@ -47,8 +47,7 @@ typedef struct grpc_cq_completion { void *tag; /** done callback - called when this queue element is no longer needed by the completion queue */ - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - struct grpc_cq_completion *c); + void (*done)(void *done_arg, struct grpc_cq_completion *c); void *done_arg; /** next pointer; low bit is used to indicate success or not */ uintptr_t next; @@ -57,17 +56,17 @@ typedef struct grpc_cq_completion { #ifndef NDEBUG void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason, const char *file, int line); -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc, - const char *reason, const char *file, int line); +void 
grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason, + const char *file, int line); #define GRPC_CQ_INTERNAL_REF(cc, reason) \ grpc_cq_internal_ref(cc, reason, __FILE__, __LINE__) -#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) \ - grpc_cq_internal_unref(ec, cc, reason, __FILE__, __LINE__) +#define GRPC_CQ_INTERNAL_UNREF(cc, reason) \ + grpc_cq_internal_unref(cc, reason, __FILE__, __LINE__) #else void grpc_cq_internal_ref(grpc_completion_queue *cc); -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc); +void grpc_cq_internal_unref(grpc_completion_queue *cc); #define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc) -#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) grpc_cq_internal_unref(ec, cc) +#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc) #endif /* Flag that an operation is beginning: the completion channel will not finish @@ -78,10 +77,8 @@ bool grpc_cq_begin_op(grpc_completion_queue *cc, void *tag); /* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to grpc_cq_begin_op */ -void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc, - void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage), +void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_error *error, + void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage); grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc); diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc index b089da2c54..16d2cd189d 100644 --- a/src/core/lib/surface/init.cc +++ b/src/core/lib/surface/init.cc @@ -67,14 +67,12 @@ static void do_basic_init(void) { g_initializations = 0; } -static bool append_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, void *arg) { +static bool append_filter(grpc_channel_stack_builder *builder, void *arg) { return 
grpc_channel_stack_builder_append_filter( builder, (const grpc_channel_filter *)arg, NULL, NULL); } -static bool prepend_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, void *arg) { +static bool prepend_filter(grpc_channel_stack_builder *builder, void *arg) { return grpc_channel_stack_builder_prepend_filter( builder, (const grpc_channel_filter *)arg, NULL, NULL); } @@ -117,7 +115,7 @@ void grpc_init(void) { int i; gpr_once_init(&g_basic_init, do_basic_init); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; gpr_mu_lock(&g_init_mu); if (++g_initializations == 1) { gpr_time_init(); @@ -150,7 +148,7 @@ void grpc_init(void) { grpc_register_tracer(&grpc_trace_metadata); #endif grpc_security_pre_init(); - grpc_iomgr_init(&exec_ctx); + grpc_iomgr_init(); gpr_timers_global_init(); grpc_handshaker_factory_registry_init(); grpc_security_init(); @@ -166,37 +164,36 @@ void grpc_init(void) { grpc_tracer_init("GRPC_TRACE"); /* no more changes to channel init pipelines */ grpc_channel_init_finalize(); - grpc_iomgr_start(&exec_ctx); + grpc_iomgr_start(); } gpr_mu_unlock(&g_init_mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); GRPC_API_TRACE("grpc_init(void)", 0, ()); } void grpc_shutdown(void) { int i; GRPC_API_TRACE("grpc_shutdown(void)", 0, ()); - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); + ExecCtx _local_exec_ctx(0, grpc_never_ready_to_finish, NULL); gpr_mu_lock(&g_init_mu); if (--g_initializations == 0) { - grpc_executor_shutdown(&exec_ctx); + grpc_executor_shutdown(); grpc_timer_manager_set_threading(false); // shutdown timer_manager thread for (i = g_number_of_plugins; i >= 0; i--) { if (g_all_of_the_plugins[i].destroy != NULL) { g_all_of_the_plugins[i].destroy(); } } - grpc_iomgr_shutdown(&exec_ctx); + grpc_iomgr_shutdown(); gpr_timers_global_destroy(); grpc_tracer_shutdown(); - grpc_mdctx_global_shutdown(&exec_ctx); - 
grpc_handshaker_factory_registry_shutdown(&exec_ctx); + grpc_mdctx_global_shutdown(); + grpc_handshaker_factory_registry_shutdown(); grpc_slice_intern_shutdown(); grpc_stats_shutdown(); } gpr_mu_unlock(&g_init_mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } int grpc_is_initialized(void) { diff --git a/src/core/lib/surface/init_secure.cc b/src/core/lib/surface/init_secure.cc index 8fbde3d1b4..9fc721b5e4 100644 --- a/src/core/lib/surface/init_secure.cc +++ b/src/core/lib/surface/init_secure.cc @@ -47,7 +47,7 @@ void grpc_security_pre_init(void) { } static bool maybe_prepend_client_auth_filter( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) { + grpc_channel_stack_builder *builder, void *arg) { const grpc_channel_args *args = grpc_channel_stack_builder_get_channel_arguments(builder); if (args) { @@ -62,7 +62,7 @@ static bool maybe_prepend_client_auth_filter( } static bool maybe_prepend_server_auth_filter( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) { + grpc_channel_stack_builder *builder, void *arg) { const grpc_channel_args *args = grpc_channel_stack_builder_get_channel_arguments(builder); if (args) { diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc index 88e26cbeb7..79131a7b68 100644 --- a/src/core/lib/surface/lame_client.cc +++ b/src/core/lib/surface/lame_client.cc @@ -51,8 +51,7 @@ struct ChannelData { const char *error_message; }; -static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_metadata_batch *mdb) { +static void fill_metadata(grpc_call_element *elem, grpc_metadata_batch *mdb) { CallData *calld = reinterpret_cast<CallData *>(elem->call_data); bool expected = false; if (!calld->filled_metadata.compare_exchange_strong( @@ -64,9 +63,9 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, char tmp[GPR_LTOA_MIN_BUFSIZE]; gpr_ltoa(chand->error_code, tmp); calld->status.md = 
grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp)); + GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp)); calld->details.md = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, + GRPC_MDSTR_GRPC_MESSAGE, grpc_slice_from_copied_string(chand->error_message)); calld->status.prev = calld->details.next = NULL; calld->status.next = &calld->details; @@ -78,69 +77,61 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } static void lame_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { + grpc_call_element *elem, grpc_transport_stream_op_batch *op) { CallData *calld = reinterpret_cast<CallData *>(elem->call_data); if (op->recv_initial_metadata) { - fill_metadata(exec_ctx, elem, + fill_metadata(elem, op->payload->recv_initial_metadata.recv_initial_metadata); } else if (op->recv_trailing_metadata) { - fill_metadata(exec_ctx, elem, + fill_metadata(elem, op->payload->recv_trailing_metadata.recv_trailing_metadata); } grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"), + op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"), calld->call_combiner); } -static void lame_get_channel_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static void lame_get_channel_info(grpc_channel_element *elem, const grpc_channel_info *channel_info) {} -static void lame_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static void lame_start_transport_op(grpc_channel_element *elem, grpc_transport_op *op) { if (op->on_connectivity_state_change) { GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN); *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN; - GRPC_CLOSURE_SCHED(exec_ctx, op->on_connectivity_state_change, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(op->on_connectivity_state_change, GRPC_ERROR_NONE); } if 
(op->send_ping != NULL) { - GRPC_CLOSURE_SCHED( - exec_ctx, op->send_ping, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel")); + GRPC_CLOSURE_SCHED(op->send_ping, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "lame client channel")); } GRPC_ERROR_UNREF(op->disconnect_with_error); if (op->on_consumed != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE); } } -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static grpc_error *init_call_elem(grpc_call_element *elem, const grpc_call_element_args *args) { CallData *calld = reinterpret_cast<CallData *>(elem->call_data); calld->call_combiner = args->call_combiner; return GRPC_ERROR_NONE; } -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +static void destroy_call_elem(grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *then_schedule_closure) { - GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE); } -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static grpc_error *init_channel_elem(grpc_channel_element *elem, grpc_channel_element_args *args) { GPR_ASSERT(args->is_first); GPR_ASSERT(args->is_last); return GRPC_ERROR_NONE; } -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) {} +static void destroy_channel_elem(grpc_channel_element *elem) {} } // namespace @@ -165,10 +156,10 @@ extern "C" const grpc_channel_filter grpc_lame_filter = { grpc_channel *grpc_lame_client_channel_create(const char *target, grpc_status_code error_code, const char *error_message) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; grpc_channel_element *elem; - grpc_channel *channel = grpc_channel_create(&exec_ctx, target, NULL, - GRPC_CLIENT_LAME_CHANNEL, NULL); + grpc_channel *channel 
= + grpc_channel_create(target, NULL, GRPC_CLIENT_LAME_CHANNEL, NULL); elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0); GRPC_API_TRACE( "grpc_lame_client_channel_create(target=%s, error_code=%d, " @@ -178,6 +169,6 @@ grpc_channel *grpc_lame_client_channel_create(const char *target, auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data); chand->error_code = error_code; chand->error_message = error_message; - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return channel; } diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc index dd09cb91de..cf6883bd7e 100644 --- a/src/core/lib/surface/server.cc +++ b/src/core/lib/surface/server.cc @@ -45,10 +45,9 @@ typedef struct listener { void *arg; - void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_pollset **pollsets, size_t pollset_count); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_closure *closure); + void (*start)(grpc_server *server, void *arg, grpc_pollset **pollsets, + size_t pollset_count); + void (*destroy)(grpc_server *server, void *arg, grpc_closure *closure); struct listener *next; grpc_closure destroy_done; } listener; @@ -231,13 +230,12 @@ struct grpc_server { #define SERVER_FROM_CALL_ELEM(elem) \ (((channel_data *)(elem)->channel_data)->server) -static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *calld, - grpc_error *error); -static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server, - size_t cq_idx, requested_call *rc, grpc_error *error); +static void publish_new_rpc(void *calld, grpc_error *error); +static void fail_call(grpc_server *server, size_t cq_idx, requested_call *rc, + grpc_error *error); /* Before calling maybe_finish_shutdown, we must hold mu_global and not hold mu_call */ -static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_server *server); +static void maybe_finish_shutdown(grpc_server *server); /* * channel broadcaster 
@@ -265,15 +263,14 @@ struct shutdown_cleanup_args { grpc_slice slice; }; -static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void shutdown_cleanup(void *arg, grpc_error *error) { struct shutdown_cleanup_args *a = (struct shutdown_cleanup_args *)arg; - grpc_slice_unref_internal(exec_ctx, a->slice); + grpc_slice_unref_internal(a->slice); gpr_free(a); } -static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, - bool send_goaway, grpc_error *send_disconnect) { +static void send_shutdown(grpc_channel *channel, bool send_goaway, + grpc_error *send_disconnect) { struct shutdown_cleanup_args *sc = (struct shutdown_cleanup_args *)gpr_malloc(sizeof(*sc)); GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc, @@ -291,19 +288,18 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, op->disconnect_with_error = send_disconnect; elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0); - elem->filter->start_transport_op(exec_ctx, elem, op); + elem->filter->start_transport_op(elem, op); } -static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx, - channel_broadcaster *cb, +static void channel_broadcaster_shutdown(channel_broadcaster *cb, bool send_goaway, grpc_error *force_disconnect) { size_t i; for (i = 0; i < cb->num_channels; i++) { - send_shutdown(exec_ctx, cb->channels[i], send_goaway, + send_shutdown(cb->channels[i], send_goaway, GRPC_ERROR_REF(force_disconnect)); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, cb->channels[i], "broadcast"); + GRPC_CHANNEL_INTERNAL_UNREF(cb->channels[i], "broadcast"); } gpr_free(cb->channels); GRPC_ERROR_UNREF(force_disconnect); @@ -332,13 +328,11 @@ static void request_matcher_destroy(request_matcher *rm) { gpr_free(rm->requests_per_cq); } -static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, - grpc_error *error) { +static void kill_zombie(void *elem, grpc_error *error) { 
grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem)); } -static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx, - request_matcher *rm) { +static void request_matcher_zombify_all_pending_calls(request_matcher *rm) { while (rm->pending_head) { call_data *calld = rm->pending_head; rm->pending_head = calld->pending_next; @@ -349,20 +343,18 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx, &calld->kill_zombie_closure, kill_zombie, grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); } } -static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx, - grpc_server *server, +static void request_matcher_kill_requests(grpc_server *server, request_matcher *rm, grpc_error *error) { int request_id; for (size_t i = 0; i < server->cq_count; i++) { while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) != -1) { - fail_call(exec_ctx, server, i, - &server->requested_calls_per_cq[i][request_id], + fail_call(server, i, &server->requested_calls_per_cq[i][request_id], GRPC_ERROR_REF(error)); } } @@ -377,10 +369,10 @@ static void server_ref(grpc_server *server) { gpr_ref(&server->internal_refcount); } -static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) { +static void server_delete(grpc_server *server) { registered_method *rm; size_t i; - grpc_channel_args_destroy(exec_ctx, server->channel_args); + grpc_channel_args_destroy(server->channel_args); gpr_mu_destroy(&server->mu_global); gpr_mu_destroy(&server->mu_call); gpr_cv_destroy(&server->starting_cv); @@ -397,7 +389,7 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) { request_matcher_destroy(&server->unregistered_request_matcher); } for (i = 0; i < server->cq_count; i++) { - GRPC_CQ_INTERNAL_UNREF(exec_ctx, 
server->cqs[i], "server"); + GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server"); if (server->started) { gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]); gpr_free(server->requested_calls_per_cq[i]); @@ -411,9 +403,9 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) { gpr_free(server); } -static void server_unref(grpc_exec_ctx *exec_ctx, grpc_server *server) { +static void server_unref(grpc_server *server) { if (gpr_unref(&server->internal_refcount)) { - server_delete(exec_ctx, server); + server_delete(server); } } @@ -427,21 +419,19 @@ static void orphan_channel(channel_data *chand) { chand->next = chand->prev = chand; } -static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd, - grpc_error *error) { +static void finish_destroy_channel(void *cd, grpc_error *error) { channel_data *chand = (channel_data *)cd; grpc_server *server = chand->server; - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server"); - server_unref(exec_ctx, server); + GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server"); + server_unref(server); } -static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand, - grpc_error *error) { +static void destroy_channel(channel_data *chand, grpc_error *error) { if (is_channel_orphaned(chand)) return; GPR_ASSERT(chand->server != NULL); orphan_channel(chand); server_ref(chand->server); - maybe_finish_shutdown(exec_ctx, chand->server); + maybe_finish_shutdown(chand->server); GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure, finish_destroy_channel, chand, grpc_schedule_on_exec_ctx); @@ -454,14 +444,12 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand, grpc_transport_op *op = grpc_make_transport_op(&chand->finish_destroy_channel_closure); op->set_accept_stream = true; - grpc_channel_next_op(exec_ctx, - grpc_channel_stack_element( + grpc_channel_next_op(grpc_channel_stack_element( grpc_channel_get_channel_stack(chand->channel), 0), op); } -static void 
done_request_event(grpc_exec_ctx *exec_ctx, void *req, - grpc_cq_completion *c) { +static void done_request_event(void *req, grpc_cq_completion *c) { requested_call *rc = (requested_call *)req; grpc_server *server = rc->server; @@ -476,12 +464,12 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, void *req, gpr_free(req); } - server_unref(exec_ctx, server); + server_unref(server); } -static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server, - call_data *calld, size_t cq_idx, requested_call *rc) { - grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call); +static void publish_call(grpc_server *server, call_data *calld, size_t cq_idx, + requested_call *rc) { + grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call); grpc_call *call = calld->call; *rc->call = call; calld->cq_new = server->cqs[cq_idx]; @@ -512,12 +500,11 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_stack_element(grpc_call_get_call_stack(call), 0); channel_data *chand = (channel_data *)elem->channel_data; server_ref(chand->server); - grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE, - done_request_event, rc, &rc->completion); + grpc_cq_end_op(calld->cq_new, rc->tag, GRPC_ERROR_NONE, done_request_event, + rc, &rc->completion); } -static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void publish_new_rpc(void *arg, grpc_error *error) { grpc_call_element *call_elem = (grpc_call_element *)arg; call_data *calld = (call_data *)call_elem->call_data; channel_data *chand = (channel_data *)call_elem->channel_data; @@ -532,8 +519,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, &calld->kill_zombie_closure, kill_zombie, grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, 
GRPC_ERROR_REF(error)); return; } @@ -543,18 +529,18 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, if (request_id == -1) { continue; } else { - GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i); + GRPC_STATS_INC_SERVER_CQS_CHECKED(i); gpr_mu_lock(&calld->mu_state); calld->state = ACTIVATED; gpr_mu_unlock(&calld->mu_state); - publish_call(exec_ctx, server, calld, cq_idx, + publish_call(server, calld, cq_idx, &server->requested_calls_per_cq[cq_idx][request_id]); return; /* early out */ } } /* no cq to take the request found: queue it on the slow list */ - GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx); + GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(); gpr_mu_lock(&server->mu_call); gpr_mu_lock(&calld->mu_state); calld->state = PENDING; @@ -570,8 +556,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, } static void finish_start_new_rpc( - grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem, - request_matcher *rm, + grpc_server *server, grpc_call_element *elem, request_matcher *rm, grpc_server_register_method_payload_handling payload_handling) { call_data *calld = (call_data *)elem->call_data; @@ -581,7 +566,7 @@ static void finish_start_new_rpc( gpr_mu_unlock(&calld->mu_state); GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); return; } @@ -589,7 +574,7 @@ static void finish_start_new_rpc( switch (payload_handling) { case GRPC_SRM_PAYLOAD_NONE: - publish_new_rpc(exec_ctx, elem, GRPC_ERROR_NONE); + publish_new_rpc(elem, GRPC_ERROR_NONE); break; case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: { grpc_op op; @@ -598,14 +583,13 @@ static void finish_start_new_rpc( op.data.recv_message.recv_message = &calld->payload; GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem, grpc_schedule_on_exec_ctx); - 
grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1, - &calld->publish); + grpc_call_start_batch_and_execute(calld->call, &op, 1, &calld->publish); break; } } } -static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { +static void start_new_rpc(grpc_call_element *elem) { channel_data *chand = (channel_data *)elem->channel_data; call_data *calld = (call_data *)elem->call_data; grpc_server *server = chand->server; @@ -630,8 +614,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) { continue; } - finish_start_new_rpc(exec_ctx, server, elem, - &rm->server_registered_method->matcher, + finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher, rm->server_registered_method->payload_handling); return; } @@ -648,14 +631,12 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) { continue; } - finish_start_new_rpc(exec_ctx, server, elem, - &rm->server_registered_method->matcher, + finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher, rm->server_registered_method->payload_handling); return; } } - finish_start_new_rpc(exec_ctx, server, elem, - &server->unregistered_request_matcher, + finish_start_new_rpc(server, elem, &server->unregistered_request_matcher, GRPC_SRM_PAYLOAD_NONE); } @@ -668,9 +649,8 @@ static int num_listeners(grpc_server *server) { return n; } -static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server, - grpc_cq_completion *completion) { - server_unref(exec_ctx, (grpc_server *)server); +static void done_shutdown_event(void *server, grpc_cq_completion *completion) { + server_unref((grpc_server *)server); } static int num_channels(grpc_server *server) { @@ -683,34 +663,30 @@ static int num_channels(grpc_server *server) { return n; } -static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx, - grpc_server *server, grpc_error *error) { 
+static void kill_pending_work_locked(grpc_server *server, grpc_error *error) { if (server->started) { - request_matcher_kill_requests(exec_ctx, server, - &server->unregistered_request_matcher, + request_matcher_kill_requests(server, &server->unregistered_request_matcher, GRPC_ERROR_REF(error)); request_matcher_zombify_all_pending_calls( - exec_ctx, &server->unregistered_request_matcher); + &server->unregistered_request_matcher); for (registered_method *rm = server->registered_methods; rm; rm = rm->next) { - request_matcher_kill_requests(exec_ctx, server, &rm->matcher, + request_matcher_kill_requests(server, &rm->matcher, GRPC_ERROR_REF(error)); - request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher); + request_matcher_zombify_all_pending_calls(&rm->matcher); } } GRPC_ERROR_UNREF(error); } -static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, - grpc_server *server) { +static void maybe_finish_shutdown(grpc_server *server) { size_t i; if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) { return; } kill_pending_work_locked( - exec_ctx, server, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); + server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); if (server->root_channel_data.next != &server->root_channel_data || server->listeners_destroyed < num_listeners(server)) { @@ -730,15 +706,13 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, server->shutdown_published = 1; for (i = 0; i < server->num_shutdown_tags; i++) { server_ref(server); - grpc_cq_end_op(exec_ctx, server->shutdown_tags[i].cq, - server->shutdown_tags[i].tag, GRPC_ERROR_NONE, - done_shutdown_event, server, + grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag, + GRPC_ERROR_NONE, done_shutdown_event, server, &server->shutdown_tags[i].completion); } } -static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, - grpc_error *error) { +static void server_on_recv_initial_metadata(void *ptr, 
grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)ptr; call_data *calld = (call_data *)elem->call_data; grpc_millis op_deadline; @@ -752,10 +726,10 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.authority->md)); calld->path_set = true; calld->host_set = true; - grpc_metadata_batch_remove(exec_ctx, calld->recv_initial_metadata, + grpc_metadata_batch_remove(calld->recv_initial_metadata, calld->recv_initial_metadata->idx.named.path); grpc_metadata_batch_remove( - exec_ctx, calld->recv_initial_metadata, + calld->recv_initial_metadata, calld->recv_initial_metadata->idx.named.authority); } else { GRPC_ERROR_REF(error); @@ -773,7 +747,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, GRPC_ERROR_UNREF(src_error); } - GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error); + GRPC_CLOSURE_RUN(calld->on_done_recv_initial_metadata, error); } static void server_mutate_op(grpc_call_element *elem, @@ -794,18 +768,16 @@ static void server_mutate_op(grpc_call_element *elem, } static void server_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { + grpc_call_element *elem, grpc_transport_stream_op_batch *op) { server_mutate_op(elem, op); - grpc_call_next_op(exec_ctx, elem, op); + grpc_call_next_op(elem, op); } -static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, - grpc_error *error) { +static void got_initial_metadata(void *ptr, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)ptr; call_data *calld = (call_data *)elem->call_data; if (error == GRPC_ERROR_NONE) { - start_new_rpc(exec_ctx, elem); + start_new_rpc(elem); } else { gpr_mu_lock(&calld->mu_state); if (calld->state == NOT_STARTED) { @@ -813,8 +785,7 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, gpr_mu_unlock(&calld->mu_state); 
GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); } else if (calld->state == PENDING) { calld->state = ZOMBIED; gpr_mu_unlock(&calld->mu_state); @@ -826,8 +797,7 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, } } -static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, - grpc_transport *transport, +static void accept_stream(void *cd, grpc_transport *transport, const void *transport_server_data) { channel_data *chand = (channel_data *)cd; /* create a call */ @@ -837,11 +807,11 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, args.server_transport_data = transport_server_data; args.send_deadline = GRPC_MILLIS_INF_FUTURE; grpc_call *call; - grpc_error *error = grpc_call_create(exec_ctx, &args, &call); + grpc_error *error = grpc_call_create(&args, &call); grpc_call_element *elem = grpc_call_stack_element(grpc_call_get_call_stack(call), 0); if (error != GRPC_ERROR_NONE) { - got_initial_metadata(exec_ctx, elem, error); + got_initial_metadata(elem, error); GRPC_ERROR_UNREF(error); return; } @@ -853,32 +823,28 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, &calld->initial_metadata; GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem, grpc_schedule_on_exec_ctx); - grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, - &calld->got_initial_metadata); + grpc_call_start_batch_and_execute(call, &op, 1, &calld->got_initial_metadata); } -static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd, - grpc_error *error) { +static void channel_connectivity_changed(void *cd, grpc_error *error) { channel_data *chand = (channel_data *)cd; grpc_server *server = chand->server; if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) { grpc_transport_op *op = grpc_make_transport_op(NULL); 
op->on_connectivity_state_change = &chand->channel_connectivity_changed, op->connectivity_state = &chand->connectivity_state; - grpc_channel_next_op(exec_ctx, - grpc_channel_stack_element( + grpc_channel_next_op(grpc_channel_stack_element( grpc_channel_get_channel_stack(chand->channel), 0), op); } else { gpr_mu_lock(&server->mu_global); - destroy_channel(exec_ctx, chand, GRPC_ERROR_REF(error)); + destroy_channel(chand, GRPC_ERROR_REF(error)); gpr_mu_unlock(&server->mu_global); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "connectivity"); + GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "connectivity"); } } -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, +static grpc_error *init_call_elem(grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; @@ -895,7 +861,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, +static void destroy_call_elem(grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { channel_data *chand = (channel_data *)elem->channel_data; @@ -904,21 +870,20 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, GPR_ASSERT(calld->state != PENDING); if (calld->host_set) { - grpc_slice_unref_internal(exec_ctx, calld->host); + grpc_slice_unref_internal(calld->host); } if (calld->path_set) { - grpc_slice_unref_internal(exec_ctx, calld->path); + grpc_slice_unref_internal(calld->path); } grpc_metadata_array_destroy(&calld->initial_metadata); grpc_byte_buffer_destroy(calld->payload); gpr_mu_destroy(&calld->mu_state); - server_unref(exec_ctx, chand->server); + server_unref(chand->server); } -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, +static grpc_error 
*init_channel_elem(grpc_channel_element *elem, grpc_channel_element_args *args) { channel_data *chand = (channel_data *)elem->channel_data; GPR_ASSERT(args->is_first); @@ -934,15 +899,14 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { +static void destroy_channel_elem(grpc_channel_element *elem) { size_t i; channel_data *chand = (channel_data *)elem->channel_data; if (chand->registered_methods) { for (i = 0; i < chand->registered_method_slots; i++) { - grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method); + grpc_slice_unref_internal(chand->registered_methods[i].method); if (chand->registered_methods[i].has_host) { - grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].host); + grpc_slice_unref_internal(chand->registered_methods[i].host); } } gpr_free(chand->registered_methods); @@ -952,9 +916,9 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, chand->next->prev = chand->prev; chand->prev->next = chand->next; chand->next = chand->prev = chand; - maybe_finish_shutdown(exec_ctx, chand->server); + maybe_finish_shutdown(chand->server); gpr_mu_unlock(&chand->server->mu_global); - server_unref(exec_ctx, chand->server); + server_unref(chand->server); } } @@ -1070,11 +1034,10 @@ void *grpc_server_register_method( return m; } -static void start_listeners(grpc_exec_ctx *exec_ctx, void *s, - grpc_error *error) { +static void start_listeners(void *s, grpc_error *error) { grpc_server *server = (grpc_server *)s; for (listener *l = server->listeners; l; l = l->next) { - l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count); + l->start(server, l->arg, server->pollsets, server->pollset_count); } gpr_mu_lock(&server->mu_global); @@ -1082,12 +1045,12 @@ static void start_listeners(grpc_exec_ctx *exec_ctx, void *s, gpr_cv_signal(&server->starting_cv); gpr_mu_unlock(&server->mu_global); - 
server_unref(exec_ctx, server); + server_unref(server); } void grpc_server_start(grpc_server *server) { size_t i; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server)); @@ -1123,12 +1086,11 @@ void grpc_server_start(grpc_server *server) { server_ref(server); server->starting = true; GRPC_CLOSURE_SCHED( - &exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server, grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)), GRPC_ERROR_NONE); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets, @@ -1137,8 +1099,7 @@ void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets, *pollsets = server->pollsets; } -void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, - grpc_transport *transport, +void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport, grpc_pollset *accepting_pollset, const grpc_channel_args *args) { size_t num_registered_methods; @@ -1153,8 +1114,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, uint32_t max_probes = 0; grpc_transport_op *op = NULL; - channel = - grpc_channel_create(exec_ctx, NULL, args, GRPC_SERVER_CHANNEL, transport); + channel = grpc_channel_create(NULL, args, GRPC_SERVER_CHANNEL, transport); chand = (channel_data *)grpc_channel_stack_element( grpc_channel_get_channel_stack(channel), 0) ->channel_data; @@ -1231,21 +1191,19 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, op->disconnect_with_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"); } - grpc_transport_perform_op(exec_ctx, transport, op); + grpc_transport_perform_op(transport, op); } -void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage) { +void done_published_shutdown(void *done_arg, grpc_cq_completion *storage) { (void)done_arg; gpr_free(storage); } -static 
void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s, - grpc_error *error) { +static void listener_destroy_done(void *s, grpc_error *error) { grpc_server *server = (grpc_server *)s; gpr_mu_lock(&server->mu_global); server->listeners_destroyed++; - maybe_finish_shutdown(exec_ctx, server); + maybe_finish_shutdown(server); gpr_mu_unlock(&server->mu_global); } @@ -1254,7 +1212,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server, listener *l; shutdown_tag *sdt; channel_broadcaster broadcaster; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3, (server, cq, tag)); @@ -1270,7 +1228,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server, GPR_ASSERT(grpc_cq_begin_op(cq, tag)); if (server->shutdown_published) { grpc_cq_end_op( - &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL, + cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL, (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion))); gpr_mu_unlock(&server->mu_global); goto done; @@ -1295,30 +1253,29 @@ void grpc_server_shutdown_and_notify(grpc_server *server, /* collect all unregistered then registered calls */ gpr_mu_lock(&server->mu_call); kill_pending_work_locked( - &exec_ctx, server, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); + server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); gpr_mu_unlock(&server->mu_call); - maybe_finish_shutdown(&exec_ctx, server); + maybe_finish_shutdown(server); gpr_mu_unlock(&server->mu_global); /* Shutdown listeners */ for (l = server->listeners; l; l = l->next) { GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server, grpc_schedule_on_exec_ctx); - l->destroy(&exec_ctx, server, l->arg, &l->destroy_done); + l->destroy(server, l->arg, &l->destroy_done); } - channel_broadcaster_shutdown(&exec_ctx, &broadcaster, true /* send_goaway */, + channel_broadcaster_shutdown(&broadcaster, true /* send_goaway 
*/, GRPC_ERROR_NONE); done: - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } void grpc_server_cancel_all_calls(grpc_server *server) { channel_broadcaster broadcaster; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server)); @@ -1327,14 +1284,14 @@ void grpc_server_cancel_all_calls(grpc_server *server) { gpr_mu_unlock(&server->mu_global); channel_broadcaster_shutdown( - &exec_ctx, &broadcaster, false /* send_goaway */, + &broadcaster, false /* send_goaway */, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls")); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); } void grpc_server_destroy(grpc_server *server) { listener *l; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server)); @@ -1350,16 +1307,16 @@ void grpc_server_destroy(grpc_server *server) { gpr_mu_unlock(&server->mu_global); - server_unref(&exec_ctx, server); - grpc_exec_ctx_finish(&exec_ctx); + server_unref(server); + grpc_exec_ctx_finish(); } -void grpc_server_add_listener( - grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_pollset **pollsets, size_t pollset_count), - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_closure *on_done)) { +void grpc_server_add_listener(grpc_server *server, void *arg, + void (*start)(grpc_server *server, void *arg, + grpc_pollset **pollsets, + size_t pollset_count), + void (*destroy)(grpc_server *server, void *arg, + grpc_closure *on_done)) { listener *l = (listener *)gpr_malloc(sizeof(listener)); l->arg = arg; l->start = start; @@ -1368,21 +1325,20 @@ void grpc_server_add_listener( server->listeners = l; } -static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, - grpc_server *server, size_t cq_idx, +static grpc_call_error 
queue_call_request(grpc_server *server, size_t cq_idx, requested_call *rc) { call_data *calld = NULL; request_matcher *rm = NULL; int request_id; if (gpr_atm_acq_load(&server->shutdown_flag)) { - fail_call(exec_ctx, server, cq_idx, rc, + fail_call(server, cq_idx, rc, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); return GRPC_CALL_OK; } request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]); if (request_id == -1) { /* out of request ids: just fail this one */ - fail_call(exec_ctx, server, cq_idx, rc, + fail_call(server, cq_idx, rc, grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Out of request ids"), GRPC_ERROR_INT_LIMIT, server->max_requested_calls_per_cq)); @@ -1414,13 +1370,12 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, &calld->kill_zombie_closure, kill_zombie, grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); } else { GPR_ASSERT(calld->state == PENDING); calld->state = ACTIVATED; gpr_mu_unlock(&calld->mu_state); - publish_call(exec_ctx, server, calld, cq_idx, + publish_call(server, calld, cq_idx, &server->requested_calls_per_cq[cq_idx][request_id]); } gpr_mu_lock(&server->mu_call); @@ -1436,9 +1391,9 @@ grpc_call_error grpc_server_request_call( grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag) { grpc_call_error error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc)); - GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx); + GRPC_STATS_INC_SERVER_REQUESTED_CALLS(); GRPC_API_TRACE( "grpc_server_request_call(" "server=%p, call=%p, details=%p, initial_metadata=%p, " @@ -1470,9 +1425,9 @@ grpc_call_error grpc_server_request_call( rc->call = call; rc->data.batch.details = details; 
rc->initial_metadata = initial_metadata; - error = queue_call_request(&exec_ctx, server, cq_idx, rc); + error = queue_call_request(server, cq_idx, rc); done: - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return error; } @@ -1482,10 +1437,10 @@ grpc_call_error grpc_server_request_registered_call( grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag) { grpc_call_error error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + ExecCtx _local_exec_ctx; requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc)); registered_method *rm = (registered_method *)rmp; - GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx); + GRPC_STATS_INC_SERVER_REQUESTED_CALLS(); GRPC_API_TRACE( "grpc_server_request_registered_call(" "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, " @@ -1526,21 +1481,21 @@ grpc_call_error grpc_server_request_registered_call( rc->data.registered.deadline = deadline; rc->initial_metadata = initial_metadata; rc->data.registered.optional_payload = optional_payload; - error = queue_call_request(&exec_ctx, server, cq_idx, rc); + error = queue_call_request(server, cq_idx, rc); done: - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_finish(); return error; } -static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server, - size_t cq_idx, requested_call *rc, grpc_error *error) { +static void fail_call(grpc_server *server, size_t cq_idx, requested_call *rc, + grpc_error *error) { *rc->call = NULL; rc->initial_metadata->count = 0; GPR_ASSERT(error != GRPC_ERROR_NONE); server_ref(server); - grpc_cq_end_op(exec_ctx, server->cqs[cq_idx], rc->tag, error, - done_request_event, rc, &rc->completion); + grpc_cq_end_op(server->cqs[cq_idx], rc->tag, error, done_request_event, rc, + &rc->completion); } const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) { diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h index 375eab4a04..604e038b80 100644 --- 
a/src/core/lib/surface/server.h +++ b/src/core/lib/surface/server.h @@ -35,17 +35,16 @@ extern grpc_tracer_flag grpc_server_channel_trace; /* Add a listener to the server: when the server starts, it will call start, and when it shuts down, it will call destroy */ -void grpc_server_add_listener( - grpc_exec_ctx *exec_ctx, grpc_server *server, void *listener, - void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_pollset **pollsets, size_t npollsets), - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_closure *on_done)); +void grpc_server_add_listener(grpc_server *server, void *listener, + void (*start)(grpc_server *server, void *arg, + grpc_pollset **pollsets, + size_t npollsets), + void (*destroy)(grpc_server *server, void *arg, + grpc_closure *on_done)); /* Setup a transport - creates a channel stack, binds the transport to the server */ -void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server, - grpc_transport *transport, +void grpc_server_setup_transport(grpc_server *server, grpc_transport *transport, grpc_pollset *accepting_pollset, const grpc_channel_args *args); diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc index f1597014b1..6fd6597cfe 100644 --- a/src/core/lib/transport/bdp_estimator.cc +++ b/src/core/lib/transport/bdp_estimator.cc @@ -38,7 +38,7 @@ BdpEstimator::BdpEstimator(const char *name) bw_est_(0), name_(name) {} -grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx *exec_ctx) { +grpc_millis BdpEstimator::CompletePing() { gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); gpr_timespec dt_ts = gpr_time_sub(now, ping_start_time_); double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec; @@ -78,7 +78,7 @@ grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx *exec_ctx) { } ping_state_ = PingState::UNSCHEDULED; accumulator_ = 0; - return grpc_exec_ctx_now(exec_ctx) + inter_ping_delay_; + return grpc_exec_ctx_now() + 
inter_ping_delay_; } } // namespace grpc_core diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h index 470c127f7f..81c5b0f98b 100644 --- a/src/core/lib/transport/bdp_estimator.h +++ b/src/core/lib/transport/bdp_estimator.h @@ -80,7 +80,7 @@ class BdpEstimator { } // Completes a previously started ping, returns when to schedule the next one - grpc_millis CompletePing(grpc_exec_ctx *exec_ctx); + grpc_millis CompletePing(); private: enum class PingState { UNSCHEDULED, SCHEDULED, STARTED }; diff --git a/src/core/lib/transport/byte_stream.cc b/src/core/lib/transport/byte_stream.cc index 08f61629a9..4f32aeeaaa 100644 --- a/src/core/lib/transport/byte_stream.cc +++ b/src/core/lib/transport/byte_stream.cc @@ -25,34 +25,28 @@ #include "src/core/lib/slice/slice_internal.h" -bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, size_t max_size_hint, +bool grpc_byte_stream_next(grpc_byte_stream *byte_stream, size_t max_size_hint, grpc_closure *on_complete) { - return byte_stream->vtable->next(exec_ctx, byte_stream, max_size_hint, - on_complete); + return byte_stream->vtable->next(byte_stream, max_size_hint, on_complete); } -grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +grpc_error *grpc_byte_stream_pull(grpc_byte_stream *byte_stream, grpc_slice *slice) { - return byte_stream->vtable->pull(exec_ctx, byte_stream, slice); + return byte_stream->vtable->pull(byte_stream, slice); } -void grpc_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +void grpc_byte_stream_shutdown(grpc_byte_stream *byte_stream, grpc_error *error) { - byte_stream->vtable->shutdown(exec_ctx, byte_stream, error); + byte_stream->vtable->shutdown(byte_stream, error); } -void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { - byte_stream->vtable->destroy(exec_ctx, byte_stream); +void grpc_byte_stream_destroy(grpc_byte_stream 
*byte_stream) { + byte_stream->vtable->destroy(byte_stream); } // grpc_slice_buffer_stream -static bool slice_buffer_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +static bool slice_buffer_stream_next(grpc_byte_stream *byte_stream, size_t max_size_hint, grpc_closure *on_complete) { grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; @@ -60,8 +54,7 @@ static bool slice_buffer_stream_next(grpc_exec_ctx *exec_ctx, return true; } -static grpc_error *slice_buffer_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +static grpc_error *slice_buffer_stream_pull(grpc_byte_stream *byte_stream, grpc_slice *slice) { grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; if (stream->shutdown_error != GRPC_ERROR_NONE) { @@ -74,18 +67,16 @@ static grpc_error *slice_buffer_stream_pull(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void slice_buffer_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +static void slice_buffer_stream_shutdown(grpc_byte_stream *byte_stream, grpc_error *error) { grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; GRPC_ERROR_UNREF(stream->shutdown_error); stream->shutdown_error = error; } -static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { +static void slice_buffer_stream_destroy(grpc_byte_stream *byte_stream) { grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, stream->backing_buffer); + grpc_slice_buffer_reset_and_unref_internal(stream->backing_buffer); GRPC_ERROR_UNREF(stream->shutdown_error); } @@ -113,25 +104,22 @@ void grpc_byte_stream_cache_init(grpc_byte_stream_cache *cache, grpc_slice_buffer_init(&cache->cache_buffer); } -void grpc_byte_stream_cache_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream_cache *cache) { - grpc_byte_stream_destroy(exec_ctx, 
cache->underlying_stream); - grpc_slice_buffer_destroy_internal(exec_ctx, &cache->cache_buffer); +void grpc_byte_stream_cache_destroy(grpc_byte_stream_cache *cache) { + grpc_byte_stream_destroy(cache->underlying_stream); + grpc_slice_buffer_destroy_internal(&cache->cache_buffer); } -static bool caching_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +static bool caching_byte_stream_next(grpc_byte_stream *byte_stream, size_t max_size_hint, grpc_closure *on_complete) { grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; if (stream->shutdown_error != GRPC_ERROR_NONE) return true; if (stream->cursor < stream->cache->cache_buffer.count) return true; - return grpc_byte_stream_next(exec_ctx, stream->cache->underlying_stream, - max_size_hint, on_complete); + return grpc_byte_stream_next(stream->cache->underlying_stream, max_size_hint, + on_complete); } -static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +static grpc_error *caching_byte_stream_pull(grpc_byte_stream *byte_stream, grpc_slice *slice) { grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; if (stream->shutdown_error != GRPC_ERROR_NONE) { @@ -144,7 +132,7 @@ static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } grpc_error *error = - grpc_byte_stream_pull(exec_ctx, stream->cache->underlying_stream, slice); + grpc_byte_stream_pull(stream->cache->underlying_stream, slice); if (error == GRPC_ERROR_NONE) { ++stream->cursor; grpc_slice_buffer_add(&stream->cache->cache_buffer, @@ -153,17 +141,15 @@ static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx, return error; } -static void caching_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +static void caching_byte_stream_shutdown(grpc_byte_stream *byte_stream, grpc_error *error) { grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; 
GRPC_ERROR_UNREF(stream->shutdown_error); stream->shutdown_error = GRPC_ERROR_REF(error); - grpc_byte_stream_shutdown(exec_ctx, stream->cache->underlying_stream, error); + grpc_byte_stream_shutdown(stream->cache->underlying_stream, error); } -static void caching_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { +static void caching_byte_stream_destroy(grpc_byte_stream *byte_stream) { grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; GRPC_ERROR_UNREF(stream->shutdown_error); } diff --git a/src/core/lib/transport/byte_stream.h b/src/core/lib/transport/byte_stream.h index c1d8ee543f..a2d57a680c 100644 --- a/src/core/lib/transport/byte_stream.h +++ b/src/core/lib/transport/byte_stream.h @@ -35,13 +35,11 @@ extern "C" { typedef struct grpc_byte_stream grpc_byte_stream; typedef struct { - bool (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream, - size_t max_size_hint, grpc_closure *on_complete); - grpc_error *(*pull)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream, - grpc_slice *slice); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream, - grpc_error *error); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream); + bool (*next)(grpc_byte_stream *byte_stream, size_t max_size_hint, + grpc_closure *on_complete); + grpc_error *(*pull)(grpc_byte_stream *byte_stream, grpc_slice *slice); + void (*shutdown)(grpc_byte_stream *byte_stream, grpc_error *error); + void (*destroy)(grpc_byte_stream *byte_stream); } grpc_byte_stream_vtable; struct grpc_byte_stream { @@ -56,8 +54,7 @@ struct grpc_byte_stream { // // max_size_hint can be set as a hint as to the maximum number // of bytes that would be acceptable to read. 
-bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, size_t max_size_hint, +bool grpc_byte_stream_next(grpc_byte_stream *byte_stream, size_t max_size_hint, grpc_closure *on_complete); // Returns the next slice in the byte stream when it is ready (indicated by @@ -65,8 +62,7 @@ bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx, // grpc_byte_stream_next is called). // // Once a slice is returned into *slice, it is owned by the caller. -grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +grpc_error *grpc_byte_stream_pull(grpc_byte_stream *byte_stream, grpc_slice *slice); // Shuts down the byte stream. @@ -76,12 +72,10 @@ grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx, // // The next call to grpc_byte_stream_pull() (if any) will return the error // passed to grpc_byte_stream_shutdown(). -void grpc_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, +void grpc_byte_stream_shutdown(grpc_byte_stream *byte_stream, grpc_error *error); -void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream); +void grpc_byte_stream_destroy(grpc_byte_stream *byte_stream); // grpc_slice_buffer_stream // @@ -123,8 +117,7 @@ void grpc_byte_stream_cache_init(grpc_byte_stream_cache *cache, grpc_byte_stream *underlying_stream); // Must not be called while still in use by a grpc_caching_byte_stream. 
-void grpc_byte_stream_cache_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream_cache *cache); +void grpc_byte_stream_cache_destroy(grpc_byte_stream_cache *cache); typedef struct { grpc_byte_stream base; diff --git a/src/core/lib/transport/connectivity_state.cc b/src/core/lib/transport/connectivity_state.cc index f328a6cdbb..8775eed767 100644 --- a/src/core/lib/transport/connectivity_state.cc +++ b/src/core/lib/transport/connectivity_state.cc @@ -54,8 +54,7 @@ void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker, tracker->name = gpr_strdup(name); } -void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker *tracker) { +void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker *tracker) { grpc_error *error; grpc_connectivity_state_watcher *w; while ((w = tracker->watchers)) { @@ -68,7 +67,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner"); } - GRPC_CLOSURE_SCHED(exec_ctx, w->notify, error); + GRPC_CLOSURE_SCHED(w->notify, error); gpr_free(w); } GRPC_ERROR_UNREF(tracker->current_error); @@ -108,8 +107,8 @@ bool grpc_connectivity_state_has_watchers( } bool grpc_connectivity_state_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, - grpc_connectivity_state *current, grpc_closure *notify) { + grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current, + grpc_closure *notify) { grpc_connectivity_state cur = (grpc_connectivity_state)gpr_atm_no_barrier_load( &tracker->current_state_atm); @@ -126,7 +125,7 @@ bool grpc_connectivity_state_notify_on_state_change( if (current == NULL) { grpc_connectivity_state_watcher *w = tracker->watchers; if (w != NULL && w->notify == notify) { - GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_CANCELLED); tracker->watchers = w->next; gpr_free(w); return false; @@ -134,7 
+133,7 @@ bool grpc_connectivity_state_notify_on_state_change( while (w != NULL) { grpc_connectivity_state_watcher *rm_candidate = w->next; if (rm_candidate != NULL && rm_candidate->notify == notify) { - GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_CANCELLED); w->next = w->next->next; gpr_free(rm_candidate); return false; @@ -145,8 +144,7 @@ bool grpc_connectivity_state_notify_on_state_change( } else { if (cur != *current) { *current = cur; - GRPC_CLOSURE_SCHED(exec_ctx, notify, - GRPC_ERROR_REF(tracker->current_error)); + GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_REF(tracker->current_error)); } else { grpc_connectivity_state_watcher *w = (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w)); @@ -159,8 +157,7 @@ bool grpc_connectivity_state_notify_on_state_change( } } -void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker *tracker, +void grpc_connectivity_state_set(grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state, grpc_error *error, const char *reason) { grpc_connectivity_state cur = @@ -199,8 +196,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, w->notify); } - GRPC_CLOSURE_SCHED(exec_ctx, w->notify, - GRPC_ERROR_REF(tracker->current_error)); + GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error)); gpr_free(w); } } diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h index c0ba188148..b2dece3478 100644 --- a/src/core/lib/transport/connectivity_state.h +++ b/src/core/lib/transport/connectivity_state.h @@ -55,13 +55,11 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state); void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker, grpc_connectivity_state init_state, const char *name); -void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, - 
grpc_connectivity_state_tracker *tracker); +void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker *tracker); /** Set connectivity state; not thread safe; access must be serialized with an * external lock */ -void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker *tracker, +void grpc_connectivity_state_set(grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state, grpc_error *associated_error, const char *reason); @@ -85,8 +83,8 @@ grpc_connectivity_state grpc_connectivity_state_get( case). Access must be serialized with an external lock. */ bool grpc_connectivity_state_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, - grpc_connectivity_state *current, grpc_closure *notify); + grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current, + grpc_closure *notify); #ifdef __cplusplus } diff --git a/src/core/lib/transport/error_utils.cc b/src/core/lib/transport/error_utils.cc index 2e3b61b7ab..276fdf1cdc 100644 --- a/src/core/lib/transport/error_utils.cc +++ b/src/core/lib/transport/error_utils.cc @@ -39,9 +39,8 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error, return NULL; } -void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error, - grpc_millis deadline, grpc_status_code *code, - grpc_slice *slice, +void grpc_error_get_status(grpc_error *error, grpc_millis deadline, + grpc_status_code *code, grpc_slice *slice, grpc_http2_error_code *http_error) { // Start with the parent error and recurse through the tree of children // until we find the first one that has a status code. 
@@ -64,8 +63,8 @@ void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error, status = (grpc_status_code)integer; } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR, &integer)) { - status = grpc_http2_error_to_grpc_status( - exec_ctx, (grpc_http2_error_code)integer, deadline); + status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer, + deadline); } if (code != NULL) *code = status; diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h index b4f9df4bf1..94012450f7 100644 --- a/src/core/lib/transport/error_utils.h +++ b/src/core/lib/transport/error_utils.h @@ -33,9 +33,8 @@ extern "C" { /// All attributes are pulled from the same child error. If any of the /// attributes (code, msg, http_status) are unneeded, they can be passed as /// NULL. -void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error, - grpc_millis deadline, grpc_status_code *code, - grpc_slice *slice, +void grpc_error_get_status(grpc_error *error, grpc_millis deadline, + grpc_status_code *code, grpc_slice *slice, grpc_http2_error_code *http_status); /// A utility function to check whether there is a clear status code that diff --git a/src/core/lib/transport/metadata.cc b/src/core/lib/transport/metadata.cc index 5455b2481b..9f88917daa 100644 --- a/src/core/lib/transport/metadata.cc +++ b/src/core/lib/transport/metadata.cc @@ -108,7 +108,7 @@ typedef struct mdtab_shard { static mdtab_shard g_shards[SHARD_COUNT]; -static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard); +static void gc_mdtab(mdtab_shard *shard); void grpc_mdctx_global_init(void) { /* initialize shards */ @@ -123,11 +123,11 @@ void grpc_mdctx_global_init(void) { } } -void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx) { +void grpc_mdctx_global_shutdown() { for (size_t i = 0; i < SHARD_COUNT; i++) { mdtab_shard *shard = &g_shards[i]; gpr_mu_destroy(&shard->mu); - gc_mdtab(exec_ctx, shard); + gc_mdtab(shard); /* TODO(ctiller): 
GPR_ASSERT(shard->count == 0); */ if (shard->count != 0) { gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata elements were leaked", @@ -165,7 +165,7 @@ static void ref_md_locked(mdtab_shard *shard, } } -static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) { +static void gc_mdtab(mdtab_shard *shard) { size_t i; interned_metadata **prev_next; interned_metadata *md, *next; @@ -178,8 +178,8 @@ static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) { void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data); next = md->bucket_next; if (gpr_atm_acq_load(&md->refcnt) == 0) { - grpc_slice_unref_internal(exec_ctx, md->key); - grpc_slice_unref_internal(exec_ctx, md->value); + grpc_slice_unref_internal(md->key); + grpc_slice_unref_internal(md->value); if (md->user_data) { ((destroy_user_data_func)gpr_atm_no_barrier_load( &md->destroy_user_data))(user_data); @@ -228,17 +228,17 @@ static void grow_mdtab(mdtab_shard *shard) { GPR_TIMER_END("grow_mdtab", 0); } -static void rehash_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) { +static void rehash_mdtab(mdtab_shard *shard) { if (gpr_atm_no_barrier_load(&shard->free_estimate) > (gpr_atm)(shard->capacity / 4)) { - gc_mdtab(exec_ctx, shard); + gc_mdtab(shard); } else { grow_mdtab(shard); } } grpc_mdelem grpc_mdelem_create( - grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value, + grpc_slice key, grpc_slice value, grpc_mdelem_data *compatible_external_backing_store) { if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) { if (compatible_external_backing_store != NULL) { @@ -318,7 +318,7 @@ grpc_mdelem grpc_mdelem_create( shard->count++; if (shard->count > shard->capacity * 2) { - rehash_mdtab(exec_ctx, shard); + rehash_mdtab(shard); } gpr_mu_unlock(&shard->mu); @@ -328,22 +328,20 @@ grpc_mdelem grpc_mdelem_create( return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED); } -grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key, - grpc_slice value) { - 
grpc_mdelem out = grpc_mdelem_create(exec_ctx, key, value, NULL); - grpc_slice_unref_internal(exec_ctx, key); - grpc_slice_unref_internal(exec_ctx, value); +grpc_mdelem grpc_mdelem_from_slices(grpc_slice key, grpc_slice value) { + grpc_mdelem out = grpc_mdelem_create(key, value, NULL); + grpc_slice_unref_internal(key); + grpc_slice_unref_internal(value); return out; } -grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx, - grpc_metadata *metadata) { +grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata *metadata) { bool changed = false; grpc_slice key_slice = grpc_slice_maybe_static_intern(metadata->key, &changed); grpc_slice value_slice = grpc_slice_maybe_static_intern(metadata->value, &changed); - return grpc_mdelem_create(exec_ctx, key_slice, value_slice, + return grpc_mdelem_create(key_slice, value_slice, changed ? NULL : (grpc_mdelem_data *)metadata); } @@ -414,7 +412,7 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) { return gmd; } -void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) { +void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) { switch (GRPC_MDELEM_STORAGE(gmd)) { case GRPC_MDELEM_STORAGE_EXTERNAL: case GRPC_MDELEM_STORAGE_STATIC: @@ -462,8 +460,8 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) { const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1); GPR_ASSERT(prev_refcount >= 1); if (1 == prev_refcount) { - grpc_slice_unref_internal(exec_ctx, md->key); - grpc_slice_unref_internal(exec_ctx, md->value); + grpc_slice_unref_internal(md->key); + grpc_slice_unref_internal(md->value); gpr_free(md); } break; diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h index 9f82225dc3..d0465d7215 100644 --- a/src/core/lib/transport/metadata.h +++ b/src/core/lib/transport/metadata.h @@ -114,20 +114,18 @@ struct grpc_mdelem { (uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT)) /* Unrefs the slices. 
*/ -grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key, - grpc_slice value); +grpc_mdelem grpc_mdelem_from_slices(grpc_slice key, grpc_slice value); /* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata object as backing storage (so lifetimes should align) */ -grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx, - grpc_metadata *metadata); +grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata *metadata); /* Does not unref the slices; if a new non-interned mdelem is needed, allocates one if compatible_external_backing_store is NULL, or uses compatible_external_backing_store if it is non-NULL (in which case it's the users responsibility to ensure that it outlives usage) */ grpc_mdelem grpc_mdelem_create( - grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value, + grpc_slice key, grpc_slice value, grpc_mdelem_data *compatible_external_backing_store); bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b); @@ -143,16 +141,14 @@ void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *), #ifndef NDEBUG #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__) -#define GRPC_MDELEM_UNREF(exec_ctx, s) \ - grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__) +#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__) grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char *file, int line); -void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md, - const char *file, int line); +void grpc_mdelem_unref(grpc_mdelem md, const char *file, int line); #else #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s)) -#define GRPC_MDELEM_UNREF(exec_ctx, s) grpc_mdelem_unref((exec_ctx), (s)) +#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s)) grpc_mdelem grpc_mdelem_ref(grpc_mdelem md); -void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md); +void grpc_mdelem_unref(grpc_mdelem md); #endif #define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key) @@ -169,7 +165,7 @@ void 
grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md); #define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash)) void grpc_mdctx_global_init(void); -void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx); +void grpc_mdctx_global_shutdown(); #ifdef __cplusplus } diff --git a/src/core/lib/transport/metadata_batch.cc b/src/core/lib/transport/metadata_batch.cc index 2df9c9189c..075a03c0f9 100644 --- a/src/core/lib/transport/metadata_batch.cc +++ b/src/core/lib/transport/metadata_batch.cc @@ -51,8 +51,7 @@ static void assert_valid_list(grpc_mdelem_list *list) { #endif /* NDEBUG */ } -static void assert_valid_callouts(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch) { +static void assert_valid_callouts(grpc_metadata_batch *batch) { #ifndef NDEBUG for (grpc_linked_mdelem *l = batch->list.head; l != NULL; l = l->next) { grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md)); @@ -61,7 +60,7 @@ static void assert_valid_callouts(grpc_exec_ctx *exec_ctx, if (callout_idx != GRPC_BATCH_CALLOUTS_COUNT) { GPR_ASSERT(batch->idx.array[callout_idx] == l); } - grpc_slice_unref_internal(exec_ctx, key_interned); + grpc_slice_unref_internal(key_interned); } #endif } @@ -77,11 +76,10 @@ void grpc_metadata_batch_init(grpc_metadata_batch *batch) { batch->deadline = GRPC_MILLIS_INF_FUTURE; } -void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch) { +void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) { grpc_linked_mdelem *l; for (l = batch->list.head; l; l = l->next) { - GRPC_MDELEM_UNREF(exec_ctx, l->md); + GRPC_MDELEM_UNREF(l->md); } } @@ -126,13 +124,12 @@ static void maybe_unlink_callout(grpc_metadata_batch *batch, batch->idx.array[idx] = NULL; } -grpc_error *grpc_metadata_batch_add_head(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_add_head(grpc_metadata_batch *batch, grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) { GPR_ASSERT(!GRPC_MDISNULL(elem_to_add)); 
storage->md = elem_to_add; - return grpc_metadata_batch_link_head(exec_ctx, batch, storage); + return grpc_metadata_batch_link_head(batch, storage); } static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) { @@ -150,27 +147,25 @@ static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) { assert_valid_list(list); } -grpc_error *grpc_metadata_batch_link_head(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_link_head(grpc_metadata_batch *batch, grpc_linked_mdelem *storage) { - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); grpc_error *err = maybe_link_callout(batch, storage); if (err != GRPC_ERROR_NONE) { - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); return err; } link_head(&batch->list, storage); - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); return GRPC_ERROR_NONE; } -grpc_error *grpc_metadata_batch_add_tail(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_add_tail(grpc_metadata_batch *batch, grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) { GPR_ASSERT(!GRPC_MDISNULL(elem_to_add)); storage->md = elem_to_add; - return grpc_metadata_batch_link_tail(exec_ctx, batch, storage); + return grpc_metadata_batch_link_tail(batch, storage); } static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) { @@ -189,17 +184,16 @@ static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) { assert_valid_list(list); } -grpc_error *grpc_metadata_batch_link_tail(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_link_tail(grpc_metadata_batch *batch, grpc_linked_mdelem *storage) { - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); grpc_error *err = maybe_link_callout(batch, storage); if (err != GRPC_ERROR_NONE) { - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); return err; } 
link_tail(&batch->list, storage); - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); return GRPC_ERROR_NONE; } @@ -220,31 +214,28 @@ static void unlink_storage(grpc_mdelem_list *list, assert_valid_list(list); } -void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +void grpc_metadata_batch_remove(grpc_metadata_batch *batch, grpc_linked_mdelem *storage) { - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); maybe_unlink_callout(batch, storage); unlink_storage(&batch->list, storage); - GRPC_MDELEM_UNREF(exec_ctx, storage->md); - assert_valid_callouts(exec_ctx, batch); + GRPC_MDELEM_UNREF(storage->md); + assert_valid_callouts(batch); } -void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx, - grpc_linked_mdelem *storage, +void grpc_metadata_batch_set_value(grpc_linked_mdelem *storage, grpc_slice value) { grpc_mdelem old_mdelem = storage->md; grpc_mdelem new_mdelem = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value); + grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value); storage->md = new_mdelem; - GRPC_MDELEM_UNREF(exec_ctx, old_mdelem); + GRPC_MDELEM_UNREF(old_mdelem); } -grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_substitute(grpc_metadata_batch *batch, grpc_linked_mdelem *storage, grpc_mdelem new_mdelem) { - assert_valid_callouts(exec_ctx, batch); + assert_valid_callouts(batch); grpc_error *error = GRPC_ERROR_NONE; grpc_mdelem old_mdelem = storage->md; if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) { @@ -253,19 +244,18 @@ grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx, error = maybe_link_callout(batch, storage); if (error != GRPC_ERROR_NONE) { unlink_storage(&batch->list, storage); - GRPC_MDELEM_UNREF(exec_ctx, storage->md); + GRPC_MDELEM_UNREF(storage->md); } } else { storage->md = new_mdelem; } - 
GRPC_MDELEM_UNREF(exec_ctx, old_mdelem); - assert_valid_callouts(exec_ctx, batch); + GRPC_MDELEM_UNREF(old_mdelem); + assert_valid_callouts(batch); return error; } -void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch) { - grpc_metadata_batch_destroy(exec_ctx, batch); +void grpc_metadata_batch_clear(grpc_metadata_batch *batch) { + grpc_metadata_batch_destroy(batch); grpc_metadata_batch_init(batch); } @@ -291,8 +281,7 @@ static void add_error(grpc_error **composite, grpc_error *error, *composite = grpc_error_add_child(*composite, error); } -grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_filter(grpc_metadata_batch *batch, grpc_metadata_batch_filter_func func, void *user_data, const char *composite_error_string) { @@ -300,12 +289,12 @@ grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx, grpc_error *error = GRPC_ERROR_NONE; while (l) { grpc_linked_mdelem *next = l->next; - grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md); + grpc_filtered_mdelem new_mdelem = func(user_data, l->md); add_error(&error, new_mdelem.error, composite_error_string); if (GRPC_MDISNULL(new_mdelem.md)) { - grpc_metadata_batch_remove(exec_ctx, batch, l); + grpc_metadata_batch_remove(batch, l); } else if (new_mdelem.md.payload != l->md.payload) { - grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md); + grpc_metadata_batch_substitute(batch, l, new_mdelem.md); } l = next; } diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h index a2b4b92385..0e6ef21091 100644 --- a/src/core/lib/transport/metadata_batch.h +++ b/src/core/lib/transport/metadata_batch.h @@ -57,28 +57,23 @@ typedef struct grpc_metadata_batch { } grpc_metadata_batch; void grpc_metadata_batch_init(grpc_metadata_batch *batch); -void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch); -void 
grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch); +void grpc_metadata_batch_destroy(grpc_metadata_batch *batch); +void grpc_metadata_batch_clear(grpc_metadata_batch *batch); bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch); /* Returns the transport size of the batch. */ size_t grpc_metadata_batch_size(grpc_metadata_batch *batch); /** Remove \a storage from the batch, unreffing the mdelem contained */ -void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +void grpc_metadata_batch_remove(grpc_metadata_batch *batch, grpc_linked_mdelem *storage); /** Substitute a new mdelem for an old value */ -grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, +grpc_error *grpc_metadata_batch_substitute(grpc_metadata_batch *batch, grpc_linked_mdelem *storage, grpc_mdelem new_value); -void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx, - grpc_linked_mdelem *storage, +void grpc_metadata_batch_set_value(grpc_linked_mdelem *storage, grpc_slice value); /** Add \a storage to the beginning of \a batch. storage->md is @@ -86,17 +81,17 @@ void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx, \a storage is owned by the caller and must survive for the lifetime of batch. This usually means it should be around for the lifetime of the call. */ -grpc_error *grpc_metadata_batch_link_head( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT; +grpc_error *grpc_metadata_batch_link_head(grpc_metadata_batch *batch, + grpc_linked_mdelem *storage) + GRPC_MUST_USE_RESULT; /** Add \a storage to the end of \a batch. storage->md is assumed to be valid. \a storage is owned by the caller and must survive for the lifetime of batch. This usually means it should be around for the lifetime of the call. 
*/ -grpc_error *grpc_metadata_batch_link_tail( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT; +grpc_error *grpc_metadata_batch_link_tail(grpc_metadata_batch *batch, + grpc_linked_mdelem *storage) + GRPC_MUST_USE_RESULT; /** Add \a elem_to_add as the first element in \a batch, using \a storage as backing storage for the linked list element. @@ -105,8 +100,8 @@ grpc_error *grpc_metadata_batch_link_tail( for the lifetime of the call. Takes ownership of \a elem_to_add */ grpc_error *grpc_metadata_batch_add_head( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; + grpc_metadata_batch *batch, grpc_linked_mdelem *storage, + grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; /** Add \a elem_to_add as the last element in \a batch, using \a storage as backing storage for the linked list element. \a storage is owned by the caller and must survive for the @@ -114,8 +109,8 @@ grpc_error *grpc_metadata_batch_add_head( for the lifetime of the call. 
Takes ownership of \a elem_to_add */ grpc_error *grpc_metadata_batch_add_tail( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; + grpc_metadata_batch *batch, grpc_linked_mdelem *storage, + grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md); @@ -132,11 +127,10 @@ typedef struct { { GRPC_ERROR_NONE, GRPC_MDNULL } typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)( - grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem elem); + void *user_data, grpc_mdelem elem); grpc_error *grpc_metadata_batch_filter( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_metadata_batch_filter_func func, void *user_data, - const char *composite_error_string) GRPC_MUST_USE_RESULT; + grpc_metadata_batch *batch, grpc_metadata_batch_filter_func func, + void *user_data, const char *composite_error_string) GRPC_MUST_USE_RESULT; #ifndef NDEBUG void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd); diff --git a/src/core/lib/transport/service_config.cc b/src/core/lib/transport/service_config.cc index 070a13a2b4..7aa4873d37 100644 --- a/src/core/lib/transport/service_config.cc +++ b/src/core/lib/transport/service_config.cc @@ -146,8 +146,7 @@ static char* parse_json_method_name(grpc_json* json) { // each name found, incrementing \a idx for each entry added. // Returns false on error. static bool parse_json_method_config( - grpc_exec_ctx* exec_ctx, grpc_json* json, - void* (*create_value)(const grpc_json* method_config_json), + grpc_json* json, void* (*create_value)(const grpc_json* method_config_json), grpc_slice_hash_table_entry* entries, size_t* idx) { // Construct value. 
void* method_config = create_value(json); @@ -180,9 +179,9 @@ done: } grpc_slice_hash_table* grpc_service_config_create_method_config_table( - grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config, + const grpc_service_config* service_config, void* (*create_value)(const grpc_json* method_config_json), - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value)) { + void (*destroy_value)(void* value)) { const grpc_json* json = service_config->json_tree; // Traverse parsed JSON tree. if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL; @@ -204,8 +203,7 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table( size_t idx = 0; for (grpc_json* method = field->child; method != NULL; method = method->next) { - if (!parse_json_method_config(exec_ctx, method, create_value, entries, - &idx)) { + if (!parse_json_method_config(method, create_value, entries, &idx)) { return NULL; } } @@ -222,8 +220,7 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table( return method_config_table; } -void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx, - const grpc_slice_hash_table* table, +void* grpc_method_config_table_get(const grpc_slice_hash_table* table, grpc_slice path) { void* value = grpc_slice_hash_table_get(table, path); // If we didn't find a match for the path, try looking for a wildcard @@ -239,7 +236,7 @@ void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx, grpc_slice wildcard_path = grpc_slice_from_copied_string(buf); gpr_free(buf); value = grpc_slice_hash_table_get(table, wildcard_path); - grpc_slice_unref_internal(exec_ctx, wildcard_path); + grpc_slice_unref_internal(wildcard_path); gpr_free(path_str); } return value; diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h index 9c43093627..6395a368f2 100644 --- a/src/core/lib/transport/service_config.h +++ b/src/core/lib/transport/service_config.h @@ -49,9 +49,9 @@ const char* 
grpc_service_config_get_lb_policy_name( /// \a destroy_value is used to clean up values. /// Returns NULL on error. grpc_slice_hash_table* grpc_service_config_create_method_config_table( - grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config, + const grpc_service_config* service_config, void* (*create_value)(const grpc_json* method_config_json), - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value)); + void (*destroy_value)(void* value)); /// A helper function for looking up values in the table returned by /// \a grpc_service_config_create_method_config_table(). @@ -59,8 +59,7 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table( /// the form "/service/method". /// Returns NULL if the method has no config. /// Caller does NOT own a reference to the result. -void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx, - const grpc_slice_hash_table* table, +void* grpc_method_config_table_get(const grpc_slice_hash_table* table, grpc_slice path); #ifdef __cplusplus diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc index 472cf888ea..e6c8d290f2 100644 --- a/src/core/lib/transport/static_metadata.cc +++ b/src/core/lib/transport/static_metadata.cc @@ -104,7 +104,7 @@ static uint8_t g_bytes[] = { 101, 44, 103, 122, 105, 112}; static void static_ref(void *unused) {} -static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {} +static void static_unref(void *unused) {} static const grpc_slice_refcount_vtable static_sub_vtable = { static_ref, static_unref, grpc_slice_default_eq_impl, grpc_slice_default_hash_impl}; diff --git a/src/core/lib/transport/status_conversion.cc b/src/core/lib/transport/status_conversion.cc index 891c4427d7..fd7764f2db 100644 --- a/src/core/lib/transport/status_conversion.cc +++ b/src/core/lib/transport/status_conversion.cc @@ -37,8 +37,7 @@ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) { } } -grpc_status_code 
grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx, - grpc_http2_error_code error, +grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, grpc_millis deadline) { switch (error) { case GRPC_HTTP2_NO_ERROR: @@ -47,9 +46,8 @@ grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx, case GRPC_HTTP2_CANCEL: /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been * exceeded */ - return grpc_exec_ctx_now(exec_ctx) > deadline - ? GRPC_STATUS_DEADLINE_EXCEEDED - : GRPC_STATUS_CANCELLED; + return grpc_exec_ctx_now() > deadline ? GRPC_STATUS_DEADLINE_EXCEEDED + : GRPC_STATUS_CANCELLED; case GRPC_HTTP2_ENHANCE_YOUR_CALM: return GRPC_STATUS_RESOURCE_EXHAUSTED; case GRPC_HTTP2_INADEQUATE_SECURITY: diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h index 8ef91aecfe..c89a3e992f 100644 --- a/src/core/lib/transport/status_conversion.h +++ b/src/core/lib/transport/status_conversion.h @@ -29,8 +29,7 @@ extern "C" { /* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status); -grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx, - grpc_http2_error_code error, +grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, grpc_millis deadline); /* Conversion of HTTP status codes (:status) to grpc status codes */ diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc index ab4f938e7b..2afc7ba7b5 100644 --- a/src/core/lib/transport/transport.cc +++ b/src/core/lib/transport/transport.cc @@ -51,8 +51,7 @@ void grpc_stream_ref(grpc_stream_refcount *refcount) { } #ifndef NDEBUG -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount, - const char *reason) { +void grpc_stream_unref(grpc_stream_refcount *refcount, const char *reason) { if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) { gpr_atm val = 
gpr_atm_no_barrier_load(&refcount->refs.count); gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s", @@ -60,8 +59,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount, val - 1, reason); } #else -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, - grpc_stream_refcount *refcount) { +void grpc_stream_unref(grpc_stream_refcount *refcount) { #endif if (gpr_unref(&refcount->refs)) { if (exec_ctx->flags & GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) { @@ -75,7 +73,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx, refcount->destroy.scheduler = grpc_executor_scheduler(GRPC_EXECUTOR_SHORT); } - GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&refcount->destroy, GRPC_ERROR_NONE); } } @@ -91,11 +89,11 @@ static void slice_stream_ref(void *p) { #endif } -static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) { +static void slice_stream_unref(void *p) { #ifndef NDEBUG - grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p), "slice"); + grpc_stream_unref(STREAM_REF_FROM_SLICE_REF(p), "slice"); #else - grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p)); + grpc_stream_unref(STREAM_REF_FROM_SLICE_REF(p)); #endif } @@ -153,58 +151,49 @@ size_t grpc_transport_stream_size(grpc_transport *transport) { return transport->vtable->sizeof_stream; } -void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, - grpc_transport *transport) { - transport->vtable->destroy(exec_ctx, transport); +void grpc_transport_destroy(grpc_transport *transport) { + transport->vtable->destroy(transport); } -int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, grpc_stream *stream, +int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream, grpc_stream_refcount *refcount, const void *server_data, gpr_arena *arena) { - return transport->vtable->init_stream(exec_ctx, transport, stream, refcount, + return transport->vtable->init_stream(transport, stream, refcount, 
server_data, arena); } -void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, +void grpc_transport_perform_stream_op(grpc_transport *transport, grpc_stream *stream, grpc_transport_stream_op_batch *op) { - transport->vtable->perform_stream_op(exec_ctx, transport, stream, op); + transport->vtable->perform_stream_op(transport, stream, op); } -void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, +void grpc_transport_perform_op(grpc_transport *transport, grpc_transport_op *op) { - transport->vtable->perform_op(exec_ctx, transport, op); + transport->vtable->perform_op(transport, op); } -void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_stream *stream, +void grpc_transport_set_pops(grpc_transport *transport, grpc_stream *stream, grpc_polling_entity *pollent) { grpc_pollset *pollset; grpc_pollset_set *pollset_set; if ((pollset = grpc_polling_entity_pollset(pollent)) != NULL) { - transport->vtable->set_pollset(exec_ctx, transport, stream, pollset); + transport->vtable->set_pollset(transport, stream, pollset); } else if ((pollset_set = grpc_polling_entity_pollset_set(pollent)) != NULL) { - transport->vtable->set_pollset_set(exec_ctx, transport, stream, - pollset_set); + transport->vtable->set_pollset_set(transport, stream, pollset_set); } else { abort(); } } -void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, +void grpc_transport_destroy_stream(grpc_transport *transport, grpc_stream *stream, grpc_closure *then_schedule_closure) { - transport->vtable->destroy_stream(exec_ctx, transport, stream, - then_schedule_closure); + transport->vtable->destroy_stream(transport, stream, then_schedule_closure); } -grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, - grpc_transport *transport) { - return transport->vtable->get_endpoint(exec_ctx, transport); +grpc_endpoint *grpc_transport_get_endpoint(grpc_transport *transport) { + 
return transport->vtable->get_endpoint(transport); } // This comment should be sung to the tune of @@ -215,25 +204,23 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, // though it lives in lib, it handles transport stream ops sure // it's grpc_transport_stream_op_batch_finish_with_failure void grpc_transport_stream_op_batch_finish_with_failure( - grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch, - grpc_error *error, grpc_call_combiner *call_combiner) { + grpc_transport_stream_op_batch *batch, grpc_error *error, + grpc_call_combiner *call_combiner) { if (batch->send_message) { - grpc_byte_stream_destroy(exec_ctx, - batch->payload->send_message.send_message); + grpc_byte_stream_destroy(batch->payload->send_message.send_message); } if (batch->recv_message) { - GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, - batch->payload->recv_message.recv_message_ready, - GRPC_ERROR_REF(error), - "failing recv_message_ready"); + GRPC_CALL_COMBINER_START( + call_combiner, batch->payload->recv_message.recv_message_ready, + GRPC_ERROR_REF(error), "failing recv_message_ready"); } if (batch->recv_initial_metadata) { GRPC_CALL_COMBINER_START( - exec_ctx, call_combiner, + call_combiner, batch->payload->recv_initial_metadata.recv_initial_metadata_ready, GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready"); } - GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error); + GRPC_CLOSURE_SCHED(batch->on_complete, error); if (batch->cancel_stream) { GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error); } @@ -245,10 +232,9 @@ typedef struct { grpc_transport_op op; } made_transport_op; -static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void destroy_made_transport_op(void *arg, grpc_error *error) { made_transport_op *op = (made_transport_op *)arg; - GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(op->inner_on_complete, GRPC_ERROR_REF(error)); 
gpr_free(op); } @@ -269,12 +255,11 @@ typedef struct { grpc_transport_stream_op_batch_payload payload; } made_transport_stream_op; -static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void destroy_made_transport_stream_op(void *arg, grpc_error *error) { made_transport_stream_op *op = (made_transport_stream_op *)arg; grpc_closure *c = op->inner_on_complete; gpr_free(op); - GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_RUN(c, GRPC_ERROR_REF(error)); } grpc_transport_stream_op_batch *grpc_make_transport_stream_op( diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h index fbf5dcb8b5..db61f710f7 100644 --- a/src/core/lib/transport/transport.h +++ b/src/core/lib/transport/transport.h @@ -61,15 +61,14 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, grpc_iomgr_cb_func cb, void *cb_arg, const char *object_type); void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason); -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount, - const char *reason); +void grpc_stream_unref(grpc_stream_refcount *refcount, const char *reason); #define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \ grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype) #else void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, grpc_iomgr_cb_func cb, void *cb_arg); void grpc_stream_ref(grpc_stream_refcount *refcount); -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount); +void grpc_stream_unref(grpc_stream_refcount *refcount); #define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \ grpc_stream_ref_init(rc, ir, cb, cb_arg) #endif @@ -243,8 +242,7 @@ typedef struct grpc_transport_op { If true, the callback is set to set_accept_stream_fn, with its user_data argument set to set_accept_stream_user_data */ bool set_accept_stream; - void (*set_accept_stream_fn)(grpc_exec_ctx 
*exec_ctx, void *user_data, - grpc_transport *transport, + void (*set_accept_stream_fn)(void *user_data, grpc_transport *transport, const void *server_data); void *set_accept_stream_user_data; /** add this transport to a pollset */ @@ -275,13 +273,12 @@ size_t grpc_transport_stream_size(grpc_transport *transport); stream - a pointer to uninitialized memory to initialize server_data - either NULL for a client initiated stream, or a pointer supplied from the accept_stream callback function */ -int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, grpc_stream *stream, +int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream, grpc_stream_refcount *refcount, const void *server_data, gpr_arena *arena); -void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_stream *stream, grpc_polling_entity *pollent); +void grpc_transport_set_pops(grpc_transport *transport, grpc_stream *stream, + grpc_polling_entity *pollent); /* Destroy transport data for a stream. 
@@ -293,14 +290,13 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, transport - the transport on which to create this stream stream - the grpc_stream to destroy (memory is still owned by the caller, but any child memory must be cleaned up) */ -void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, +void grpc_transport_destroy_stream(grpc_transport *transport, grpc_stream *stream, grpc_closure *then_schedule_closure); void grpc_transport_stream_op_batch_finish_with_failure( - grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op, - grpc_error *error, grpc_call_combiner *call_combiner); + grpc_transport_stream_op_batch *op, grpc_error *error, + grpc_call_combiner *call_combiner); char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op); char *grpc_transport_op_string(grpc_transport_op *op); @@ -315,13 +311,11 @@ char *grpc_transport_op_string(grpc_transport_op *op); non-NULL and previously initialized by the same transport. 
op - a grpc_transport_stream_op_batch specifying the op to perform */ -void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, +void grpc_transport_perform_stream_op(grpc_transport *transport, grpc_stream *stream, grpc_transport_stream_op_batch *op); -void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, +void grpc_transport_perform_op(grpc_transport *transport, grpc_transport_op *op); /* Send a ping on a transport @@ -337,11 +331,10 @@ void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status, void grpc_transport_close(grpc_transport *transport); /* Destroy the transport */ -void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport); +void grpc_transport_destroy(grpc_transport *transport); /* Get the endpoint used by \a transport */ -grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, - grpc_transport *transport); +grpc_endpoint *grpc_transport_get_endpoint(grpc_transport *transport); /* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to \a on_consumed and then delete the returned transport op */ diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h index 445fb41ab1..c7b78afa61 100644 --- a/src/core/lib/transport/transport_impl.h +++ b/src/core/lib/transport/transport_impl.h @@ -34,37 +34,34 @@ typedef struct grpc_transport_vtable { const char *name; /* implementation of grpc_transport_init_stream */ - int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena); + int (*init_stream)(grpc_transport *self, grpc_stream *stream, + grpc_stream_refcount *refcount, const void *server_data, + gpr_arena *arena); /* implementation of grpc_transport_set_pollset */ - void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_pollset *pollset); + void 
(*set_pollset)(grpc_transport *self, grpc_stream *stream, + grpc_pollset *pollset); /* implementation of grpc_transport_set_pollset */ - void (*set_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_pollset_set *pollset_set); + void (*set_pollset_set)(grpc_transport *self, grpc_stream *stream, + grpc_pollset_set *pollset_set); /* implementation of grpc_transport_perform_stream_op */ - void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, + void (*perform_stream_op)(grpc_transport *self, grpc_stream *stream, grpc_transport_stream_op_batch *op); /* implementation of grpc_transport_perform_op */ - void (*perform_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_transport_op *op); + void (*perform_op)(grpc_transport *self, grpc_transport_op *op); /* implementation of grpc_transport_destroy_stream */ - void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, + void (*destroy_stream)(grpc_transport *self, grpc_stream *stream, grpc_closure *then_schedule_closure); /* implementation of grpc_transport_destroy */ - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self); + void (*destroy)(grpc_transport *self); /* implementation of grpc_transport_get_endpoint */ - grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self); + grpc_endpoint *(*get_endpoint)(grpc_transport *self); } grpc_transport_vtable; /* an instance of a grpc transport */ |