author    Mark D. Roth <roth@google.com>    2017-09-25 12:36:30 -0700
committer Mark D. Roth <roth@google.com>    2017-09-25 12:36:30 -0700
commit    b4c01f9e2982d61723014f76c42fc059c554b084 (patch)
tree      7ac46ab54a86744ae26317a2365e0f9fe8647812 /src/core/lib/surface
parent    ad9208c07e74d90dd5110324fe9cb830e6e6f68b (diff)
parent    008a173a7e2ba1d5c0933aa7a77395945ba2024d (diff)
Merge remote-tracking branch 'upstream/master' into plugin_credentials_api_fix
Diffstat (limited to 'src/core/lib/surface')
-rw-r--r--  src/core/lib/surface/call.c              108
-rw-r--r--  src/core/lib/surface/call.h                2
-rw-r--r--  src/core/lib/surface/channel.c             8
-rw-r--r--  src/core/lib/surface/completion_queue.c   86
-rw-r--r--  src/core/lib/surface/init.c                9
-rw-r--r--  src/core/lib/surface/server.c             48
6 files changed, 145 insertions(+), 116 deletions(-)
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 93c512df69..03f47553a1 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -158,7 +158,7 @@ struct grpc_call {
grpc_channel *channel;
gpr_timespec start_time;
/* parent_call* */ gpr_atm parent_call_atm;
- child_call *child_call;
+ child_call *child;
/* client or server call */
bool is_client;
@@ -293,11 +293,11 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
grpc_error *error, bool has_cancelled);
-static void add_init_error(grpc_error **composite, grpc_error *new) {
- if (new == GRPC_ERROR_NONE) return;
+static void add_init_error(grpc_error **composite, grpc_error *new_err) {
+ if (new_err == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
- *composite = grpc_error_add_child(*composite, new);
+ *composite = grpc_error_add_child(*composite, new_err);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
@@ -330,8 +330,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
- gpr_arena *arena =
- gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
+ size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
+ GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
+ gpr_arena *arena = gpr_arena_create(initial_size);
call = (grpc_call *)gpr_arena_alloc(
arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
@@ -377,24 +378,24 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
bool immediately_cancel = false;
- if (args->parent_call != NULL) {
- child_call *cc = call->child_call =
- gpr_arena_alloc(arena, sizeof(child_call));
- call->child_call->parent = args->parent_call;
+ if (args->parent != NULL) {
+ child_call *cc = call->child =
+ (child_call *)gpr_arena_alloc(arena, sizeof(child_call));
+ call->child->parent = args->parent;
- GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
+ GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
- GPR_ASSERT(!args->parent_call->is_client);
+ GPR_ASSERT(!args->parent->is_client);
- parent_call *pc = get_or_create_parent_call(args->parent_call);
+ parent_call *pc = get_or_create_parent_call(args->parent);
gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
- args->parent_call->send_deadline.clock_type),
- args->parent_call->send_deadline);
+ args->parent->send_deadline.clock_type),
+ args->parent->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
@@ -406,9 +407,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
"Census tracing propagation requested "
"without Census context propagation"));
}
- grpc_call_context_set(
- call, GRPC_CONTEXT_TRACING,
- args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
+ args->parent->context[GRPC_CONTEXT_TRACING].value,
+ NULL);
} else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) {
add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Census context propagation requested "
@@ -416,7 +417,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
- if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
+ if (gpr_atm_acq_load(&args->parent->received_final_op_atm)) {
immediately_cancel = true;
}
}
@@ -426,9 +427,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
cc->sibling_next = cc->sibling_prev = call;
} else {
cc->sibling_next = pc->first_child;
- cc->sibling_prev = pc->first_child->child_call->sibling_prev;
- cc->sibling_next->child_call->sibling_prev =
- cc->sibling_prev->child_call->sibling_next = call;
+ cc->sibling_prev = pc->first_child->child->sibling_prev;
+ cc->sibling_next->child->sibling_prev =
+ cc->sibling_prev->child->sibling_next = call;
}
gpr_mu_unlock(&pc->child_list_mu);
@@ -549,7 +550,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
}
- get_final_status(call, set_status_value_directly, &c->final_info.final_status,
+ get_final_status(c, set_status_value_directly, &c->final_info.final_status,
NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
@@ -570,7 +571,7 @@ void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
- child_call *cc = c->child_call;
+ child_call *cc = c->child;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
@@ -585,8 +586,8 @@ void grpc_call_unref(grpc_call *c) {
pc->first_child = NULL;
}
}
- cc->sibling_prev->child_call->sibling_next = cc->sibling_next;
- cc->sibling_next->child_call->sibling_prev = cc->sibling_prev;
+ cc->sibling_prev->child->sibling_next = cc->sibling_next;
+ cc->sibling_next->child->sibling_prev = cc->sibling_prev;
gpr_mu_unlock(&pc->child_list_mu);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
}
@@ -1316,7 +1317,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
child = pc->first_child;
if (child != NULL) {
do {
- next_child_call = child->child_call->sibling_next;
+ next_child_call = child->child->sibling_next;
if (child->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
@@ -1345,7 +1346,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
- GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
+ GRPC_CLOSURE_RUN(
+ exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1474,7 +1476,7 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
* acq_load is in receiving_initial_metadata_ready() */
if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL ||
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
- process_data_after_md(exec_ctx, bctlp);
+ process_data_after_md(exec_ctx, bctl);
}
}
@@ -1510,7 +1512,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
algo_name);
@@ -1524,7 +1526,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
call->incoming_stream_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(
call->incoming_stream_compression_algorithm, &algo_name);
gpr_log(
@@ -1551,7 +1553,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
@@ -1567,7 +1569,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->encodings_accepted_by_peer,
call->incoming_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(call->incoming_compression_algorithm,
&algo_name);
gpr_log(GPR_ERROR,
@@ -1672,6 +1674,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
batch_control *bctl;
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
+ grpc_transport_stream_op_batch *stream_op;
+ grpc_transport_stream_op_batch_payload *stream_op_payload;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1679,11 +1683,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (nops == 0) {
if (!is_notify_tag_closure) {
GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
- grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
- free_no_op_completion, NULL,
- gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;
@@ -1697,9 +1702,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->completion_data.notify_tag.is_closure =
(uint8_t)(is_notify_tag_closure != 0);
- grpc_transport_stream_op_batch *stream_op = &bctl->op;
- grpc_transport_stream_op_batch_payload *stream_op_payload =
- &call->stream_op_payload;
+ stream_op = &bctl->op;
+ stream_op_payload = &call->stream_op_payload;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@@ -1709,7 +1713,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
switch (op->op) {
- case GRPC_OP_SEND_INITIAL_METADATA:
+ case GRPC_OP_SEND_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (!are_initial_metadata_flags_valid(op->flags, call->is_client)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1803,7 +1807,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->peer_string;
}
break;
- case GRPC_OP_SEND_MESSAGE:
+ }
+ case GRPC_OP_SEND_MESSAGE: {
if (!are_write_flags_valid(op->flags)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
goto done_with_error;
@@ -1832,7 +1837,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_message.send_message =
&call->sending_stream.base;
break;
- case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ }
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1851,7 +1857,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ }
+ case GRPC_OP_SEND_STATUS_FROM_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1913,7 +1920,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_RECV_INITIAL_METADATA:
+ }
+ case GRPC_OP_RECV_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1940,7 +1948,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_MESSAGE:
+ }
+ case GRPC_OP_RECV_MESSAGE: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1961,7 +1970,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ }
+ case GRPC_OP_RECV_STATUS_ON_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1988,7 +1998,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
- case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ }
+ case GRPC_OP_RECV_CLOSE_ON_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -2012,6 +2023,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
+ }
}
}
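
Aside: many of the call.c edits above are mechanical — explicit casts on the results of gpr_arena_alloc/gpr_malloc, const char * for compression-algorithm name lookups, braces added to switch cases, and the rename of the parameter `new` to `new_err`. They are consistent with preparing the file to also compile as C++ (an inference; the diff itself does not say so). A minimal standalone sketch, not gRPC code, of why the casts and the rename matter under a C++ compiler:

/* Sketch only; nothing here comes from gRPC. */
#include <stdlib.h>

struct node { int value; };

/* In C, the void* returned by malloc converts implicitly to struct node*;
 * in C++ the explicit cast below is required.  Likewise, a parameter named
 * "new" is fine in C but a syntax error in C++, hence names like new_err. */
static struct node *make_node(int value_for_new_node) {
  struct node *n = (struct node *)malloc(sizeof(*n));
  if (n != NULL) n->value = value_for_new_node;
  return n;
}

int main(void) {
  struct node *n = make_node(42);
  free(n);
  return 0;
}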
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index d537637cbb..c680139cf6 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -37,7 +37,7 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
typedef struct grpc_call_create_args {
grpc_channel *channel;
- grpc_call *parent_call;
+ grpc_call *parent;
uint32_t propagation_mask;
grpc_completion_queue *cq;
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 34548dac26..48962e5e45 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -27,6 +27,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -77,6 +78,11 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_channel_args *args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_channel *channel;
+ if (channel_stack_type == GRPC_SERVER_CHANNEL) {
+ GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
+ } else {
+ GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
+ }
grpc_error *error = grpc_channel_stack_builder_finish(
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
(void **)&channel);
@@ -276,7 +282,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
- args.parent_call = parent_call;
+ args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 4726503994..fed66e3a20 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -26,6 +26,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@@ -54,7 +55,7 @@ typedef struct {
bool can_listen;
size_t (*size)(void);
void (*init)(grpc_pollset *pollset, gpr_mu **mu);
- grpc_error *(*kick)(grpc_pollset *pollset,
+ grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
@@ -130,7 +131,8 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
}
static grpc_error *non_polling_poller_kick(
- grpc_pollset *pollset, grpc_pollset_worker *specific_worker) {
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
non_polling_poller *p = (non_polling_poller *)pollset;
if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
if (specific_worker != NULL) {
@@ -327,25 +329,12 @@ static void cq_destroy_pluck(void *data);
/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
/* GRPC_CQ_NEXT */
- {.data_size = sizeof(cq_next_data),
- .cq_completion_type = GRPC_CQ_NEXT,
- .init = cq_init_next,
- .shutdown = cq_shutdown_next,
- .destroy = cq_destroy_next,
- .begin_op = cq_begin_op_for_next,
- .end_op = cq_end_op_for_next,
- .next = cq_next,
- .pluck = NULL},
+ {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
+ cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, NULL},
/* GRPC_CQ_PLUCK */
- {.data_size = sizeof(cq_pluck_data),
- .cq_completion_type = GRPC_CQ_PLUCK,
- .init = cq_init_pluck,
- .shutdown = cq_shutdown_pluck,
- .destroy = cq_destroy_pluck,
- .begin_op = cq_begin_op_for_pluck,
- .end_op = cq_end_op_for_pluck,
- .next = NULL,
- .pluck = cq_pluck},
+ {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
+ cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, NULL,
+ cq_pluck},
};
#define DATA_FROM_CQ(cq) ((void *)(cq + 1))
@@ -420,6 +409,10 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
const cq_poller_vtable *poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+
cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
vtable->data_size +
poller_vtable->size());
@@ -560,13 +553,13 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {}
* true if the increment was successful; false if the counter is zero */
static bool atm_inc_if_nonzero(gpr_atm *counter) {
while (true) {
- gpr_atm count = gpr_atm_no_barrier_load(counter);
+ gpr_atm count = gpr_atm_acq_load(counter);
/* If zero, we are done. If not, we must do a CAS (instead of an atomic
* increment) to maintain the contract: do not increment the counter if it
* is zero. */
if (count == 0) {
return false;
- } else if (gpr_atm_no_barrier_cas(counter, count, count + 1)) {
+ } else if (gpr_atm_full_cas(counter, count, count + 1)) {
break;
}
}
@@ -575,12 +568,12 @@ static bool atm_inc_if_nonzero(gpr_atm *counter) {
}
static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
@@ -625,7 +618,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
}
}
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
storage->tag = tag;
@@ -638,15 +631,19 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
/* Add the completion to the queue */
bool is_first = cq_event_queue_push(&cqd->queue, storage);
gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
- bool will_definitely_shutdown =
- gpr_atm_no_barrier_load(&cqd->pending_events) == 1;
+
+ /* Since we do not hold the cq lock here, it is important to do an 'acquire'
+ load here (instead of a 'no_barrier' load) to match with the release store
+ (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next
+ */
+ bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1;
if (!will_definitely_shutdown) {
/* Only kick if this is the first item queued */
if (is_first) {
gpr_mu_lock(cq->mu);
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
if (kick_error != GRPC_ERROR_NONE) {
@@ -686,7 +683,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
void *done_arg,
grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0);
@@ -732,7 +729,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
}
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
gpr_mu_unlock(cq->mu);
@@ -769,7 +766,7 @@ typedef struct {
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -820,7 +817,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
void *reserved) {
grpc_event ret;
gpr_timespec now;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -883,7 +880,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
}
- if (gpr_atm_no_barrier_load(&cqd->pending_events) == 0) {
+ if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
/* Before returning, check if the queue has any items left over (since
gpr_mpscq_pop() can sometimes return NULL even if the queue is not
empty. If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */
@@ -929,9 +926,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
if (cq_event_queue_num_items(&cqd->queue) > 0 &&
- gpr_atm_no_barrier_load(&cqd->pending_events) > 0) {
+ gpr_atm_acq_load(&cqd->pending_events) > 0) {
gpr_mu_lock(cq->mu);
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
}
@@ -953,7 +950,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
this function */
static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
@@ -964,7 +961,7 @@ static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_next() below, that would call pollset shutdown.
@@ -980,6 +977,9 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
return;
}
cqd->shutdown_called = true;
+ /* Doing a full_fetch_add (i.e. acq/release) here to match with
+ * cq_begin_op_for_next and cq_end_op_for_next functions which read/write
+ * on this counter without necessarily holding a lock on cq */
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
cq_finish_shutdown_next(exec_ctx, cq);
}
@@ -994,7 +994,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
static int add_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
return 0;
}
@@ -1006,7 +1006,7 @@ static int add_plucker(grpc_completion_queue *cq, void *tag,
static void del_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
cqd->num_pluckers--;
@@ -1020,7 +1020,7 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -1057,7 +1057,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -1181,7 +1181,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
@@ -1195,7 +1195,7 @@ static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
* merging them is a bit tricky and probably not worth it */
static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_pluck() below, that would call pollset shutdown.
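
Aside: the completion_queue.c hunks above tighten memory ordering in atm_inc_if_nonzero — an acquire load plus a full CAS instead of no_barrier operations — so that reads of pending_events pair with the release-style gpr_atm_full_fetch_add(&cqd->pending_events, -1) in the shutdown path. A minimal sketch of the same increment-only-if-nonzero pattern, written with C11 stdatomic.h rather than gRPC's gpr_atm wrappers (an assumption made only to keep the example self-contained):

/* Sketch only; not gRPC code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_if_nonzero(atomic_long *counter) {
  long count = atomic_load_explicit(counter, memory_order_acquire);
  while (count != 0) {
    /* A CAS (rather than a plain fetch_add) preserves the contract:
     * never move the counter away from zero once it has reached zero. */
    if (atomic_compare_exchange_weak_explicit(counter, &count, count + 1,
                                              memory_order_acq_rel,
                                              memory_order_acquire)) {
      return true;
    }
    /* On failure `count` was reloaded; loop and re-check it against zero. */
  }
  return false;
}

int main(void) {
  atomic_long pending = 2;
  printf("%d\n", inc_if_nonzero(&pending)); /* 1: counter is now 3 */
  atomic_store(&pending, 0);
  printf("%d\n", inc_if_nonzero(&pending)); /* 0: counter stays at zero */
  return 0;
}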
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 280315036f..b089da2c54 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
+#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/alarm_internal.h"
@@ -179,14 +180,16 @@ void grpc_shutdown(void) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
- grpc_iomgr_shutdown(&exec_ctx);
- gpr_timers_global_destroy();
- grpc_tracer_shutdown();
+ grpc_executor_shutdown(&exec_ctx);
+ grpc_timer_manager_set_threading(false); // shutdown timer_manager thread
for (i = g_number_of_plugins; i >= 0; i--) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();
}
}
+ grpc_iomgr_shutdown(&exec_ctx);
+ gpr_timers_global_destroy();
+ grpc_tracer_shutdown();
grpc_mdctx_global_shutdown(&exec_ctx);
grpc_handshaker_factory_registry_shutdown(&exec_ctx);
grpc_slice_intern_shutdown();
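
Aside: the init.c hunk reorders grpc_shutdown() so that the executor is shut down and the timer-manager thread is stopped before the plugin destroy callbacks run, with iomgr, timers, and tracing torn down only afterwards. One plausible reading (not stated in the diff) is the usual shutdown-ordering rule: stop background threads that can still schedule work, let higher layers clean up, then destroy the subsystems underneath. A generic sketch of that ordering using plain pthreads, with hypothetical stand-in names (plugin_destroy, iomgr_shutdown) rather than gRPC internals:

/* Sketch only; not gRPC code. Compile with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool g_running = true;

static void *worker(void *arg) {
  (void)arg;
  while (atomic_load(&g_running)) {
    /* ... run timers / closures ... */
  }
  return NULL;
}

static void plugin_destroy(void) { printf("plugin destroyed\n"); }
static void iomgr_shutdown(void) { printf("iomgr shut down\n"); }

int main(void) {
  pthread_t tid;
  pthread_create(&tid, NULL, worker, NULL);

  atomic_store(&g_running, false);   /* 1. stop threads that schedule work */
  pthread_join(tid, NULL);
  plugin_destroy();                  /* 2. higher-level cleanup hooks */
  iomgr_shutdown();                  /* 3. tear down core subsystems last */
  return 0;
}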
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index a8eb1b0425..1d0fd472d0 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -29,6 +29,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -75,7 +76,7 @@ typedef struct requested_call {
grpc_call_details *details;
} batch;
struct {
- registered_method *registered_method;
+ registered_method *method;
gpr_timespec *deadline;
grpc_byte_buffer **optional_payload;
} registered;
@@ -144,7 +145,7 @@ struct call_data {
uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
- request_matcher *request_matcher;
+ request_matcher *matcher;
grpc_byte_buffer *payload;
grpc_closure got_initial_metadata;
@@ -170,7 +171,7 @@ struct registered_method {
grpc_server_register_method_payload_handling payload_handling;
uint32_t flags;
/* one request matcher per method */
- request_matcher request_matcher;
+ request_matcher matcher;
registered_method *next;
};
@@ -333,7 +334,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
- grpc_call_unref(grpc_call_from_top_element(elem));
+ grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
@@ -386,7 +387,7 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
if (server->started) {
- request_matcher_destroy(&rm->request_matcher);
+ request_matcher_destroy(&rm->matcher);
}
gpr_free(rm->method);
gpr_free(rm->host);
@@ -518,7 +519,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *call_elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)call_elem->call_data;
channel_data *chand = (channel_data *)call_elem->channel_data;
- request_matcher *rm = calld->request_matcher;
+ request_matcher *rm = calld->matcher;
grpc_server *server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
@@ -540,6 +541,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
if (request_id == -1) {
continue;
} else {
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
@@ -550,6 +552,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
}
/* no cq to take the request found: queue it on the slow list */
+ GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
@@ -580,7 +583,7 @@ static void finish_start_new_rpc(
return;
}
- calld->request_matcher = rm;
+ calld->matcher = rm;
switch (payload_handling) {
case GRPC_SRM_PAYLOAD_NONE:
@@ -626,7 +629,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -644,7 +647,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -665,7 +668,7 @@ static int num_listeners(grpc_server *server) {
static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
grpc_cq_completion *completion) {
- server_unref(exec_ctx, server);
+ server_unref(exec_ctx, (grpc_server *)server);
}
static int num_channels(grpc_server *server) {
@@ -688,9 +691,9 @@ static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &server->unregistered_request_matcher);
for (registered_method *rm = server->registered_methods; rm;
rm = rm->next) {
- request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher,
+ request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
GRPC_ERROR_REF(error));
- request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher);
}
}
GRPC_ERROR_UNREF(error);
@@ -1111,15 +1114,17 @@ void grpc_server_start(grpc_server *server) {
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->request_matcher,
+ request_matcher_init(&rm->matcher,
(size_t)server->max_requested_calls_per_cq, server);
}
server_ref(server);
server->starting = true;
- GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
- grpc_executor_scheduler),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ &exec_ctx,
+ GRPC_CLOSURE_CREATE(start_listeners, server,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -1262,8 +1267,9 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
- grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
- NULL, gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
}
@@ -1385,7 +1391,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
- rm = &rc->data.registered.registered_method->request_matcher;
+ rm = &rc->data.registered.method->matcher;
break;
}
server->requested_calls_per_cq[cq_idx][request_id] = *rc;
@@ -1430,6 +1436,7 @@ grpc_call_error grpc_server_request_call(
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1476,6 +1483,7 @@ grpc_call_error grpc_server_request_registered_call(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
registered_method *rm = (registered_method *)rmp;
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@@ -1512,7 +1520,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->call = call;
- rc->data.registered.registered_method = rm;
+ rc->data.registered.method = rm;
rc->data.registered.deadline = deadline;
rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;