Diffstat (limited to 'src/core/lib')
-rw-r--r--  src/core/lib/channel/channel_stack.c                  |  15
-rw-r--r--  src/core/lib/channel/channel_stack.h                  |   2
-rw-r--r--  src/core/lib/channel/compress_filter.c                |   2
-rw-r--r--  src/core/lib/channel/connected_channel.c              |   2
-rw-r--r--  src/core/lib/channel/deadline_filter.c                | 104
-rw-r--r--  src/core/lib/channel/deadline_filter.h                |  12
-rw-r--r--  src/core/lib/channel/handshaker.c                     |  37
-rw-r--r--  src/core/lib/channel/handshaker.h                     |  16
-rw-r--r--  src/core/lib/channel/http_client_filter.c             |   2
-rw-r--r--  src/core/lib/channel/http_server_filter.c             |   2
-rw-r--r--  src/core/lib/channel/message_size_filter.c            |   2
-rw-r--r--  src/core/lib/iomgr/timer_generic.c                    |  18
-rw-r--r--  src/core/lib/iomgr/timer_generic.h                    |   2
-rw-r--r--  src/core/lib/iomgr/timer_uv.c                         |  12
-rw-r--r--  src/core/lib/iomgr/timer_uv.h                         |   2
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c |   2
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c |   2
-rw-r--r--  src/core/lib/support/sync_posix.c                     |  10
-rw-r--r--  src/core/lib/surface/call.c                           | 143
-rw-r--r--  src/core/lib/surface/lame_client.c                    |   2
-rw-r--r--  src/core/lib/surface/server.c                         |   6
21 files changed, 249 insertions(+), 146 deletions(-)
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index ec973d4e7f..3fb2a60ac7 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -173,7 +173,6 @@ grpc_error *grpc_call_stack_init(
grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
- grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
@@ -188,13 +187,15 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
- args.start_time = start_time;
+ const grpc_call_element_args args = {
+ .start_time = start_time,
+ .call_stack = call_stack,
+ .server_transport_data = transport_server_data,
+ .context = context,
+ .path = path,
+ .deadline = deadline,
+ };
for (i = 0; i < count; i++) {
- args.call_stack = call_stack;
- args.server_transport_data = transport_server_data;
- args.context = context;
- args.path = path;
- args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 9975d5981e..6d3340bcbf 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -132,7 +132,7 @@ typedef struct {
Implementations may assume that elem->call_data is all zeros. */
grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args);
+ const grpc_call_element_args *args);
void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent);
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 22781c7839..aa41014a21 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -274,7 +274,7 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 068c61c92a..29796f7ca7 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -83,7 +83,7 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r = grpc_transport_init_stream(
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 3e8456e192..5a12d62f1d 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -52,9 +52,6 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- deadline_state->timer_pending = false;
- gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
grpc_call_element_signal_error(
exec_ctx, elem,
@@ -66,53 +63,64 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
-static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- gpr_timespec deadline) {
- grpc_deadline_state* deadline_state = elem->call_data;
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- // Note: We do not start the timer if there is already a timer
- // pending. This should be okay, because this is only called from two
- // functions exported by this module: grpc_deadline_state_start(), which
- // starts the initial timer, and grpc_deadline_state_reset(), which
- // cancels any pre-existing timer before starting a new one. In
- // particular, we want to ensure that if grpc_deadline_state_start()
- // winds up trying to start the timer after grpc_deadline_state_reset()
- // has already done so, we ignore the value from the former.
- if (!deadline_state->timer_pending &&
- gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
- // Take a reference to the call stack, to be owned by the timer.
- GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- deadline_state->timer_pending = true;
- grpc_closure_init(&deadline_state->timer_callback, timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &deadline_state->timer, deadline,
- &deadline_state->timer_callback,
- gpr_now(GPR_CLOCK_MONOTONIC));
- }
-}
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+ if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
+ return;
+ }
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- start_timer_if_needed_locked(exec_ctx, elem, deadline);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ grpc_deadline_timer_state cur_state;
+ grpc_closure* closure = NULL;
+retry:
+ cur_state =
+ (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
+ switch (cur_state) {
+ case GRPC_DEADLINE_STATE_PENDING:
+ // Note: We do not start the timer if there is already a timer pending.
+ return;
+ case GRPC_DEADLINE_STATE_FINISHED:
+ if (gpr_atm_rel_cas(&deadline_state->timer_state,
+ GRPC_DEADLINE_STATE_FINISHED,
+ GRPC_DEADLINE_STATE_PENDING)) {
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure = grpc_closure_create(timer_callback, elem,
+ grpc_schedule_on_exec_ctx);
+ } else {
+ goto retry;
+ }
+ break;
+ case GRPC_DEADLINE_STATE_INITIAL:
+ if (gpr_atm_rel_cas(&deadline_state->timer_state,
+ GRPC_DEADLINE_STATE_INITIAL,
+ GRPC_DEADLINE_STATE_PENDING)) {
+ closure =
+ grpc_closure_init(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
+ } else {
+ goto retry;
+ }
+ break;
+ }
+ GPR_ASSERT(closure);
+ GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
+ grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
+ gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
-static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
- grpc_deadline_state* deadline_state) {
- if (deadline_state->timer_pending) {
- grpc_timer_cancel(exec_ctx, &deadline_state->timer);
- deadline_state->timer_pending = false;
- }
-}
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- gpr_mu_lock(&deadline_state->timer_mu);
- cancel_timer_if_needed_locked(exec_ctx, deadline_state);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
+ GRPC_DEADLINE_STATE_FINISHED)) {
+ grpc_timer_cancel(exec_ctx, &deadline_state->timer);
+ } else {
+ // timer was either in STATE_INITIAL (nothing to cancel)
+ // OR in STATE_FINISHED (again nothing to cancel)
+ }
}
// Callback run when the call is complete.
@@ -120,8 +128,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg;
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
- deadline_state->next_on_complete->cb(
- exec_ctx, deadline_state->next_on_complete->cb_arg, error);
+ grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
+ GRPC_ERROR_REF(error));
}
// Inject our own on_complete callback into op.
@@ -137,14 +145,12 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack) {
grpc_deadline_state* deadline_state = elem->call_data;
deadline_state->call_stack = call_stack;
- gpr_mu_init(&deadline_state->timer_mu);
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
- gpr_mu_destroy(&deadline_state->timer_mu);
}
// Callback and associated state for starting the timer after call stack
@@ -186,10 +192,8 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- cancel_timer_if_needed_locked(exec_ctx, deadline_state);
- start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ cancel_timer_if_needed(exec_ctx, deadline_state);
+ start_timer_if_needed(exec_ctx, elem, new_deadline);
}
void grpc_deadline_state_client_start_transport_stream_op(
@@ -243,7 +247,7 @@ typedef struct server_call_data {
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- grpc_call_element_args* args) {
+ const grpc_call_element_args* args) {
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
grpc_deadline_state_start(exec_ctx, elem, args->deadline);
return GRPC_ERROR_NONE;
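Note: the deadline_filter.c hunks above replace the timer_mu mutex with a three-state atomic machine (INITIAL -> PENDING -> FINISHED, with FINISHED -> PENDING again on reset). Below is a standalone sketch of the same CAS pattern written with C11 atomics instead of gpr_atm; names and memory orders are illustrative, and the real code additionally distinguishes INITIAL from FINISHED so the inlined closure is only reused on the first arm.

/* Illustrative three-state timer lifecycle; not the gRPC implementation. */
#include <stdatomic.h>
#include <stdbool.h>

typedef enum { STATE_INITIAL, STATE_PENDING, STATE_FINISHED } timer_state;

static _Atomic int g_state = STATE_INITIAL;

/* Returns true if the caller won the right to arm the timer. */
static bool try_arm(void) {
  for (;;) {
    int cur = atomic_load_explicit(&g_state, memory_order_acquire);
    if (cur == STATE_PENDING) return false; /* a timer is already pending */
    /* INITIAL or FINISHED -> PENDING; retry if another thread raced us. */
    if (atomic_compare_exchange_weak_explicit(&g_state, &cur, STATE_PENDING,
                                              memory_order_release,
                                              memory_order_relaxed)) {
      return true;
    }
  }
}

/* Returns true if a pending timer was claimed for cancellation. */
static bool try_cancel(void) {
  int expected = STATE_PENDING;
  return atomic_compare_exchange_strong_explicit(
      &g_state, &expected, STATE_FINISHED, memory_order_release,
      memory_order_relaxed);
}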
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index 8e9fed05c7..72cd5cb929 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -35,16 +35,18 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
+typedef enum grpc_deadline_timer_state {
+ GRPC_DEADLINE_STATE_INITIAL,
+ GRPC_DEADLINE_STATE_PENDING,
+ GRPC_DEADLINE_STATE_FINISHED
+} grpc_deadline_timer_state;
+
// State used for filters that enforce call deadlines.
// Must be the first field in the filter's call_data.
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- // Guards access to timer_pending and timer.
- gpr_mu timer_mu;
- // True if the timer callback is currently pending.
- bool timer_pending;
- // The deadline timer.
+ gpr_atm timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
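Note: the "must be the first field" requirement above is what lets the deadline code treat elem->call_data as a grpc_deadline_state* regardless of which filter owns the call data. A hypothetical filter's call_data would look like this (field names are illustrative):

#include <stdbool.h>

#include "src/core/lib/channel/deadline_filter.h"

typedef struct my_filter_call_data {
  grpc_deadline_state deadline_state; /* must remain the first field */
  bool saw_initial_metadata;          /* filter-specific state follows */
} my_filter_call_data;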
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 24e43188cb..1b4240bb10 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -92,6 +92,10 @@ struct grpc_handshake_manager {
void* user_data;
// Handshaker args.
grpc_handshaker_args args;
+ // Links to the previous and next handshake managers in the list of all
+ // pending handshakes. Used on the server side only.
+ grpc_handshake_manager* prev;
+ grpc_handshake_manager* next;
};
grpc_handshake_manager* grpc_handshake_manager_create() {
@@ -101,6 +105,39 @@ grpc_handshake_manager* grpc_handshake_manager_create() {
return mgr;
}
+void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr) {
+ GPR_ASSERT(mgr->prev == NULL);
+ GPR_ASSERT(mgr->next == NULL);
+ mgr->next = *head;
+ if (*head) {
+ (*head)->prev = mgr;
+ }
+ *head = mgr;
+}
+
+void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr) {
+ if (mgr->next != NULL) {
+ mgr->next->prev = mgr->prev;
+ }
+ if (mgr->prev != NULL) {
+ mgr->prev->next = mgr->next;
+ } else {
+ GPR_ASSERT(*head == mgr);
+ *head = mgr->next;
+ }
+}
+
+void grpc_handshake_manager_pending_list_shutdown_all(
+ grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why) {
+ while (head != NULL) {
+ grpc_handshake_manager_shutdown(exec_ctx, head, GRPC_ERROR_REF(why));
+ head = head->next;
+ }
+ GRPC_ERROR_UNREF(why);
+}
+
static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }
void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index a8e3692add..5f97c3fc73 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -163,4 +163,20 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data);
+/// Add \a mgr to the server side list of all pending handshake managers;
+/// the list starts with \a *head.
+// Not thread-safe. Caller needs to synchronize.
+void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr);
+
+/// Remove \a mgr from the server side list of all pending handshake managers.
+// Not thread-safe. Caller needs to synchronize.
+void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr);
+
+/// Shutdown all pending handshake managers on the server side.
+// Not thread-safe. Caller needs to synchronize.
+void grpc_handshake_manager_pending_list_shutdown_all(
+ grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why);
+
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
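Note: since none of the pending-list functions synchronize internally, a server-side caller is expected to guard the list head with its own mutex. A hypothetical sketch (struct and function names are illustrative, not part of this change):

#include <grpc/support/sync.h>

#include "src/core/lib/channel/handshaker.h"

typedef struct server_listener_state {
  gpr_mu mu;
  grpc_handshake_manager *pending_handshakes; /* list head, guarded by mu */
} server_listener_state;

static void track_new_handshake(server_listener_state *s,
                                grpc_handshake_manager *mgr) {
  gpr_mu_lock(&s->mu);
  grpc_handshake_manager_pending_list_add(&s->pending_handshakes, mgr);
  gpr_mu_unlock(&s->mu);
}

static void shutdown_all_handshakes(grpc_exec_ctx *exec_ctx,
                                    server_listener_state *s,
                                    grpc_error *why) {
  gpr_mu_lock(&s->mu);
  /* shutdown_all takes ownership of why (it unrefs it when done) */
  grpc_handshake_manager_pending_list_shutdown_all(exec_ctx,
                                                   s->pending_handshakes, why);
  gpr_mu_unlock(&s->mu);
}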
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 49a2a980e0..c031533dd8 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -386,7 +386,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->on_done_recv_initial_metadata = NULL;
calld->on_done_recv_trailing_metadata = NULL;
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index eebc0f9a10..37c58755d5 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -343,7 +343,7 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index 3a460a935f..b424c0d2ac 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -166,7 +166,7 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- grpc_call_element_args* args) {
+ const grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 8a5617e7c1..6d638bcbaa 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -180,25 +180,25 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
timer->deadline = deadline;
- timer->triggered = 0;
if (!g_initialized) {
- timer->triggered = 1;
+ timer->pending = false;
grpc_closure_sched(
exec_ctx, timer->closure,
GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
return;
}
+ gpr_mu_lock(&shard->mu);
+ timer->pending = true;
if (gpr_time_cmp(deadline, now) <= 0) {
- timer->triggered = 1;
+ timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&shard->mu);
+ /* early out */
return;
}
- /* TODO(ctiller): check deadline expired */
-
- gpr_mu_lock(&shard->mu);
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
@@ -243,9 +243,9 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
- if (!timer->triggered) {
+ if (timer->pending) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
- timer->triggered = 1;
+ timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
} else {
@@ -296,7 +296,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
}
timer = grpc_timer_heap_top(&shard->heap);
if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
- timer->triggered = 1;
+ timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
return timer;
}
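Note: with the rename from triggered to pending, the invariant is that the timer's closure is scheduled exactly once: on expiry, on the early out at init time, or on cancellation. A hypothetical caller of the unchanged public timer API (the helper name is illustrative):

#include <stdbool.h>

#include <grpc/support/time.h>

#include "src/core/lib/iomgr/timer.h"

/* Arms a timer and optionally cancels it right away. If the timer is still
   pending when grpc_timer_cancel runs, on_fire is scheduled once with
   GRPC_ERROR_CANCELLED; if it already fired (or expired at init), the cancel
   is a no-op. */
static void arm_and_maybe_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                                 gpr_timespec deadline, grpc_closure *on_fire,
                                 bool cancel_now) {
  grpc_timer_init(exec_ctx, timer, deadline, on_fire,
                  gpr_now(GPR_CLOCK_MONOTONIC));
  if (cancel_now) {
    grpc_timer_cancel(exec_ctx, timer);
  }
}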
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
index 9d901c7e68..1608dce9fb 100644
--- a/src/core/lib/iomgr/timer_generic.h
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -40,7 +40,7 @@
struct grpc_timer {
gpr_timespec deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
- int triggered;
+ bool pending;
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index fa2cdee964..f28a14405d 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -53,8 +53,8 @@ static void stop_uv_timer(uv_timer_t *handle) {
void run_expired_timer(uv_timer_t *handle) {
grpc_timer *timer = (grpc_timer *)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GPR_ASSERT(!timer->triggered);
- timer->triggered = 1;
+ GPR_ASSERT(timer->pending);
+ timer->pending = 0;
grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx);
@@ -67,11 +67,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_timer_t *uv_timer;
timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) {
- timer->triggered = 1;
+ timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
- timer->triggered = 0;
+ timer->pending = 1;
timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
uv_timer = gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
@@ -81,8 +81,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
- if (!timer->triggered) {
- timer->triggered = 1;
+ if (timer->pending) {
+ timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer);
}
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h
index 13cf8bd4fa..9870cd4a5c 100644
--- a/src/core/lib/iomgr/timer_uv.h
+++ b/src/core/lib/iomgr/timer_uv.h
@@ -41,7 +41,7 @@ struct grpc_timer {
/* This is actually a uv_timer_t*, but we want to keep platform-specific
types out of headers */
void *uv_timer;
- int triggered;
+ int pending;
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index b9bbe1b304..a23082a866 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -302,7 +302,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 36e81d6501..14619d97ca 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -197,7 +197,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
diff --git a/src/core/lib/support/sync_posix.c b/src/core/lib/support/sync_posix.c
index de0f0484b5..16e7d6e12a 100644
--- a/src/core/lib/support/sync_posix.c
+++ b/src/core/lib/support/sync_posix.c
@@ -42,8 +42,10 @@
#include <time.h>
#include "src/core/lib/profiling/timers.h"
-#ifdef GPR_MU_COUNTERS
-gpr_atm grpc_mu_locks = 0;
+#ifdef GPR_LOW_LEVEL_COUNTERS
+gpr_atm gpr_mu_locks = 0;
+gpr_atm gpr_counter_atm_cas = 0;
+gpr_atm gpr_counter_atm_add = 0;
#endif
void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
@@ -51,8 +53,8 @@ void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
void gpr_mu_lock(gpr_mu* mu) {
-#ifdef GPR_MU_COUNTERS
- gpr_atm_no_barrier_fetch_add(&grpc_mu_locks, 1);
+#ifdef GPR_LOW_LEVEL_COUNTERS
+ GPR_ATM_INC_COUNTER(gpr_mu_locks);
#endif
GPR_TIMER_BEGIN("gpr_mu_lock", 0);
GPR_ASSERT(pthread_mutex_lock(mu) == 0);
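Note: GPR_ATM_INC_COUNTER is defined outside this diff. Assuming it reduces to a relaxed fetch-add when GPR_LOW_LEVEL_COUNTERS is defined and to a no-op otherwise, a plausible sketch is:

#include <grpc/support/atm.h>

#ifdef GPR_LOW_LEVEL_COUNTERS
#define GPR_ATM_INC_COUNTER(counter) \
  gpr_atm_no_barrier_fetch_add(&(counter), 1)
#else
#define GPR_ATM_INC_COUNTER(counter) \
  do {                               \
  } while (0)
#endif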
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 96463d42c8..0f03d6d9cb 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -101,6 +101,17 @@ typedef struct {
grpc_error *error;
} received_status;
+static gpr_atm pack_received_status(received_status r) {
+ return r.is_set ? (1 | (gpr_atm)r.error) : 0;
+}
+
+static received_status unpack_received_status(gpr_atm atm) {
+ return (atm & 1) == 0
+ ? (received_status){.is_set = false, .error = GRPC_ERROR_NONE}
+ : (received_status){.is_set = true,
+ .error = (grpc_error *)(atm & ~(gpr_atm)1)};
+}
+
#define MAX_ERRORS_PER_BATCH 3
typedef struct batch_control {
@@ -142,8 +153,6 @@ struct grpc_call {
bool destroy_called;
/** flag indicating that cancellation is inherited */
bool cancellation_is_inherited;
- /** bitmask of live batches */
- uint8_t used_batches;
/** which ops are in-flight */
bool sent_initial_metadata;
bool sending_message;
@@ -165,8 +174,8 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
- /* Received call statuses from various sources */
- received_status status[STATUS_SOURCE_COUNT];
+ /* Packed received call statuses from various sources */
+ gpr_atm status[STATUS_SOURCE_COUNT];
/* Call data useful for reporting. Only valid after the call has
* completed */
@@ -445,7 +454,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- GRPC_ERROR_UNREF(c->status[i].error);
+ GRPC_ERROR_UNREF(
+ unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error);
}
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
@@ -613,13 +623,12 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
*/
static bool get_final_status_from(
- grpc_call *call, status_source from_source, bool allow_ok_status,
+ grpc_call *call, grpc_error *error, bool allow_ok_status,
void (*set_value)(grpc_status_code code, void *user_data),
void *set_value_user_data, grpc_slice *details) {
grpc_status_code code;
const char *msg = NULL;
- grpc_error_get_status(call->status[from_source].error, call->send_deadline,
- &code, &msg, NULL);
+ grpc_error_get_status(error, call->send_deadline, &code, &msg, NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
@@ -637,12 +646,15 @@ static void get_final_status(grpc_call *call,
void *user_data),
void *set_value_user_data, grpc_slice *details) {
int i;
+ received_status status[STATUS_SOURCE_COUNT];
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
+ }
if (grpc_call_error_trace) {
gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- gpr_log(GPR_DEBUG, " %d: %s", i,
- grpc_error_string(call->status[i].error));
+ if (status[i].is_set) {
+ gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error));
}
}
}
@@ -652,9 +664,9 @@ static void get_final_status(grpc_call *call,
/* search for the best status we can present: ideally the error we use has a
clearly defined grpc-status, and we'll prefer that. */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set &&
- grpc_error_has_clear_grpc_status(call->status[i].error)) {
- if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
+ if (status[i].is_set &&
+ grpc_error_has_clear_grpc_status(status[i].error)) {
+ if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@@ -662,8 +674,8 @@ static void get_final_status(grpc_call *call,
}
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
+ if (status[i].is_set) {
+ if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@@ -680,12 +692,13 @@ static void get_final_status(grpc_call *call,
static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
status_source source, grpc_error *error) {
- if (call->status[source].is_set) {
+ if (!gpr_atm_rel_cas(&call->status[source],
+ pack_received_status((received_status){
+ .is_set = false, .error = GRPC_ERROR_NONE}),
+ pack_received_status((received_status){
+ .is_set = true, .error = error}))) {
GRPC_ERROR_UNREF(error);
- return;
}
- call->status[source].is_set = true;
- call->status[source].error = error;
}
/*******************************************************************************
@@ -996,25 +1009,48 @@ static bool are_initial_metadata_flags_valid(uint32_t flags, bool is_client) {
return !(flags & invalid_positions);
}
-static batch_control *allocate_batch_control(grpc_call *call) {
- size_t i;
- for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
- if ((call->used_batches & (1 << i)) == 0) {
- call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i));
- return &call->active_batches[i];
- }
+static int batch_slot_for_op(grpc_op_type type) {
+ switch (type) {
+ case GRPC_OP_SEND_INITIAL_METADATA:
+ return 0;
+ case GRPC_OP_SEND_MESSAGE:
+ return 1;
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ return 2;
+ case GRPC_OP_RECV_INITIAL_METADATA:
+ return 3;
+ case GRPC_OP_RECV_MESSAGE:
+ return 4;
+ case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ return 5;
+ }
+ GPR_UNREACHABLE_CODE(return 123456789);
+}
+
+static batch_control *allocate_batch_control(grpc_call *call,
+ const grpc_op *ops,
+ size_t num_ops) {
+ int slot = batch_slot_for_op(ops[0].op);
+ for (size_t i = 1; i < num_ops; i++) {
+ int op_slot = batch_slot_for_op(ops[i].op);
+ slot = GPR_MIN(slot, op_slot);
}
- return NULL;
+ batch_control *bctl = &call->active_batches[slot];
+ if (bctl->call != NULL) {
+ return NULL;
+ }
+ memset(bctl, 0, sizeof(*bctl));
+ bctl->call = call;
+ return bctl;
}
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
batch_control *bctl = user_data;
grpc_call *call = bctl->call;
- gpr_mu_lock(&call->mu);
- call->used_batches = (uint8_t)(
- call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches)));
- gpr_mu_unlock(&call->mu);
+ bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
@@ -1097,12 +1133,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
+ bctl->call = NULL;
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
- gpr_mu_lock(&call->mu);
- bctl->call->used_batches =
- (uint8_t)(bctl->call->used_batches &
- ~(uint8_t)(1 << (bctl - bctl->call->active_batches)));
- gpr_mu_unlock(&call->mu);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1314,6 +1346,11 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
finish_batch_step(exec_ctx, bctl);
}
+static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p,
+ grpc_cq_completion *completion) {
+ gpr_free(completion);
+}
+
static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_call *call, const grpc_op *ops,
size_t nops, void *notify_tag,
@@ -1328,31 +1365,33 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_metadata compression_md;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
-
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
- /* TODO(ctiller): this feels like it could be made lock-free */
- gpr_mu_lock(&call->mu);
- bctl = allocate_batch_control(call);
- memset(bctl, 0, sizeof(*bctl));
- bctl->call = call;
- bctl->notify_tag = notify_tag;
- bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
-
- grpc_transport_stream_op *stream_op = &bctl->op;
- stream_op->covered_by_poller = true;
-
if (nops == 0) {
- GRPC_CALL_INTERNAL_REF(call, "completion");
if (!is_notify_tag_closure) {
grpc_cq_begin_op(call->cq, notify_tag);
+ grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ gpr_malloc(sizeof(grpc_cq_completion)));
+ } else {
+ grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE);
}
- gpr_mu_unlock(&call->mu);
- post_batch_completion(exec_ctx, bctl);
error = GRPC_CALL_OK;
goto done;
}
+ bctl = allocate_batch_control(call, ops, nops);
+ if (bctl == NULL) {
+ return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ }
+ bctl->notify_tag = notify_tag;
+ bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
+
+ gpr_mu_lock(&call->mu);
+ grpc_transport_stream_op *stream_op = &bctl->op;
+ memset(stream_op, 0, sizeof(*stream_op));
+ stream_op->covered_by_poller = true;
+
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
op = &ops[i];
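Note: pack_received_status/unpack_received_status near the top of this file rely on grpc_error* values being at least 2-byte aligned, so bit 0 of the gpr_atm is free to carry the is_set flag. A standalone sketch of the same low-bit tagging idea, using uintptr_t instead of gpr_atm (illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  bool is_set;
  void *error; /* assumed to be at least 2-byte aligned */
} status_slot;

static uintptr_t pack(status_slot s) {
  return s.is_set ? ((uintptr_t)s.error | 1u) : 0u;
}

static status_slot unpack(uintptr_t v) {
  status_slot s;
  s.is_set = (v & 1u) != 0;
  s.error = s.is_set ? (void *)(v & ~(uintptr_t)1) : NULL;
  return s;
}

int main(void) {
  int dummy; /* stand-in for a heap-allocated error object */
  status_slot in = {true, &dummy};
  status_slot out = unpack(pack(in));
  assert(out.is_set && out.error == &dummy);
  assert(!unpack(pack((status_slot){false, NULL})).is_set);
  return 0;
}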
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 48de0e1d5b..49bc4c114b 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -122,7 +122,7 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
gpr_atm_no_barrier_store(&calld->filled_metadata, 0);
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index d4d86c9176..d6a401ae82 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -879,7 +879,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
@@ -1194,7 +1194,9 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
crm->server_registered_method = rm;
crm->flags = rm->flags;
crm->has_host = has_host;
- crm->host = host;
+ if (has_host) {
+ crm->host = host;
+ }
crm->method = method;
}
GPR_ASSERT(slots <= UINT32_MAX);