Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/client_channel/client_channel.c  147
-rw-r--r--  src/core/lib/iomgr/udp_server.c                 16
-rw-r--r--  src/core/lib/iomgr/udp_server.h                 12
-rw-r--r--  src/core/lib/support/arena.c                    98
-rw-r--r--  src/core/lib/support/arena.h                    54
-rw-r--r--  src/core/lib/surface/call.c                    109
-rw-r--r--  src/core/lib/transport/transport.c               3
7 files changed, 248 insertions, 191 deletions
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index bf64f84772..f2475cf6ae 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -71,7 +71,8 @@
*/
typedef enum {
- WAIT_FOR_READY_UNSET,
+ /* zero so it can be default initialized */
+ WAIT_FOR_READY_UNSET = 0,
WAIT_FOR_READY_FALSE,
WAIT_FOR_READY_TRUE
} wait_for_ready_value;
@@ -631,7 +632,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
typedef enum {
- GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+ /* zero so that it can be default-initialized */
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;
@@ -653,7 +655,6 @@ typedef struct client_channel_call_data {
gpr_timespec call_start_time;
gpr_timespec deadline;
method_parameters *method_params;
- grpc_closure read_service_config;
grpc_error *cancel_error;
@@ -726,6 +727,47 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
gpr_free(ops);
}
+// Sets calld->method_params.
+// If the method params specify a timeout, populates
+// *per_method_deadline and returns true.
+static bool set_call_method_params_from_service_config_locked(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ gpr_timespec *per_method_deadline) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ if (chand->method_params_table != NULL) {
+ calld->method_params = grpc_method_config_table_get(
+ exec_ctx, chand->method_params_table, calld->path);
+ if (calld->method_params != NULL) {
+ method_parameters_ref(calld->method_params);
+ if (gpr_time_cmp(calld->method_params->timeout,
+ gpr_time_0(GPR_TIMESPAN)) != 0) {
+ *per_method_deadline =
+ gpr_time_add(calld->call_start_time, calld->method_params->timeout);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ /* apply service-config level configuration to the call (now that we're
+ * certain it exists) */
+ call_data *calld = elem->call_data;
+ gpr_timespec per_method_deadline;
+ if (set_call_method_params_from_service_config_locked(exec_ctx, elem,
+ &per_method_deadline)) {
+ // If the deadline from the service config is shorter than the one
+ // from the client API, reset the deadline timer.
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+ }
+ }
+}
+
static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
@@ -851,6 +893,7 @@ static bool pick_subchannel_locked(
}
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
+ apply_final_configuration_locked(exec_ctx, elem);
grpc_lb_policy *lb_policy = chand->lb_policy;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
// If the application explicitly set wait_for_ready, use that.
@@ -1060,114 +1103,18 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
-// Sets calld->method_params.
-// If the method params specify a timeout, populates
-// *per_method_deadline and returns true.
-static bool set_call_method_params_from_service_config_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- gpr_timespec *per_method_deadline) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- if (chand->method_params_table != NULL) {
- calld->method_params = grpc_method_config_table_get(
- exec_ctx, chand->method_params_table, calld->path);
- if (calld->method_params != NULL) {
- method_parameters_ref(calld->method_params);
- if (gpr_time_cmp(calld->method_params->timeout,
- gpr_time_0(GPR_TIMESPAN)) != 0) {
- *per_method_deadline =
- gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- return true;
- }
- }
- }
- return false;
-}
-
-// Gets data from the service config. Invoked when the resolver returns
-// its initial result.
-static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = arg;
- call_data *calld = elem->call_data;
- // If this is an error, there's no point in looking at the service config.
- if (error == GRPC_ERROR_NONE) {
- gpr_timespec per_method_deadline;
- if (set_call_method_params_from_service_config_locked(
- exec_ctx, elem, &per_method_deadline)) {
- // If the deadline from the service config is shorter than the one
- // from the client API, reset the deadline timer.
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
- calld->deadline = per_method_deadline;
- grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
- }
- }
- }
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
-}
-
-static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- // If the resolver has already returned results, then we can access
- // the service config parameters immediately. Otherwise, we need to
- // defer that work until the resolver returns an initial result.
- if (chand->lb_policy != NULL) {
- // We already have a resolver result, so check for service config.
- gpr_timespec per_method_deadline;
- if (set_call_method_params_from_service_config_locked(
- exec_ctx, elem, &per_method_deadline)) {
- calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
- }
- } else {
- // We don't yet have a resolver result, so register a callback to
- // get the service config data once the resolver returns.
- // Take a reference to the call stack to be owned by the callback.
- GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
- grpc_closure_init(&calld->read_service_config, read_service_config_locked,
- elem, grpc_combiner_scheduler(chand->combiner, false));
- grpc_closure_list_append(&chand->waiting_for_config_closures,
- &calld->read_service_config, GRPC_ERROR_NONE);
- }
- // Start the deadline timer with the current deadline value. If we
- // do not yet have service config data, then the timer may be reset
- // later.
- grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "initial_read_service_config");
-}
-
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// Initialize data members.
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->method_params = NULL;
- calld->cancel_error = GRPC_ERROR_NONE;
- gpr_atm_rel_store(&calld->subchannel_call, 0);
- calld->connected_subchannel = NULL;
- calld->waiting_ops = NULL;
- calld->waiting_ops_count = 0;
- calld->waiting_ops_capacity = 0;
- calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
- calld->pollent = NULL;
- GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config");
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_init(&calld->read_service_config,
- initial_read_service_config_locked, elem,
- grpc_combiner_scheduler(chand->combiner, false)),
- GRPC_ERROR_NONE);
+ grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
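
Both enums above are pinned to 0 because the call object (and with it this element's call_data) is gpr_zalloc-ed, and after this change cc_init_call_elem no longer writes each field explicitly: an all-zero-bytes struct must decode to a valid "unset" / "not creating" state. A standalone sketch of the idiom, with illustrative names not taken from the patch:

#include <stdlib.h>

typedef enum {
  WAIT_UNSET = 0, /* zero: the value a zeroed struct implicitly holds */
  WAIT_FALSE,
  WAIT_TRUE
} tri_state;

typedef struct {
  tri_state wait_for_ready;
} opts;

int main(void) {
  opts *o = calloc(1, sizeof(*o)); /* all bytes zero, like gpr_zalloc */
  if (o == NULL) return 1;
  int ok = (o->wait_for_ready == WAIT_UNSET); /* true without any assignment */
  free(o);
  return ok ? 0 : 1;
}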
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index d1bcd89af1..71e295770a 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -109,8 +109,8 @@ struct grpc_udp_server {
grpc_pollset **pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
- /* The parent grpc server */
- grpc_server *grpc_server;
+ /* opaque object to pass to callbacks */
+ void *user_data;
};
grpc_udp_server *grpc_udp_server_create(void) {
@@ -178,7 +178,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
/* Call the orphan_cb to signal that the FD is about to be closed and
* should no longer be used. */
GPR_ASSERT(sp->orphan_cb);
- sp->orphan_cb(exec_ctx, sp->emfd);
+ sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"udp_listener_shutdown");
@@ -204,7 +204,7 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
- sp->orphan_cb(exec_ctx, sp->emfd);
+ sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
grpc_fd_shutdown(exec_ctx, sp->emfd,
GRPC_ERROR_CREATE("Server destroyed"));
}
@@ -299,7 +299,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* Tell the registered callback that data is available to read. */
GPR_ASSERT(sp->read_cb);
- sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server);
+ sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data);
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
@@ -322,7 +322,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* Tell the registered callback that the socket is writeable. */
GPR_ASSERT(sp->write_cb);
- sp->write_cb(exec_ctx, sp->emfd);
+ sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data);
/* Re-arm the notification event so we get another chance to write. */
grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
@@ -464,13 +464,13 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
grpc_pollset **pollsets, size_t pollset_count,
- grpc_server *server) {
+ void *user_data) {
size_t i;
gpr_mu_lock(&s->mu);
grpc_udp_listener *sp;
GPR_ASSERT(s->active_ports == 0);
s->pollsets = pollsets;
- s->grpc_server = server;
+ s->user_data = user_data;
sp = s->head;
while (sp != NULL) {
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index ed63fa7d81..90842a47f0 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -47,23 +47,23 @@ typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket. */
typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
- struct grpc_server *server);
+ void *user_data);
/* Called when the socket is writeable. */
-typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx,
- grpc_fd *emfd);
+typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
+ void *user_data);
/* Called when the grpc_fd is about to be orphaned (and the FD closed). */
typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx,
- grpc_fd *emfd);
+ grpc_fd *emfd, void *user_data);
/* Create a server, initially not bound to any ports */
grpc_udp_server *grpc_udp_server_create(void);
-/* Start listening to bound ports */
+/* Start listening to bound ports. user_data is passed to callbacks. */
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
grpc_pollset **pollsets, size_t pollset_count,
- struct grpc_server *server);
+ void *user_data);
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
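
With the hard-wired grpc_server pointer gone, any owner can thread its own context through the UDP server. A hedged sketch of callbacks under the new signatures; my_ctx and the handler bodies are illustrative, not part of the patch:

#include "src/core/lib/iomgr/udp_server.h"

typedef struct {
  long packets_seen; /* whatever per-server state the owner keeps */
} my_ctx;

static void my_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                       void *user_data) {
  my_ctx *ctx = user_data;
  ctx->packets_seen++;
  /* recvmsg() on the fd behind emfd and hand the datagram upward ... */
}

static void my_on_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                         void *user_data) {
  /* emfd is about to be closed; drop anything in user_data referring to it */
}

static void start(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
                  grpc_pollset **pollsets, size_t pollset_count, my_ctx *ctx) {
  /* ctx comes back as user_data in every read/write/orphan callback */
  grpc_udp_server_start(exec_ctx, server, pollsets, pollset_count, ctx);
}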
diff --git a/src/core/lib/support/arena.c b/src/core/lib/support/arena.c
new file mode 100644
index 0000000000..7bcb983f24
--- /dev/null
+++ b/src/core/lib/support/arena.c
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/support/arena.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
+#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
+ (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
+
+typedef struct zone {
+ size_t size_begin;
+ size_t size_end;
+ gpr_atm next_atm;
+} zone;
+
+struct gpr_arena {
+ gpr_atm size_so_far;
+ zone initial_zone;
+};
+
+gpr_arena *gpr_arena_create(size_t initial_size) {
+ initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
+ gpr_arena *a = gpr_zalloc(sizeof(gpr_arena) + initial_size);
+ a->initial_zone.size_end = initial_size;
+ return a;
+}
+
+size_t gpr_arena_destroy(gpr_arena *arena) {
+ gpr_atm size = gpr_atm_no_barrier_load(&arena->size_so_far);
+ zone *z = (zone *)gpr_atm_no_barrier_load(&arena->initial_zone.next_atm);
+ gpr_free(arena);
+ while (z) {
+ zone *next_z = (zone *)gpr_atm_no_barrier_load(&z->next_atm);
+ gpr_free(z);
+ z = next_z;
+ }
+ return (size_t)size;
+}
+
+void *gpr_arena_alloc(gpr_arena *arena, size_t size) {
+ size = ROUND_UP_TO_ALIGNMENT_SIZE(size);
+ size_t start =
+ (size_t)gpr_atm_no_barrier_fetch_add(&arena->size_so_far, size);
+ zone *z = &arena->initial_zone;
+ while (start > z->size_end) {
+ zone *next_z = (zone *)gpr_atm_acq_load(&z->next_atm);
+ if (next_z == NULL) {
+ size_t next_z_size = (size_t)gpr_atm_no_barrier_load(&arena->size_so_far);
+ next_z = gpr_zalloc(sizeof(zone) + next_z_size);
+ next_z->size_begin = z->size_end;
+ next_z->size_end = z->size_end + next_z_size;
+ if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next_z)) {
+ gpr_free(next_z);
+ next_z = (zone *)gpr_atm_acq_load(&z->next_atm);
+ }
+ }
+ z = next_z;
+ }
+ if (start + size > z->size_end) {
+ return gpr_arena_alloc(arena, size);
+ }
+ GPR_ASSERT(start >= z->size_begin);
+ GPR_ASSERT(start + size <= z->size_end);
+ return ((char *)(z + 1)) + start - z->size_begin;
+}
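
gpr_arena_alloc is lock-free. The fetch-add on size_so_far claims a byte range; a thread whose claim runs past the current zone optimistically builds a successor zone and tries to install it with a single release-CAS, and the loser of that race frees its candidate and adopts the winner's. The publish-or-discard step in isolation (make_zone is a hypothetical helper, not part of the patch):

static zone *get_or_install_next(zone *z) {
  zone *next = (zone *)gpr_atm_acq_load(&z->next_atm);
  if (next == NULL) {
    next = make_zone(); /* hypothetical: gpr_zalloc a sized candidate */
    if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next)) {
      gpr_free(next); /* lost the race: another thread published first */
      next = (zone *)gpr_atm_acq_load(&z->next_atm);
    }
  }
  return next;
}

Note also the tail call at the end of gpr_arena_alloc: a claim that straddles a zone boundary is simply abandoned (those bytes are wasted) and the allocation is retried against a fresh claim.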
diff --git a/src/core/lib/support/arena.h b/src/core/lib/support/arena.h
new file mode 100644
index 0000000000..c28033ffc3
--- /dev/null
+++ b/src/core/lib/support/arena.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// \file Arena based allocator
+// Allows very fast allocation of memory, but that memory cannot be freed until
+// the arena as a whole is freed
+// Tracks the total memory allocated against it, so that future arenas can
+// pre-allocate the right amount of memory
+
+#ifndef GRPC_CORE_LIB_SUPPORT_ARENA_H
+#define GRPC_CORE_LIB_SUPPORT_ARENA_H
+
+#include <stddef.h>
+
+typedef struct gpr_arena gpr_arena;
+
+// Create an arena, with \a initial_size bytes in the first allocated buffer
+gpr_arena *gpr_arena_create(size_t initial_size);
+// Allocate \a size bytes from the arena
+void *gpr_arena_alloc(gpr_arena *arena, size_t size);
+// Destroy an arena, returning the total number of bytes allocated
+size_t gpr_arena_destroy(gpr_arena *arena);
+
+#endif /* GRPC_CORE_LIB_SUPPORT_ARENA_H */
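
A hypothetical usage sketch of the new API (not part of the patch). Allocation is bump-pointer fast, nothing is freed individually, and the byte count returned by gpr_arena_destroy can seed the initial_size of a future arena, as the header's comment suggests:

#include "src/core/lib/support/arena.h"
#include <stddef.h>

static size_t run_once(size_t size_hint) {
  gpr_arena *arena = gpr_arena_create(size_hint); /* first buffer size */
  int *counters = gpr_arena_alloc(arena, 16 * sizeof(int));
  char *scratch = gpr_arena_alloc(arena, 64);
  counters[0] = 1;
  scratch[0] = '\0';
  /* no per-allocation frees: everything dies with the arena */
  return gpr_arena_destroy(arena); /* total bytes this arena was asked for */
}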
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index c2547c5147..47f36be5fd 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -144,8 +144,8 @@ struct grpc_call {
grpc_call *parent;
grpc_call *first_child;
gpr_timespec start_time;
- /* TODO(ctiller): share with cq if possible? */
- gpr_mu mu;
+ /* protects first_child, and child next/prev links */
+ gpr_mu child_list_mu;
/* client or server call */
bool is_client;
@@ -160,8 +160,8 @@ struct grpc_call {
bool received_initial_metadata;
bool receiving_message;
bool requested_final_op;
- bool received_final_op;
- bool sent_any_op;
+ gpr_atm any_ops_sent_atm;
+ gpr_atm received_final_op_atm;
/* have we received initial metadata */
bool has_initial_md_been_received;
@@ -275,7 +275,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_zalloc(sizeof(grpc_call) + channel_stack->call_stack_size);
*out_call = call;
- gpr_mu_init(&call->mu);
+ gpr_mu_init(&call->child_list_mu);
call->channel = args->channel;
call->cq = args->cq;
call->parent = args->parent_call;
@@ -313,7 +313,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
- gpr_mu_lock(&args->parent_call->mu);
+ gpr_mu_lock(&args->parent_call->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
@@ -341,6 +341,10 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
+ if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
+ cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
+ GRPC_ERROR_CANCELLED);
+ }
}
if (args->parent_call->first_child == NULL) {
@@ -353,7 +357,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
call;
}
- gpr_mu_unlock(&args->parent_call->mu);
+ gpr_mu_unlock(&args->parent_call->child_list_mu);
}
call->send_deadline = send_deadline;
@@ -435,7 +439,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
if (c->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
}
- gpr_mu_destroy(&c->mu);
+ gpr_mu_destroy(&c->child_list_mu);
for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md);
}
@@ -456,7 +460,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
GRPC_ERROR_UNREF(
- unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error);
+ unpack_received_status(gpr_atm_acq_load(&c->status[i])).error);
}
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
@@ -473,7 +477,7 @@ void grpc_call_destroy(grpc_call *c) {
GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
if (parent) {
- gpr_mu_lock(&parent->mu);
+ gpr_mu_lock(&parent->child_list_mu);
if (c == parent->first_child) {
parent->first_child = c->sibling_next;
if (c == parent->first_child) {
@@ -482,15 +486,14 @@ void grpc_call_destroy(grpc_call *c) {
c->sibling_prev->sibling_next = c->sibling_next;
c->sibling_next->sibling_prev = c->sibling_prev;
}
- gpr_mu_unlock(&parent->mu);
+ gpr_mu_unlock(&parent->child_list_mu);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
}
- gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->destroy_called);
c->destroy_called = 1;
- cancel = c->sent_any_op && !c->received_final_op;
- gpr_mu_unlock(&c->mu);
+ cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) &&
+ !gpr_atm_acq_load(&c->received_final_op_atm);
if (cancel) {
cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
@@ -555,53 +558,25 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
"c=%p, status=%d, description=%s, reserved=%p)",
4, (c, (int)status, description, reserved));
GPR_ASSERT(reserved == NULL);
- gpr_mu_lock(&c->mu);
cancel_with_status(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, status,
description);
- gpr_mu_unlock(&c->mu);
grpc_exec_ctx_finish(&exec_ctx);
return GRPC_CALL_OK;
}
-typedef struct termination_closure {
- grpc_closure closure;
- grpc_call *call;
- grpc_transport_stream_op op;
-} termination_closure;
-
-static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp,
- grpc_error *error) {
- termination_closure *tc = tcp;
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "termination");
- gpr_free(tc);
-}
-
-static void send_termination(grpc_exec_ctx *exec_ctx, void *tcp,
+static void done_termination(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- termination_closure *tc = tcp;
- memset(&tc->op, 0, sizeof(tc->op));
- tc->op.cancel_error = GRPC_ERROR_REF(error);
- /* reuse closure to catch completion */
- tc->op.on_complete = grpc_closure_init(&tc->closure, done_termination, tc,
- grpc_schedule_on_exec_ctx);
- execute_op(exec_ctx, tc->call, &tc->op);
-}
-
-static void terminate_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
- grpc_error *error) {
- termination_closure *tc = gpr_malloc(sizeof(*tc));
- memset(tc, 0, sizeof(*tc));
- tc->call = c;
- GRPC_CALL_INTERNAL_REF(tc->call, "termination");
- grpc_closure_sched(exec_ctx, grpc_closure_init(&tc->closure, send_termination,
- tc, grpc_schedule_on_exec_ctx),
- error);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination");
}
static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) {
+ GRPC_CALL_INTERNAL_REF(c, "termination");
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- terminate_with_error(exec_ctx, c, error);
+ grpc_transport_stream_op *op = grpc_make_transport_stream_op(
+ grpc_closure_create(done_termination, c, grpc_schedule_on_exec_ctx));
+ op->cancel_error = error;
+ execute_op(exec_ctx, c, op);
}
static grpc_error *error_from_status(grpc_status_code status,
@@ -715,9 +690,7 @@ static void set_incoming_compression_algorithm(
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
grpc_call *call) {
grpc_compression_algorithm algorithm;
- gpr_mu_lock(&call->mu);
algorithm = call->incoming_compression_algorithm;
- gpr_mu_unlock(&call->mu);
return algorithm;
}
@@ -729,9 +702,7 @@ static grpc_compression_algorithm compression_algorithm_for_level_locked(
uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
uint32_t flags;
- gpr_mu_lock(&call->mu);
flags = call->test_only_last_message_flags;
- gpr_mu_unlock(&call->mu);
return flags;
}
@@ -785,9 +756,7 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
uint32_t encodings_accepted_by_peer;
- gpr_mu_lock(&call->mu);
encodings_accepted_by_peer = call->encodings_accepted_by_peer;
- gpr_mu_unlock(&call->mu);
return encodings_accepted_by_peer;
}
@@ -1056,7 +1025,7 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
}
static grpc_error *consolidate_batch_errors(batch_control *bctl) {
- size_t n = (size_t)gpr_atm_no_barrier_load(&bctl->num_errors);
+ size_t n = (size_t)gpr_atm_acq_load(&bctl->num_errors);
if (n == 0) {
return GRPC_ERROR_NONE;
} else if (n == 1) {
@@ -1083,8 +1052,6 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
grpc_call *call = bctl->call;
grpc_error *error = consolidate_batch_errors(bctl);
- gpr_mu_lock(&call->mu);
-
if (bctl->send_initial_metadata) {
grpc_metadata_batch_destroy(
exec_ctx,
@@ -1103,20 +1070,23 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
recv_trailing_filter(exec_ctx, call, md);
- call->received_final_op = true;
/* propagate cancellation to any interested children */
+ gpr_atm_rel_store(&call->received_final_op_atm, 1);
+ gpr_mu_lock(&call->child_list_mu);
child_call = call->first_child;
if (child_call != NULL) {
do {
next_child_call = child_call->sibling_next;
if (child_call->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
- grpc_call_cancel(child_call, NULL);
+ cancel_with_error(exec_ctx, child_call, STATUS_FROM_API_OVERRIDE,
+ GRPC_ERROR_CANCELLED);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
}
child_call = next_child_call;
} while (child_call != call->first_child);
}
+ gpr_mu_unlock(&call->child_list_mu);
if (call->is_client) {
get_final_status(call, set_status_value_directly,
@@ -1130,7 +1100,6 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
- gpr_mu_unlock(&call->mu);
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
@@ -1221,7 +1190,6 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
- gpr_mu_lock(&bctl->call->mu);
if (error != GRPC_ERROR_NONE) {
if (call->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
@@ -1233,11 +1201,9 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
}
if (call->has_initial_md_been_received || error != GRPC_ERROR_NONE ||
call->receiving_stream == NULL) {
- gpr_mu_unlock(&bctl->call->mu);
process_data_after_md(exec_ctx, bctlp);
} else {
call->saved_receiving_stream_ready_bctlp = bctlp;
- gpr_mu_unlock(&bctl->call->mu);
}
}
@@ -1296,7 +1262,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
grpc_error *error, bool has_cancelled) {
if (error == GRPC_ERROR_NONE) return;
- int idx = (int)gpr_atm_no_barrier_fetch_add(&bctl->num_errors, 1);
+ int idx = (int)gpr_atm_full_fetch_add(&bctl->num_errors, 1);
if (idx == 0 && !has_cancelled) {
cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE,
GRPC_ERROR_REF(error));
@@ -1309,8 +1275,6 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
- gpr_mu_lock(&call->mu);
-
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
@@ -1336,11 +1300,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
grpc_schedule_on_exec_ctx);
call->saved_receiving_stream_ready_bctlp = NULL;
- grpc_closure_sched(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
+ grpc_closure_run(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
}
- gpr_mu_unlock(&call->mu);
-
finish_batch_step(exec_ctx, bctl);
}
@@ -1393,7 +1355,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->notify_tag = notify_tag;
bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
- gpr_mu_lock(&call->mu);
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
stream_op->covered_by_poller = true;
@@ -1679,8 +1640,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_closure_init(&bctl->finish_batch, finish_batch, bctl,
grpc_schedule_on_exec_ctx);
stream_op->on_complete = &bctl->finish_batch;
- call->sent_any_op = true;
- gpr_mu_unlock(&call->mu);
+ gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
execute_op(exec_ctx, call, stream_op);
@@ -1711,7 +1671,6 @@ done_with_error:
if (bctl->recv_final_op) {
call->requested_final_op = 0;
}
- gpr_mu_unlock(&call->mu);
goto done;
}
@@ -1760,10 +1719,8 @@ uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; }
grpc_compression_algorithm grpc_call_compression_for_level(
grpc_call *call, grpc_compression_level level) {
- gpr_mu_lock(&call->mu);
grpc_compression_algorithm algo =
compression_algorithm_for_level_locked(call, level);
- gpr_mu_unlock(&call->mu);
return algo;
}
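
The per-call mutex survives only as child_list_mu, guarding the sibling links; the two lifecycle flags become atomics, a release store on the writing side paired with acquire loads in readers such as grpc_call_destroy. The pairing in isolation, with illustrative names:

#include <grpc/support/atm.h>

typedef struct {
  gpr_atm published; /* 0 until payload is fully written */
  int payload;
} record;

static void writer(record *r) {
  r->payload = 42;
  gpr_atm_rel_store(&r->published, 1); /* release: orders the write above */
}

static int reader(record *r) {
  if (gpr_atm_acq_load(&r->published)) { /* acquire: pairs with the store */
    return r->payload; /* guaranteed to observe 42 */
  }
  return -1; /* not published yet */
}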
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 165950e288..3024f2ae78 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -254,8 +254,9 @@ typedef struct {
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_stream_op *op = arg;
- grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
+ grpc_closure *c = op->inner_on_complete;
gpr_free(op);
+ grpc_closure_run(exec_ctx, c, GRPC_ERROR_REF(error));
}
grpc_transport_stream_op *grpc_make_transport_stream_op(
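
The transport.c change is an ordering fix for inline execution: grpc_closure_run, unlike the grpc_closure_sched it replaces, may invoke the inner closure on the spot, so the wrapper is released first and only a saved pointer outlives it. As a reusable pattern (wrapped_op is an illustrative stand-in for the patch's wrapper struct):

static void done_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
                         grpc_error *error) {
  wrapped_op *w = arg;                        /* hypothetical wrapper type */
  grpc_closure *inner = w->inner_on_complete; /* 1. save what outlives w   */
  gpr_free(w);                                /* 2. release the wrapper    */
  grpc_closure_run(exec_ctx, inner,           /* 3. run inline; w is gone  */
                   GRPC_ERROR_REF(error));
}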