author    Craig Tiller <ctiller@google.com>  2017-07-05 08:37:09 -0700
committer Craig Tiller <ctiller@google.com>  2017-07-05 08:37:09 -0700
commit    a1a3f363ee0309bdfbcdf919ab57468e3522731a (patch)
tree      a4d755c6b8512f47771a23ae8255ccb567391359 /src/core
parent    f95873e67632c2b7c323c6613b5e23cae60ecf17 (diff)
parent    7405347cd848f27067b9ce3c029325799ebaa888 (diff)
Merge github.com:grpc/grpc into optimize_for
Diffstat (limited to 'src/core')
-rw-r--r-- src/core/ext/filters/client_channel/client_channel.c | 667
-rw-r--r-- src/core/ext/filters/client_channel/client_channel_plugin.c | 3
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy.c | 17
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy.h | 7
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c | 3
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c | 45
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c | 38
-rw-r--r-- src/core/ext/filters/client_channel/resolver.c | 35
-rw-r--r-- src/core/ext/filters/client_channel/resolver.h | 14
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h | 2
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c | 2
-rw-r--r-- src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c | 19
-rw-r--r-- src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c | 4
-rw-r--r-- src/core/ext/filters/client_channel/subchannel.c | 24
-rw-r--r-- src/core/ext/filters/client_channel/subchannel.h | 2
-rw-r--r-- src/core/ext/filters/http/message_compress/message_compress_filter.c | 35
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_plugin.c | 3
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 69
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_transport.h | 4
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 2
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_encoder.c | 14
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_encoder.h | 2
-rw-r--r-- src/core/ext/transport/chttp2/transport/internal.h | 6
-rw-r--r-- src/core/ext/transport/chttp2/transport/parsing.c | 12
-rw-r--r-- src/core/ext/transport/chttp2/transport/writing.c | 87
-rw-r--r-- src/core/ext/transport/cronet/transport/cronet_transport.c | 52
-rw-r--r-- src/core/lib/channel/channel_stack.h | 2
-rw-r--r-- src/core/lib/http/httpcli_security_connector.c | 10
-rw-r--r-- src/core/lib/iomgr/closure.c | 24
-rw-r--r-- src/core/lib/iomgr/closure.h | 30
-rw-r--r-- src/core/lib/iomgr/combiner.c | 18
-rw-r--r-- src/core/lib/iomgr/combiner.h | 3
-rw-r--r-- src/core/lib/iomgr/error.c | 55
-rw-r--r-- src/core/lib/iomgr/error.h | 20
-rw-r--r-- src/core/lib/iomgr/ev_epoll1_linux.c | 6
-rw-r--r-- src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c | 65
-rw-r--r-- src/core/lib/iomgr/ev_epoll_thread_pool_linux.c | 28
-rw-r--r-- src/core/lib/iomgr/ev_epollex_linux.c | 32
-rw-r--r-- src/core/lib/iomgr/ev_epollsig_linux.c | 62
-rw-r--r-- src/core/lib/iomgr/ev_poll_posix.c | 35
-rw-r--r-- src/core/lib/iomgr/ev_posix.c | 4
-rw-r--r-- src/core/lib/iomgr/exec_ctx.c | 15
-rw-r--r-- src/core/lib/iomgr/executor.c | 2
-rw-r--r-- src/core/lib/iomgr/pollset.h | 4
-rw-r--r-- src/core/lib/iomgr/pollset_uv.c | 6
-rw-r--r-- src/core/lib/iomgr/pollset_windows.c | 4
-rw-r--r-- src/core/lib/iomgr/tcp_posix.c | 19
-rw-r--r-- src/core/lib/iomgr/tcp_uv.c | 21
-rw-r--r-- src/core/lib/iomgr/tcp_windows.c | 21
-rw-r--r-- src/core/lib/security/context/security_context.c | 27
-rw-r--r-- src/core/lib/security/context/security_context.h | 6
-rw-r--r-- src/core/lib/security/transport/secure_endpoint.c | 22
-rw-r--r-- src/core/lib/security/transport/security_connector.c | 27
-rw-r--r-- src/core/lib/security/transport/security_connector.h | 6
-rw-r--r-- src/core/lib/support/stack_lockfree.c | 137
-rw-r--r-- src/core/lib/support/stack_lockfree.h | 38
-rw-r--r-- src/core/lib/support/time_precise.c | 16
-rw-r--r-- src/core/lib/surface/call.c | 55
-rw-r--r-- src/core/lib/surface/call.h | 2
-rw-r--r-- src/core/lib/surface/channel.c | 2
-rw-r--r-- src/core/lib/surface/channel.h | 2
-rw-r--r-- src/core/lib/surface/completion_queue.c | 25
-rw-r--r-- src/core/lib/surface/completion_queue.h | 6
-rw-r--r-- src/core/lib/surface/init.c | 13
-rw-r--r-- src/core/lib/surface/init_secure.c | 10
-rw-r--r-- src/core/lib/surface/server.c | 2
-rw-r--r-- src/core/lib/transport/metadata.c | 136
-rw-r--r-- src/core/lib/transport/metadata.h | 8
-rw-r--r-- src/core/lib/transport/transport.c | 39
-rw-r--r-- src/core/lib/transport/transport.h | 22
70 files changed, 1206 insertions(+), 1049 deletions(-)
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index f29c5d55ed..de516ab4c9 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -178,8 +178,8 @@ typedef struct client_channel_channel_data {
grpc_slice_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args *resolver_result;
- /** a list of closures that are all waiting for config to come in */
- grpc_closure_list waiting_for_config_closures;
+ /** a list of closures that are all waiting for resolver result to come in */
+ grpc_closure_list waiting_for_resolver_result_closures;
/** resolver callback */
grpc_closure on_resolver_result_changed;
/** connectivity state being tracked */
@@ -342,49 +342,15 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
}
}
-// Wrap a closure associated with \a lb_policy. The associated callback (\a
-// wrapped_on_pick_closure_cb) is responsible for unref'ing \a lb_policy after
-// scheduling \a wrapped_closure.
-typedef struct wrapped_on_pick_closure_arg {
- /* the closure instance using this struct as argument */
- grpc_closure wrapper_closure;
-
- /* the original closure. Usually a on_complete/notify cb for pick() and ping()
- * calls against the internal RR instance, respectively. */
- grpc_closure *wrapped_closure;
-
- /* The policy instance related to the closure */
- grpc_lb_policy *lb_policy;
-} wrapped_on_pick_closure_arg;
-
-// Invoke \a arg->wrapped_closure, unref \a arg->lb_policy and free \a arg.
-static void wrapped_on_pick_closure_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- wrapped_on_pick_closure_arg *wc_arg = arg;
- GPR_ASSERT(wc_arg != NULL);
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- GPR_ASSERT(wc_arg->lb_policy != NULL);
- GRPC_CLOSURE_RUN(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->lb_policy, "pick_subchannel_wrapping");
- gpr_free(wc_arg);
-}
-
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = arg;
+ // Extract the following fields from the resolver result, if non-NULL.
char *lb_policy_name = NULL;
- grpc_lb_policy *lb_policy = NULL;
- grpc_lb_policy *old_lb_policy = NULL;
- grpc_slice_hash_table *method_params_table = NULL;
- grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- bool exit_idle = false;
- grpc_error *state_error =
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
+ grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
- service_config_parsing_state parsing_state;
- memset(&parsing_state, 0, sizeof(parsing_state));
-
- bool lb_policy_updated = false;
+ grpc_server_retry_throttle_data *retry_throttle_data = NULL;
+ grpc_slice_hash_table *method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
const grpc_arg *channel_arg =
@@ -419,32 +385,29 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == NULL) lb_policy_name = "pick_first";
- // Instantiate LB policy.
grpc_lb_policy_args lb_policy_args;
lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.combiner = chand->combiner;
-
+ // Check to see if we're already using the right LB policy.
+ // Note: It's safe to use chand->info_lb_policy_name here without
+ // taking a lock on chand->info_mu, because this function is the
+ // only thing that modifies its value, and it can only be invoked
+ // once at any given time.
const bool lb_policy_type_changed =
- (chand->info_lb_policy_name == NULL) ||
- (strcmp(chand->info_lb_policy_name, lb_policy_name) != 0);
+ chand->info_lb_policy_name == NULL ||
+ strcmp(chand->info_lb_policy_name, lb_policy_name) != 0;
if (chand->lb_policy != NULL && !lb_policy_type_changed) {
- // update
- lb_policy_updated = true;
+ // Continue using the same LB policy. Update with new addresses.
grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
} else {
- lb_policy =
+ // Instantiate new LB policy.
+ new_lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
- if (lb_policy != NULL) {
- GRPC_LB_POLICY_REF(lb_policy, "config_change");
- GRPC_ERROR_UNREF(state_error);
- state = grpc_lb_policy_check_connectivity_locked(exec_ctx, lb_policy,
- &state_error);
- old_lb_policy = chand->lb_policy;
- chand->lb_policy = lb_policy;
+ if (new_lb_policy == NULL) {
+ gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
}
}
-
// Find service config.
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
@@ -461,12 +424,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_uri *uri =
grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
+ service_config_parsing_state parsing_state;
+ memset(&parsing_state, 0, sizeof(parsing_state));
parsing_state.server_name =
uri->path[0] == '/' ? uri->path + 1 : uri->path;
grpc_service_config_parse_global_params(
service_config, parse_retry_throttle_params, &parsing_state);
- parsing_state.server_name = NULL;
grpc_uri_destroy(uri);
+ retry_throttle_data = parsing_state.retry_throttle_data;
method_params_table = grpc_service_config_create_method_config_table(
exec_ctx, service_config, method_parameters_create_from_json,
method_parameters_free);
@@ -480,12 +445,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
-
- if (lb_policy != NULL) {
- grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
- chand->interested_parties);
- }
-
+ // Now swap out fields in chand. Note that the new values may still
+ // be NULL if (e.g.) the resolver failed to return results or the
+ // results did not contain the necessary data.
+ //
+ // First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
if (lb_policy_name != NULL) {
gpr_free(chand->info_lb_policy_name);
@@ -496,75 +460,77 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
chand->info_service_config_json = service_config_json;
}
gpr_mu_unlock(&chand->info_mu);
-
+ // Swap out the retry throttle data.
if (chand->retry_throttle_data != NULL) {
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
}
- chand->retry_throttle_data = parsing_state.retry_throttle_data;
+ chand->retry_throttle_data = retry_throttle_data;
+ // Swap out the method params table.
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
chand->method_params_table = method_params_table;
- if (lb_policy != NULL) {
- GRPC_CLOSURE_LIST_SCHED(exec_ctx, &chand->waiting_for_config_closures);
- } else if (chand->resolver == NULL /* disconnected */) {
- grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Channel disconnected", &error, 1));
- GRPC_CLOSURE_LIST_SCHED(exec_ctx, &chand->waiting_for_config_closures);
- }
- if (!lb_policy_updated && lb_policy != NULL &&
- chand->exit_idle_when_lb_policy_arrives) {
- GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
- exit_idle = true;
- chand->exit_idle_when_lb_policy_arrives = false;
- }
-
- if (error == GRPC_ERROR_NONE && chand->resolver) {
- if (!lb_policy_updated) {
- set_channel_connectivity_state_locked(exec_ctx, chand, state,
- GRPC_ERROR_REF(state_error),
- "new_lb+resolver");
- if (lb_policy != NULL) {
- watch_lb_policy_locked(exec_ctx, chand, lb_policy, state);
- }
+ // If we have a new LB policy or are shutting down (in which case
+ // new_lb_policy will be NULL), swap out the LB policy, unreffing the
+ // old one and removing its fds from chand->interested_parties.
+ // Note that we do NOT do this if either (a) we updated the existing
+ // LB policy above or (b) we failed to create the new LB policy (in
+ // which case we want to continue using the most recent one we had).
+ if (new_lb_policy != NULL || error != GRPC_ERROR_NONE ||
+ chand->resolver == NULL) {
+ if (chand->lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ chand->lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next_locked(exec_ctx, chand->resolver,
- &chand->resolver_result,
- &chand->on_resolver_result_changed);
- } else {
+ chand->lb_policy = new_lb_policy;
+ }
+ // Now that we've swapped out the relevant fields of chand, check for
+ // error or shutdown.
+ if (error != GRPC_ERROR_NONE || chand->resolver == NULL) {
if (chand->resolver != NULL) {
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
}
- grpc_error *refs[] = {error, state_error};
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Got config after disconnection", refs, GPR_ARRAY_SIZE(refs)),
+ "Got resolver result after disconnection", &error, 1),
"resolver_gone");
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
+ grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Channel disconnected", &error, 1));
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx,
+ &chand->waiting_for_resolver_result_closures);
+ } else { // Not shutting down.
+ grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+ grpc_error *state_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
+ if (new_lb_policy != NULL) {
+ GRPC_ERROR_UNREF(state_error);
+ state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy,
+ &state_error);
+ grpc_pollset_set_add_pollset_set(exec_ctx,
+ new_lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx,
+ &chand->waiting_for_resolver_result_closures);
+ if (chand->exit_idle_when_lb_policy_arrives) {
+ grpc_lb_policy_exit_idle_locked(exec_ctx, new_lb_policy);
+ chand->exit_idle_when_lb_policy_arrives = false;
+ }
+ watch_lb_policy_locked(exec_ctx, chand, new_lb_policy, state);
+ }
+ set_channel_connectivity_state_locked(
+ exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
+ GRPC_ERROR_UNREF(state_error);
}
-
- if (!lb_policy_updated && lb_policy != NULL && exit_idle) {
- grpc_lb_policy_exit_idle_locked(exec_ctx, lb_policy);
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
- }
-
- if (old_lb_policy != NULL) {
- grpc_pollset_set_del_pollset_set(
- exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
- GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
- old_lb_policy = NULL;
- }
-
- if (!lb_policy_updated && lb_policy != NULL) {
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
- }
-
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
- GRPC_ERROR_UNREF(state_error);
}
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -602,9 +568,10 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
if (!chand->started_resolving) {
- grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
+ grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
GRPC_ERROR_REF(op->disconnect_with_error));
- GRPC_CLOSURE_LIST_SCHED(exec_ctx, &chand->waiting_for_config_closures);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx,
+ &chand->waiting_for_resolver_result_closures);
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
@@ -770,6 +737,16 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
* PER-CALL FUNCTIONS
*/
+// Max number of batches that can be pending on a call at any given
+// time. This includes:
+// recv_initial_metadata
+// send_initial_metadata
+// recv_message
+// send_message
+// recv_trailing_metadata
+// send_trailing_metadata
+#define MAX_WAITING_BATCHES 6
+
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
Handles queueing of stream ops until a call object is ready, waiting
@@ -800,11 +777,10 @@ typedef struct client_channel_call_data {
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity *pollent;
- grpc_transport_stream_op_batch **waiting_ops;
- size_t waiting_ops_count;
- size_t waiting_ops_capacity;
+ grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
+ size_t waiting_for_pick_batches_count;
- grpc_closure next_step;
+ grpc_transport_stream_op_batch_payload *initial_metadata_payload;
grpc_call_stack *owning_call;
@@ -853,57 +829,44 @@ grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
return get_call_or_error(call_elem->call_data).subchannel_call;
}
-static void add_waiting_locked(call_data *calld,
- grpc_transport_stream_op_batch *op) {
- GPR_TIMER_BEGIN("add_waiting_locked", 0);
- if (calld->waiting_ops_count == calld->waiting_ops_capacity) {
- calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity);
- calld->waiting_ops =
- gpr_realloc(calld->waiting_ops,
- calld->waiting_ops_capacity * sizeof(*calld->waiting_ops));
- }
- calld->waiting_ops[calld->waiting_ops_count++] = op;
- GPR_TIMER_END("add_waiting_locked", 0);
+static void waiting_for_pick_batches_add_locked(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+ batch;
}
-static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
- grpc_error *error) {
- size_t i;
- for (i = 0; i < calld->waiting_ops_count; i++) {
+static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
+ call_data *calld,
+ grpc_error *error) {
+ for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->waiting_ops[i], GRPC_ERROR_REF(error));
+ exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
}
- calld->waiting_ops_count = 0;
+ calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error);
}
-static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
- if (calld->waiting_ops_count == 0) {
- return;
- }
-
- call_or_error call = get_call_or_error(calld);
- grpc_transport_stream_op_batch **ops = calld->waiting_ops;
- size_t nops = calld->waiting_ops_count;
- if (call.error != GRPC_ERROR_NONE) {
- fail_locked(exec_ctx, calld, GRPC_ERROR_REF(call.error));
+static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
+ call_data *calld) {
+ if (calld->waiting_for_pick_batches_count == 0) return;
+ call_or_error coe = get_call_or_error(calld);
+ if (coe.error != GRPC_ERROR_NONE) {
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld,
+ GRPC_ERROR_REF(coe.error));
return;
}
- calld->waiting_ops = NULL;
- calld->waiting_ops_count = 0;
- calld->waiting_ops_capacity = 0;
- for (size_t i = 0; i < nops; i++) {
- grpc_subchannel_call_process_op(exec_ctx, call.subchannel_call, ops[i]);
+ for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+ grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
+ calld->waiting_for_pick_batches[i]);
}
- gpr_free(ops);
+ calld->waiting_for_pick_batches_count = 0;
}
-// Sets calld->method_params and calld->retry_throttle_data.
-// If the method params specify a timeout, populates
-// *per_method_deadline and returns true.
-static bool set_call_method_params_from_service_config_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- gpr_timespec *per_method_deadline) {
+// Applies service config to the call. Must be invoked once we know
+// that the resolver has returned results to the channel.
+static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (chand->retry_throttle_data != NULL) {
@@ -915,39 +878,48 @@ static bool set_call_method_params_from_service_config_locked(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
- if (gpr_time_cmp(calld->method_params->timeout,
+ // If the deadline from the service config is shorter than the one
+ // from the client API, reset the deadline timer.
+ if (chand->deadline_checking_enabled &&
+ gpr_time_cmp(calld->method_params->timeout,
gpr_time_0(GPR_TIMESPAN)) != 0) {
- *per_method_deadline =
+ const gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- return true;
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+ }
}
}
}
- return false;
}
-static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- /* apply service-config level configuration to the call (now that we're
- * certain it exists) */
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- gpr_timespec per_method_deadline;
- if (set_call_method_params_from_service_config_locked(exec_ctx, elem,
- &per_method_deadline)) {
- // If the deadline from the service config is shorter than the one
- // from the client API, reset the deadline timer.
- if (chand->deadline_checking_enabled &&
- gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
- calld->deadline = per_method_deadline;
- grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
- }
+static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
+ call_data *calld, grpc_error *error) {
+ grpc_subchannel_call *subchannel_call = NULL;
+ const grpc_connected_subchannel_call_args call_args = {
+ .pollent = calld->pollent,
+ .path = calld->path,
+ .start_time = calld->call_start_time,
+ .deadline = calld->deadline,
+ .arena = calld->arena,
+ .context = calld->subchannel_call_context};
+ grpc_error *new_error = grpc_connected_subchannel_create_call(
+ exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+ GPR_ASSERT(set_call_or_error(
+ calld, (call_or_error){.subchannel_call = subchannel_call}));
+ if (new_error != GRPC_ERROR_NONE) {
+ new_error = grpc_error_add_child(new_error, error);
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, new_error);
+ } else {
+ waiting_for_pick_batches_resume_locked(exec_ctx, calld);
}
+ GRPC_ERROR_UNREF(error);
}
-static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
+static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_error *error) {
- grpc_call_element *elem = arg;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GPR_ASSERT(calld->pick_pending);
@@ -956,6 +928,7 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
chand->interested_parties);
call_or_error coe = get_call_or_error(calld);
if (calld->connected_subchannel == NULL) {
+ // Failed to create subchannel.
grpc_error *failure =
error == GRPC_ERROR_NONE
? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -963,7 +936,7 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
- fail_locked(exec_ctx, calld, failure);
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, failure);
} else if (coe.error != GRPC_ERROR_NONE) {
/* already cancelled before subchannel became ready */
grpc_error *child_errors[] = {error, coe.error};
@@ -977,29 +950,13 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_DEADLINE_EXCEEDED);
}
- fail_locked(exec_ctx, calld, cancellation_error);
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, cancellation_error);
} else {
/* Create call on subchannel. */
- grpc_subchannel_call *subchannel_call = NULL;
- const grpc_connected_subchannel_call_args call_args = {
- .pollent = calld->pollent,
- .path = calld->path,
- .start_time = calld->call_start_time,
- .deadline = calld->deadline,
- .arena = calld->arena,
- .context = calld->subchannel_call_context};
- grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
- GPR_ASSERT(set_call_or_error(
- calld, (call_or_error){.subchannel_call = subchannel_call}));
- if (new_error != GRPC_ERROR_NONE) {
- new_error = grpc_error_add_child(new_error, error);
- fail_locked(exec_ctx, calld, new_error);
- } else {
- retry_waiting_locked(exec_ctx, calld);
- }
+ create_subchannel_call_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+ GRPC_ERROR_UNREF(error);
}
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
@@ -1013,41 +970,32 @@ static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
}
}
+/** Return true if subchannel is available immediately (in which case
+ subchannel_ready_locked() should not be called), or false otherwise (in
+ which case subchannel_ready_locked() should be called when the subchannel
+ is available). */
+static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
+
typedef struct {
- grpc_metadata_batch *initial_metadata;
- uint32_t initial_metadata_flags;
- grpc_connected_subchannel **connected_subchannel;
- grpc_call_context_element *subchannel_call_context;
- grpc_closure *on_ready;
grpc_call_element *elem;
+ bool cancelled;
grpc_closure closure;
-} continue_picking_args;
+} pick_after_resolver_result_args;
-/** Return true if subchannel is available immediately (in which case on_ready
- should not be called), or false otherwise (in which case on_ready should be
- called when the subchannel is available). */
-static bool pick_subchannel_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_call_context_element *subchannel_call_context, grpc_closure *on_ready);
-
-static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- continue_picking_args *cpa = arg;
- if (cpa->connected_subchannel == NULL) {
+static void continue_picking_after_resolver_result_locked(
+ grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ pick_after_resolver_result_args *args = arg;
+ if (args->cancelled) {
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
- GRPC_CLOSURE_SCHED(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
+ subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
} else {
- if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
- cpa->initial_metadata_flags,
- cpa->connected_subchannel,
- cpa->subchannel_call_context, cpa->on_ready)) {
- GRPC_CLOSURE_SCHED(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
+ if (pick_subchannel_locked(exec_ctx, args->elem)) {
+ subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
}
}
- gpr_free(cpa);
+ gpr_free(args);
}
static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
@@ -1059,39 +1007,85 @@ static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
&calld->connected_subchannel,
GRPC_ERROR_REF(error));
}
- for (grpc_closure *closure = chand->waiting_for_config_closures.head;
+ // If we don't yet have a resolver result, then a closure for
+ // continue_picking_after_resolver_result_locked() will have been added to
+ // chand->waiting_for_resolver_result_closures, and it may not be invoked
+ // until after this call has been destroyed. We mark the operation as
+ // cancelled, so that when continue_picking_after_resolver_result_locked()
+ // is called, it will be a no-op. We also immediately invoke
+ // subchannel_ready_locked() to propagate the error back to the caller.
+ for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
closure != NULL; closure = closure->next_data.next) {
- continue_picking_args *cpa = closure->cb_arg;
- if (cpa->connected_subchannel == &calld->connected_subchannel) {
- cpa->connected_subchannel = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ pick_after_resolver_result_args *args = closure->cb_arg;
+ if (!args->cancelled && args->elem == elem) {
+ args->cancelled = true;
+ subchannel_ready_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
}
}
GRPC_ERROR_UNREF(error);
}
-static bool pick_subchannel_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_call_context_element *subchannel_call_context,
- grpc_closure *on_ready) {
- GPR_TIMER_BEGIN("pick_subchannel", 0);
+// State for pick callback that holds a reference to the LB policy
+// from which the pick was requested.
+typedef struct {
+ grpc_lb_policy *lb_policy;
+ grpc_call_element *elem;
+ grpc_closure closure;
+} pick_callback_args;
+
+// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
+// Unrefs the LB policy after invoking subchannel_ready_locked().
+static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ pick_callback_args *args = arg;
+ GPR_ASSERT(args != NULL);
+ GPR_ASSERT(args->lb_policy != NULL);
+ subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
+ GRPC_LB_POLICY_UNREF(exec_ctx, args->lb_policy, "pick_subchannel");
+ gpr_free(args);
+}
+// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
+// If the pick was completed synchronously, unrefs the LB policy and
+// returns true.
+static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_lb_policy_pick_args *inputs) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
+ pick_callback_args *pick_args = gpr_zalloc(sizeof(*pick_args));
+ GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
+ pick_args->lb_policy = chand->lb_policy;
+ pick_args->elem = elem;
+ GRPC_CLOSURE_INIT(&pick_args->closure, pick_callback_done_locked, pick_args,
+ grpc_combiner_scheduler(chand->combiner));
+ const bool pick_done = grpc_lb_policy_pick_locked(
+ exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
+ calld->subchannel_call_context, NULL, &pick_args->closure);
+ if (pick_done) {
+ /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "pick_subchannel");
+ gpr_free(pick_args);
+ }
+ return pick_done;
+}
- GPR_ASSERT(connected_subchannel);
-
+static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ GPR_TIMER_BEGIN("pick_subchannel", 0);
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ bool pick_done = false;
if (chand->lb_policy != NULL) {
- apply_final_configuration_locked(exec_ctx, elem);
- grpc_lb_policy *lb_policy = chand->lb_policy;
- GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
+ apply_service_config_to_call_locked(exec_ctx, elem);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
+ uint32_t initial_metadata_flags =
+ calld->initial_metadata_payload->send_initial_metadata
+ .send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
@@ -1107,78 +1101,57 @@ static bool pick_subchannel_locked(
}
}
const grpc_lb_policy_pick_args inputs = {
- initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem};
-
- // Wrap the user-provided callback in order to hold a strong reference to
- // the LB policy for the duration of the pick.
- wrapped_on_pick_closure_arg *w_on_pick_arg =
- gpr_zalloc(sizeof(*w_on_pick_arg));
- GRPC_CLOSURE_INIT(&w_on_pick_arg->wrapper_closure,
- wrapped_on_pick_closure_cb, w_on_pick_arg,
- grpc_schedule_on_exec_ctx);
- w_on_pick_arg->wrapped_closure = on_ready;
- GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel_wrapping");
- w_on_pick_arg->lb_policy = lb_policy;
- const bool pick_done = grpc_lb_policy_pick_locked(
- exec_ctx, lb_policy, &inputs, connected_subchannel,
- subchannel_call_context, NULL, &w_on_pick_arg->wrapper_closure);
- if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
- GRPC_LB_POLICY_UNREF(exec_ctx, w_on_pick_arg->lb_policy,
- "pick_subchannel_wrapping");
- gpr_free(w_on_pick_arg);
+ calld->initial_metadata_payload->send_initial_metadata
+ .send_initial_metadata,
+ initial_metadata_flags, &calld->lb_token_mdelem};
+ pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
+ } else if (chand->resolver != NULL) {
+ if (!chand->started_resolving) {
+ chand->started_resolving = true;
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
}
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
- GPR_TIMER_END("pick_subchannel", 0);
- return pick_done;
- }
- if (chand->resolver != NULL && !chand->started_resolving) {
- chand->started_resolving = true;
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next_locked(exec_ctx, chand->resolver,
- &chand->resolver_result,
- &chand->on_resolver_result_changed);
- }
- if (chand->resolver != NULL) {
- continue_picking_args *cpa = gpr_malloc(sizeof(*cpa));
- cpa->initial_metadata = initial_metadata;
- cpa->initial_metadata_flags = initial_metadata_flags;
- cpa->connected_subchannel = connected_subchannel;
- cpa->subchannel_call_context = subchannel_call_context;
- cpa->on_ready = on_ready;
- cpa->elem = elem;
- GRPC_CLOSURE_INIT(&cpa->closure, continue_picking_locked, cpa,
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+ args->elem = elem;
+ GRPC_CLOSURE_INIT(&args->closure,
+ continue_picking_after_resolver_result_locked, args,
grpc_combiner_scheduler(chand->combiner));
- grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
- GRPC_ERROR_NONE);
+ grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
+ &args->closure, GRPC_ERROR_NONE);
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, on_ready,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+ subchannel_ready_locked(
+ exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
-
GPR_TIMER_END("pick_subchannel", 0);
- return false;
+ return pick_done;
}
-static void start_transport_stream_op_batch_locked_inner(
- grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
+static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
+ grpc_transport_stream_op_batch *op = arg;
+ grpc_call_element *elem = op->handler_private.extra_arg;
call_data *calld = elem->call_data;
-
+ channel_data *chand = elem->channel_data;
/* need to recheck that another thread hasn't set the call */
call_or_error coe = get_call_or_error(calld);
if (coe.error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(coe.error));
- /* early out */
- return;
+ goto done;
}
if (coe.subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
- /* early out */
- return;
+ goto done;
}
+ // Add to waiting-for-pick list. If we succeed in getting a
+ // subchannel call below, we'll handle this batch (along with any
+ // other waiting batches) in waiting_for_pick_batches_resume_locked().
+ waiting_for_pick_batches_add_locked(calld, op);
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_stream) {
grpc_error *error = op->payload->cancel_stream.cancel_error;
@@ -1190,30 +1163,22 @@ static void start_transport_stream_op_batch_locked_inner(
set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
if (calld->pick_pending) {
cancel_pick_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- } else {
- fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
}
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op,
- GRPC_ERROR_REF(error));
- /* early out */
- return;
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld,
+ GRPC_ERROR_REF(error));
+ goto done;
}
/* if we don't have a subchannel, try to get one */
if (!calld->pick_pending && calld->connected_subchannel == NULL &&
op->send_initial_metadata) {
+ calld->initial_metadata_payload = op->payload;
calld->pick_pending = true;
- GRPC_CLOSURE_INIT(&calld->next_step, subchannel_ready_locked, elem,
- grpc_combiner_scheduler(chand->combiner));
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel_locked(
- exec_ctx, elem,
- op->payload->send_initial_metadata.send_initial_metadata,
- op->payload->send_initial_metadata.send_initial_metadata_flags,
- &calld->connected_subchannel, calld->subchannel_call_context,
- &calld->next_step)) {
+ if (pick_subchannel_locked(exec_ctx, elem)) {
+ // Pick was returned synchronously.
calld->pick_pending = false;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
if (calld->connected_subchannel == NULL) {
@@ -1221,42 +1186,20 @@ static void start_transport_stream_op_batch_locked_inner(
"Call dropped by load balancing policy");
set_call_or_error(calld,
(call_or_error){.error = GRPC_ERROR_REF(error)});
- fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return; // Early out.
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, error);
+ } else {
+ // Create subchannel call.
+ create_subchannel_call_locked(exec_ctx, calld, GRPC_ERROR_NONE);
}
} else {
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
}
}
- /* if we've got a subchannel, then let's ask it to create a call */
- if (!calld->pick_pending && calld->connected_subchannel != NULL) {
- grpc_subchannel_call *subchannel_call = NULL;
- const grpc_connected_subchannel_call_args call_args = {
- .pollent = calld->pollent,
- .path = calld->path,
- .start_time = calld->call_start_time,
- .deadline = calld->deadline,
- .arena = calld->arena,
- .context = calld->subchannel_call_context};
- grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
- GPR_ASSERT(set_call_or_error(
- calld, (call_or_error){.subchannel_call = subchannel_call}));
- if (error != GRPC_ERROR_NONE) {
- fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- } else {
- retry_waiting_locked(exec_ctx, calld);
- /* recurse to retry */
- start_transport_stream_op_batch_locked_inner(exec_ctx, op, elem);
- }
- /* early out */
- return;
- }
- /* nothing to be done but wait */
- add_waiting_locked(calld, op);
+done:
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "start_transport_stream_op_batch");
+ GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -1279,30 +1222,6 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_REF(error));
}
-static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
-
- grpc_transport_stream_op_batch *op = arg;
- grpc_call_element *elem = op->handler_private.extra_arg;
- call_data *calld = elem->call_data;
-
- if (op->recv_trailing_metadata) {
- GPR_ASSERT(op->on_complete != NULL);
- calld->original_on_complete = op->on_complete;
- GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
- grpc_schedule_on_exec_ctx);
- op->on_complete = &calld->on_complete;
- }
-
- start_transport_stream_op_batch_locked_inner(exec_ctx, op, elem);
-
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "start_transport_stream_op_batch");
- GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
-}
-
/* The logic here is fairly complicated, due to (a) the fact that we
need to handle the case where we receive the send op before the
initial metadata op, and (b) the need for efficiency, especially in
@@ -1321,6 +1240,15 @@ static void cc_start_transport_stream_op_batch(
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
op);
}
+ // Intercept on_complete for recv_trailing_metadata so that we can
+ // check retry throttle status.
+ if (op->recv_trailing_metadata) {
+ GPR_ASSERT(op->on_complete != NULL);
+ calld->original_on_complete = op->on_complete;
+ GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
+ grpc_schedule_on_exec_ctx);
+ op->on_complete = &calld->on_complete;
+ }
/* try to (atomically) get the call */
call_or_error coe = get_call_or_error(calld);
GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
@@ -1390,7 +1318,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
"client_channel_destroy_call");
}
GPR_ASSERT(!calld->pick_pending);
- GPR_ASSERT(calld->waiting_ops_count == 0);
+ GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
if (calld->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
"picked");
@@ -1401,7 +1329,6 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
calld->subchannel_call_context[i].value);
}
}
- gpr_free(calld->waiting_ops);
GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
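
The client_channel.c hunks above replace the dynamically grown waiting_ops array with a fixed-size waiting_for_pick_batches list bounded by MAX_WAITING_BATCHES. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the gRPC ones, might look like this:

#include <assert.h>
#include <stddef.h>

#define MAX_WAITING_BATCHES 6 /* one slot per possible pending op type */

typedef struct batch batch; /* stand-in for grpc_transport_stream_op_batch */

typedef struct {
  batch *waiting[MAX_WAITING_BATCHES];
  size_t count;
} waiting_batches;

/* Queue a batch until the subchannel pick completes. The fixed bound holds
   because a call can have at most one of each of the six op types pending. */
static void waiting_batches_add(waiting_batches *w, batch *b) {
  assert(w->count < MAX_WAITING_BATCHES);
  w->waiting[w->count++] = b;
}

/* Once the pick completes, hand every queued batch to the given callback
   (resume on success, fail-all on error) and reset the list. */
static void waiting_batches_drain(waiting_batches *w,
                                  void (*process)(batch *b)) {
  for (size_t i = 0; i < w->count; ++i) process(w->waiting[i]);
  w->count = 0;
}
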
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.c b/src/core/ext/filters/client_channel/client_channel_plugin.c
index 2c6af1d78e..60e77d6268 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.c
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.c
@@ -78,6 +78,9 @@ void grpc_client_channel_init(void) {
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
(void *)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
+#ifndef NDEBUG
+ grpc_register_tracer("resolver_refcount", &grpc_trace_resolver_refcount);
+#endif
}
void grpc_client_channel_shutdown(void) {
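
The plugin change above registers the new resolver refcount tracer only in debug builds. A hedged sketch of that registration pattern for a hypothetical component, mirroring the grpc_tracer_flag / GRPC_TRACER_INITIALIZER / grpc_register_tracer shapes that appear elsewhere in this diff (it assumes the in-tree tracing header; "my_component" is illustrative):

#ifndef NDEBUG
/* Hypothetical flag; declared the same way as grpc_trace_resolver_refcount. */
grpc_tracer_flag grpc_trace_my_component_refcount =
    GRPC_TRACER_INITIALIZER(false);
#endif

void my_component_plugin_init(void) {
#ifndef NDEBUG
  /* Registered by name so it can be turned on at runtime, and compiled out
     entirely in release builds. */
  grpc_register_tracer("my_component_refcount",
                       &grpc_trace_my_component_refcount);
#endif
}
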
diff --git a/src/core/ext/filters/client_channel/lb_policy.c b/src/core/ext/filters/client_channel/lb_policy.c
index 50f8faef8e..8d69ba6af5 100644
--- a/src/core/ext/filters/client_channel/lb_policy.c
+++ b/src/core/ext/filters/client_channel/lb_policy.c
@@ -21,6 +21,10 @@
#define WEAK_REF_BITS 16
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_lb_policy_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable,
grpc_combiner *combiner) {
@@ -30,7 +34,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
-#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
@@ -46,11 +50,12 @@ static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
-#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY: 0x%" PRIxPTR " %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR
- " [%s]",
- (intptr_t)c, purpose, old_val, old_val + delta, reason);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_lb_policy_refcount)) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "LB_POLICY: 0x%p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
+ purpose, old_val, old_val + delta, reason);
+ }
#endif
return old_val;
}
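
The lb_policy.c hunks above retire the GRPC_LB_POLICY_REFCOUNT_DEBUG compile-time flag in favor of an NDEBUG-only build path plus a runtime lb_policy_refcount tracer. A simplified standalone sketch of the "debug-only extra arguments" macro pattern used by ref_mutate, with illustrative names:

#include <stdio.h>

#ifndef NDEBUG
#define REF_EXTRA_ARGS , const char *file, int line, const char *reason
#else
#define REF_EXTRA_ARGS
#endif

/* In debug builds the caller's file/line/reason ride along and are logged
   when tracing is enabled; in release builds those parameters vanish. */
static long ref_mutate(long *refs, long delta, int trace_on REF_EXTRA_ARGS) {
  long old_val = *refs;
  *refs = old_val + delta;
#ifndef NDEBUG
  if (trace_on) {
    fprintf(stderr, "%s:%d: refs %ld -> %ld [%s]\n", file, line, old_val,
            old_val + delta, reason);
  }
#else
  (void)trace_on; /* tracing compiled out in release builds */
#endif
  return old_val;
}
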
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 42503c37ca..645d51e138 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -29,6 +29,10 @@ typedef struct grpc_lb_policy grpc_lb_policy;
typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
typedef struct grpc_lb_policy_args grpc_lb_policy_args;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
+#endif
+
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
gpr_atm ref_pair;
@@ -96,8 +100,7 @@ struct grpc_lb_policy_vtable {
const grpc_lb_policy_args *args);
};
-//#define GRPC_LB_POLICY_REFCOUNT_DEBUG
-#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+#ifndef NDEBUG
/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index e80b1dcd80..5a5ff2902d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -1880,6 +1880,9 @@ static bool maybe_add_client_load_reporting_filter(
void grpc_lb_policy_grpclb_init() {
grpc_register_lb_policy(grpc_glb_lb_factory_create());
grpc_register_tracer("glb", &grpc_lb_glb_trace);
+#ifndef NDEBUG
+ grpc_register_tracer("lb_policy_refcount", &grpc_trace_lb_policy_refcount);
+#endif
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
index 307e3bad67..d0acd7a901 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
@@ -95,6 +95,9 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p->subchannels);
gpr_free(p->new_subchannels);
gpr_free(p);
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
+ }
}
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
@@ -268,11 +271,20 @@ static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
if (p->num_subchannels > 0) {
GPR_ASSERT(p->selected == NULL);
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p",
+ (void *)p, (void *)p->subchannels[p->checking_subchannel]);
+ }
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
p->updating_subchannels = true;
} else if (p->selected != NULL) {
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG,
+ "Pick First %p unsubscribing from selected subchannel %p",
+ (void *)p, (void *)p->selected);
+ }
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
p->updating_selected = true;
@@ -451,12 +463,25 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_subchannel *selected_subchannel;
pending_pick *pp;
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(
+ GPR_DEBUG,
+ "Pick First %p connectivity changed. Updating selected: %d; Updating "
+ "subchannels: %d; Checking %lu index (%lu total); State: %d; ",
+ (void *)p, p->updating_selected, p->updating_subchannels,
+ (unsigned long)p->checking_subchannel,
+ (unsigned long)p->num_subchannels, p->checking_connectivity);
+ }
bool restart = false;
- if (p->updating_selected && error == GRPC_ERROR_CANCELLED) {
+ if (p->updating_selected && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for p->selected */
GPR_ASSERT(p->selected != NULL);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
"pf_update_connectivity");
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p",
+ (void *)p, (void *)p->selected);
+ }
p->updating_selected = false;
if (p->num_new_subchannels == 0) {
p->selected = NULL;
@@ -464,12 +489,16 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
restart = true;
}
- if (p->updating_subchannels && error == GRPC_ERROR_CANCELLED) {
+ if (p->updating_subchannels && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for the checking subchannel */
GPR_ASSERT(p->selected == NULL);
for (size_t i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i],
"pf_update_connectivity");
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p,
+ (void *)p->subchannels[i]);
+ }
}
gpr_free(p->subchannels);
p->subchannels = NULL;
@@ -481,14 +510,12 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (restart) {
p->selected = NULL;
p->selected_key = NULL;
-
GPR_ASSERT(p->new_subchannels != NULL);
GPR_ASSERT(p->num_new_subchannels > 0);
p->num_subchannels = p->num_new_subchannels;
p->subchannels = p->new_subchannels;
p->num_new_subchannels = 0;
p->new_subchannels = NULL;
-
if (p->started_picking) {
/* If we were picking, continue to do so over the new subchannels,
* starting from the 0th index. */
@@ -542,7 +569,9 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
"picked_first");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO, "Selected subchannel %p", (void *)p->selected);
+ gpr_log(GPR_INFO,
+ "Pick First %p selected subchannel %p (connected %p)",
+ (void *)p, (void *)selected_subchannel, (void *)p->selected);
}
p->selected_key = grpc_subchannel_get_key(selected_subchannel);
/* drop the pick list: we are connected now */
@@ -568,7 +597,8 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
if (p->checking_subchannel == 0) {
- /* only trigger transient failure when we've tried all alternatives */
+ /* only trigger transient failure when we've tried all alternatives
+ */
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
@@ -652,6 +682,9 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
+ }
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index 3c8520cc1c..8e9d6b0f47 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -126,6 +126,8 @@ struct rr_subchannel_list {
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
+ /** how many subchannels are in state SHUTDOWN */
+ size_t num_shutdown;
/** how many subchannels are in state IDLE */
size_t num_idle;
@@ -425,6 +427,9 @@ static void update_state_counters_locked(subchannel_data *sd) {
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(subchannel_list->num_transient_failures > 0);
--subchannel_list->num_transient_failures;
+ } else if (sd->prev_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ GPR_ASSERT(subchannel_list->num_shutdown > 0);
+ --subchannel_list->num_shutdown;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle;
@@ -433,6 +438,8 @@ static void update_state_counters_locked(subchannel_data *sd) {
++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
++subchannel_list->num_transient_failures;
+ } else if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ ++subchannel_list->num_shutdown;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) {
++subchannel_list->num_idle;
}
@@ -455,7 +462,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: sd->curr_connectivity_state == CONNECTING.
*
* 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN.
- * CHECK: p->subchannel_list->num_subchannels = 0.
+ * CHECK: p->subchannel_list->num_shutdown ==
+ * p->subchannel_list->num_subchannels.
*
* 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
* TRANSIENT_FAILURE.
@@ -464,37 +472,39 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* 5) RULE: ALL subchannels are IDLE => policy is IDLE.
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
+ grpc_connectivity_state new_state = sd->curr_connectivity_state;
rr_subchannel_list *subchannel_list = sd->subchannel_list;
round_robin_lb_policy *p = subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
- return GRPC_CHANNEL_READY;
+ new_state = GRPC_CHANNEL_READY;
} else if (sd->curr_connectivity_state ==
GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
"rr_connecting");
- return GRPC_CHANNEL_CONNECTING;
- } else if (p->subchannel_list->num_subchannels == 0) { /* 3) SHUTDOWN */
+ new_state = GRPC_CHANNEL_CONNECTING;
+ } else if (p->subchannel_list->num_shutdown ==
+ p->subchannel_list->num_subchannels) { /* 3) SHUTDOWN */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
- return GRPC_CHANNEL_SHUTDOWN;
+ new_state = GRPC_CHANNEL_SHUTDOWN;
} else if (subchannel_list->num_transient_failures ==
p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "rr_transient_failure");
- return GRPC_CHANNEL_TRANSIENT_FAILURE;
+ new_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
} else if (subchannel_list->num_idle ==
p->subchannel_list->num_subchannels) { /* 5) IDLE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE, "rr_idle");
- return GRPC_CHANNEL_IDLE;
+ new_state = GRPC_CHANNEL_IDLE;
}
- /* no change */
- return sd->curr_connectivity_state;
+ GRPC_ERROR_UNREF(error);
+ return new_state;
}
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -571,13 +581,15 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(sd->subchannel_list == p->latest_pending_subchannel_list);
GPR_ASSERT(!sd->subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ const unsigned long num_subchannels =
+ p->subchannel_list != NULL
+ ? (unsigned long)p->subchannel_list->num_subchannels
+ : 0;
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
"of %p (size %lu)",
- (void *)p, (void *)p->subchannel_list,
- (unsigned long)p->subchannel_list->num_subchannels,
- (void *)sd->subchannel_list,
- (unsigned long)sd->subchannel_list->num_subchannels);
+ (void *)p, (void *)p->subchannel_list, num_subchannels,
+ (void *)sd->subchannel_list, num_subchannels);
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
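
Aside on the round_robin change above: rules 1)-5) in the comment block describe how per-subchannel state counters are folded into a single policy-level connectivity state. The following is a minimal standalone sketch of that aggregation, using illustrative names (rr_counters, aggregate_state) rather than the actual gRPC types.

#include <stddef.h>
#include <stdio.h>

typedef enum { IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN } conn_state;

/* Counters mirroring num_ready / num_shutdown / num_transient_failures /
 * num_idle in the subchannel list above. */
typedef struct {
  size_t total;
  size_t ready;
  size_t shutdown;
  size_t transient_failures;
  size_t idle;
} rr_counters;

/* Rules 1)-5): READY wins if any subchannel is ready; CONNECTING follows the
 * subchannel that just changed; SHUTDOWN / TRANSIENT_FAILURE / IDLE require
 * that *all* subchannels share the state; otherwise nothing changes. */
static conn_state aggregate_state(const rr_counters *c, conn_state just_changed) {
  if (c->ready > 0) return READY;                                  /* 1) */
  if (just_changed == CONNECTING) return CONNECTING;               /* 2) */
  if (c->shutdown == c->total) return SHUTDOWN;                    /* 3) */
  if (c->transient_failures == c->total) return TRANSIENT_FAILURE; /* 4) */
  if (c->idle == c->total) return IDLE;                            /* 5) */
  return just_changed; /* no change */
}

int main(void) {
  rr_counters c = {.total = 3, .shutdown = 3};
  printf("aggregate=%d expected=%d\n", aggregate_state(&c, SHUTDOWN), SHUTDOWN);
  return 0;
}
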
diff --git a/src/core/ext/filters/client_channel/resolver.c b/src/core/ext/filters/client_channel/resolver.c
index 69b1c31e59..de9a8ce41b 100644
--- a/src/core/ext/filters/client_channel/resolver.c
+++ b/src/core/ext/filters/client_channel/resolver.c
@@ -19,6 +19,10 @@
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_resolver_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable,
grpc_combiner *combiner) {
@@ -27,25 +31,30 @@ void grpc_resolver_init(grpc_resolver *resolver,
gpr_ref_init(&resolver->refs, 1);
}
-#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
-void grpc_resolver_ref(grpc_resolver *resolver, grpc_closure_list *closure_list,
- const char *file, int line, const char *reason) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p ref %d -> %d %s",
- resolver, (int)resolver->refs.count, (int)resolver->refs.count + 1,
- reason);
+#ifndef NDEBUG
+void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
+ const char *reason) {
+ if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "RESOLVER:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
+ old_refs, old_refs + 1, reason);
+ }
#else
void grpc_resolver_ref(grpc_resolver *resolver) {
#endif
gpr_ref(&resolver->refs);
}
-#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
-void grpc_resolver_unref(grpc_resolver *resolver,
- grpc_closure_list *closure_list, const char *file,
- int line, const char *reason) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p unref %d -> %d %s",
- resolver, (int)resolver->refs.count, (int)resolver->refs.count - 1,
- reason);
+#ifndef NDEBUG
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ const char *file, int line, const char *reason) {
+ if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "RESOLVER:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
+ old_refs, old_refs - 1, reason);
+ }
#else
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
#endif
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index c78bb316cb..ae9c8f66fe 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -25,6 +25,10 @@
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_resolver_refcount;
+#endif
+
/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
@@ -41,17 +45,17 @@ struct grpc_resolver_vtable {
grpc_channel_args **result, grpc_closure *on_complete);
};
-#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_RESOLVER_UNREF(cl, p, r) \
- grpc_resolver_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_RESOLVER_UNREF(e, p, r) \
+ grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r))
void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
const char *reason);
-void grpc_resolver_unref(grpc_resolver *policy, grpc_closure_list *closure_list,
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy,
const char *file, int line, const char *reason);
#else
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
-#define GRPC_RESOLVER_UNREF(cl, p, r) grpc_resolver_unref((cl), (p))
+#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p))
void grpc_resolver_ref(grpc_resolver *policy);
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
#endif
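
The resolver ref-counting macros above follow a pattern applied throughout this change: in debug builds (#ifndef NDEBUG) the REF/UNREF macros forward __FILE__, __LINE__ and a reason string to a tracing variant, while release builds compile down to the bare operation. A small self-contained sketch of the same pattern, with made-up names (my_obj, MY_REF) standing in for the gRPC ones:

#include <stdio.h>

typedef struct { int refs; } my_obj;

#ifndef NDEBUG
#define MY_REF(o, reason) my_obj_ref((o), __FILE__, __LINE__, (reason))
static void my_obj_ref(my_obj *o, const char *file, int line, const char *reason) {
  fprintf(stderr, "OBJ:%p ref %d -> %d %s [%s:%d]\n", (void *)o, o->refs,
          o->refs + 1, reason, file, line);
  ++o->refs;
}
#else
#define MY_REF(o, reason) my_obj_ref((o))
static void my_obj_ref(my_obj *o) { ++o->refs; }
#endif

int main(void) {
  my_obj o = {1};
  MY_REF(&o, "example"); /* the reason string costs nothing in release builds */
  return 0;
}
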
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index eb1a8ab011..386012d2ed 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -19,8 +19,6 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
-#include <ares.h>
-
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
index 4e79c44ba3..1ab8295e9e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
@@ -19,6 +19,8 @@
#include "src/core/lib/iomgr/port.h"
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
+#include <ares.h>
+
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
#include <grpc/support/alloc.h>
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
index 8e73d606aa..56ed4371a9 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
@@ -56,6 +56,10 @@ typedef struct {
// grpc_resolver_next_locked()'s closure.
grpc_channel_args* next_results;
+ // Results to use for the pretended re-resolution in
+ // fake_resolver_channel_saw_error_locked().
+ grpc_channel_args* results_upon_error;
+
// pending next completion, or NULL
grpc_closure* next_completion;
// target result address for next completion
@@ -65,6 +69,7 @@ typedef struct {
static void fake_resolver_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
fake_resolver* r = (fake_resolver*)gr;
grpc_channel_args_destroy(exec_ctx, r->next_results);
+ grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
}
@@ -74,7 +79,9 @@ static void fake_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
fake_resolver* r = (fake_resolver*)resolver;
if (r->next_completion != NULL) {
*r->target_result = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
}
}
@@ -85,15 +92,19 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
*r->target_result =
grpc_channel_args_union(r->next_results, r->channel_args);
grpc_channel_args_destroy(exec_ctx, r->next_results);
+ r->next_results = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
- r->next_results = NULL;
}
}
static void fake_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver) {
fake_resolver* r = (fake_resolver*)resolver;
+ if (r->next_results == NULL && r->results_upon_error != NULL) {
+ // Pretend we re-resolved.
+ r->next_results = grpc_channel_args_copy(r->results_upon_error);
+ }
fake_resolver_maybe_finish_next_locked(exec_ctx, r);
}
@@ -149,6 +160,10 @@ static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg,
grpc_channel_args_destroy(exec_ctx, r->next_results);
}
r->next_results = generator->next_response;
+ if (r->results_upon_error != NULL) {
+ grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
+ }
+ r->results_upon_error = grpc_channel_args_copy(generator->next_response);
fake_resolver_maybe_finish_next_locked(exec_ctx, r);
}
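
The fake resolver change above keeps a second copy of the last response (results_upon_error) so that a channel error can be answered with a pretend re-resolution instead of silence. A rough standalone sketch of that idea, with plain strings standing in for grpc_channel_args and illustrative names (set_response, saw_error):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char *next_results;       /* consumed by the next "resolution" */
  char *results_upon_error; /* retained copy, replayed after an error */
} fake_state;

static void set_response(fake_state *s, const char *response) {
  free(s->next_results);
  s->next_results = strdup(response);
  free(s->results_upon_error);
  s->results_upon_error = strdup(response); /* keep a copy for later errors */
}

static void saw_error(fake_state *s) {
  if (s->next_results == NULL && s->results_upon_error != NULL) {
    s->next_results = strdup(s->results_upon_error); /* pretend we re-resolved */
  }
}

int main(void) {
  fake_state s = {0};
  set_response(&s, "ipv4:127.0.0.1:1234");
  free(s.next_results);
  s.next_results = NULL; /* simulate the result being consumed */
  saw_error(&s);
  printf("after error: %s\n", s.next_results);
  free(s.next_results);
  free(s.results_upon_error);
  return 0;
}
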
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
index b5d2e5c92b..7b4fe38272 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
@@ -73,7 +73,9 @@ static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
if (r->next_completion != NULL) {
*r->target_result = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
}
}
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 0877ef4e67..88157ed738 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -140,25 +140,13 @@ struct grpc_subchannel_call {
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
grpc_error *error);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_REASON reason
-#define REF_LOG(name, p) \
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p ref %d -> %d %s", \
- (name), (p), (p)->refs.count, (p)->refs.count + 1, reason)
-#define UNREF_LOG(name, p) \
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p unref %d -> %d %s", \
- (name), (p), (p)->refs.count, (p)->refs.count - 1, reason)
#define REF_MUTATE_EXTRA_ARGS \
GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose
#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
#else
#define REF_REASON ""
-#define REF_LOG(name, p) \
- do { \
- } while (0)
-#define UNREF_LOG(name, p) \
- do { \
- } while (0)
#define REF_MUTATE_EXTRA_ARGS
#define REF_MUTATE_PURPOSE(x)
#endif
@@ -207,10 +195,12 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
- purpose, old_val, old_val + delta, reason);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
+ purpose, old_val, old_val + delta, reason);
+ }
#endif
return old_val;
}
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index f38bf42803..6d2abb04df 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -37,7 +37,7 @@ typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args;
typedef struct grpc_subchannel_key grpc_subchannel_key;
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c
index 04cb1d94f8..71a8bc5bec 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c
@@ -255,6 +255,23 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
}
}
+static void handle_send_message_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op,
+ bool has_compression_algorithm) {
+ call_data *calld = elem->call_data;
+ if (!skip_compression(elem, op->payload->send_message.send_message->flags,
+ has_compression_algorithm)) {
+ calld->send_op = op;
+ calld->send_length = op->payload->send_message.send_message->length;
+ calld->send_flags = op->payload->send_message.send_message->flags;
+ continue_send_message(exec_ctx, elem);
+ } else {
+ /* pass control down the stack */
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+}
+
static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
@@ -307,8 +324,9 @@ static void compress_start_transport_stream_op_batch(
goto retry_send_im;
}
if (cur != INITIAL_METADATA_UNSEEN) {
- grpc_call_next_op(exec_ctx, elem,
- (grpc_transport_stream_op_batch *)cur);
+ handle_send_message_batch(exec_ctx, elem,
+ (grpc_transport_stream_op_batch *)cur,
+ has_compression_algorithm);
}
}
}
@@ -325,17 +343,8 @@ static void compress_start_transport_stream_op_batch(
break;
case HAS_COMPRESSION_ALGORITHM:
case NO_COMPRESSION_ALGORITHM:
- if (!skip_compression(elem,
- op->payload->send_message.send_message->flags,
- cur == HAS_COMPRESSION_ALGORITHM)) {
- calld->send_op = op;
- calld->send_length = op->payload->send_message.send_message->length;
- calld->send_flags = op->payload->send_message.send_message->flags;
- continue_send_message(exec_ctx, elem);
- } else {
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, op);
- }
+ handle_send_message_batch(exec_ctx, elem, op,
+ cur == HAS_COMPRESSION_ALGORITHM);
break;
default:
if (cur & CANCELLED_BIT) {
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index b0ffdc0cf9..6a8c81445a 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -23,6 +23,9 @@
void grpc_chttp2_plugin_init(void) {
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
+#ifndef NDEBUG
+ grpc_register_tracer("chttp2_refcount", &grpc_trace_chttp2_refcount);
+#endif
}
void grpc_chttp2_plugin_shutdown(void) {}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 3b3782c1ec..9157170f52 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -53,7 +53,7 @@
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
-#define DEFAULT_MAX_HEADER_LIST_SIZE (16 * 1024)
+#define DEFAULT_MAX_HEADER_LIST_SIZE (8 * 1024)
#define DEFAULT_CLIENT_KEEPALIVE_TIME_MS INT_MAX
#define DEFAULT_CLIENT_KEEPALIVE_TIMEOUT_MS 20000 /* 20 seconds */
@@ -77,6 +77,10 @@ static bool g_default_keepalive_permit_without_calls =
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false);
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false);
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_chttp2_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
static const grpc_transport_vtable vtable;
/* forward declarations of various callbacks that we'll build closures around */
@@ -92,8 +96,9 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error);
/** Set a transport level setting, and push it to our peer */
-static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_setting_id id, uint32_t value);
+static void queue_setting_update(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value);
static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error);
@@ -213,20 +218,26 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
gpr_free(t);
}
-#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+#ifndef NDEBUG
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason,
const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
- t->refs.count, t->refs.count - 1, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
+ gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
+ t, val, val - 1, reason, file, line);
+ }
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
- t->refs.count, t->refs.count + 1, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
+ gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
+ t, val, val + 1, reason, file, line);
+ }
gpr_ref(&t->refs);
}
#else
@@ -341,15 +352,16 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
/* configure http2 the way we like it */
if (is_client) {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
+ queue_setting_update(exec_ctx, t,
+ GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- DEFAULT_WINDOW);
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
- DEFAULT_MAX_HEADER_LIST_SIZE);
- push_setting(exec_ctx, t,
- GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ DEFAULT_WINDOW);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ DEFAULT_MAX_HEADER_LIST_SIZE);
+ queue_setting_update(exec_ctx, t,
+ GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
t->ping_policy = (grpc_chttp2_repeated_ping_policy){
.max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA,
@@ -535,8 +547,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
int value = grpc_channel_arg_get_integer(
&channel_args->args[i], settings_map[j].integer_options);
if (value >= 0) {
- push_setting(exec_ctx, t, settings_map[j].setting_id,
- (uint32_t)value);
+ queue_setting_update(exec_ctx, t, settings_map[j].setting_id,
+ (uint32_t)value);
}
}
break;
@@ -643,7 +655,7 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason) {
grpc_stream_ref(s->refcount, reason);
}
@@ -952,8 +964,11 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("terminate_writing_with_lock", 0);
}
-static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_setting_id id, uint32_t value) {
+// Dirties an HTTP2 setting to be sent out next time a writing path occurs.
+// If the change needs to occur immediately, manually initiate a write.
+static void queue_setting_update(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value) {
const grpc_chttp2_setting_parameters *sp =
&grpc_chttp2_settings_parameters[id];
uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
@@ -964,7 +979,6 @@ static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (use_value != t->settings[GRPC_LOCAL_SETTINGS][id]) {
t->settings[GRPC_LOCAL_SETTINGS][id] = use_value;
t->dirtied_local_settings = 1;
- grpc_chttp2_initiate_write(exec_ctx, t, "push_setting");
}
}
@@ -1426,6 +1440,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata_ready;
s->recv_initial_metadata =
op_payload->recv_initial_metadata.recv_initial_metadata;
+ s->trailing_metadata_available =
+ op_payload->recv_initial_metadata.trailing_metadata_available;
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
@@ -2130,8 +2146,8 @@ static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_DEBUG, "%s: update initial window size to %d", t->peer_string,
(int)bdp);
}
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- (uint32_t)bdp);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ (uint32_t)bdp);
}
static void update_frame(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -2150,8 +2166,8 @@ static void update_frame(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_DEBUG, "%s: update max_frame size to %d", t->peer_string,
(int)frame_size);
}
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
- (uint32_t)frame_size);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ (uint32_t)frame_size);
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@@ -2749,6 +2765,7 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
gpr_ref_init(&incoming_byte_stream->refs, 2);
incoming_byte_stream->transport = t;
incoming_byte_stream->stream = s;
+ GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
return incoming_byte_stream;
}
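
The push_setting -> queue_setting_update rename above also changes behaviour: the setting is clamped to its allowed range and recorded as dirty, but no write is initiated, so a caller that needs the update on the wire immediately must start a write itself. A small sketch of that clamp-and-mark-dirty shape (setting_entry and the bounds here are illustrative, not the chttp2 settings table):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint32_t value;
  uint32_t min_value;
  uint32_t max_value;
  bool dirty; /* "send this setting the next time a write happens" */
} setting_entry;

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

/* Queues the update; deliberately does NOT trigger a write. */
static void queue_setting_update(setting_entry *s, uint32_t value) {
  uint32_t use_value = clamp_u32(value, s->min_value, s->max_value);
  if (use_value != value) {
    fprintf(stderr, "clamped %u to %u\n", value, use_value);
  }
  if (use_value != s->value) {
    s->value = use_value;
    s->dirty = true;
  }
}

int main(void) {
  setting_entry max_header_list = {16 * 1024, 0, 1u << 24, false};
  queue_setting_update(&max_header_list, 8 * 1024);
  printf("value=%u dirty=%d\n", max_header_list.value, max_header_list.dirty);
  return 0;
}
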
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 0a1fb4d772..0c4e2a91c0 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -26,6 +26,10 @@
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_chttp2_refcount;
+#endif
+
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client);
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index ccca0f1871..689dc8935c 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -93,7 +93,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
- if (reason != GRPC_HTTP2_NO_ERROR || s->header_frames_received < 2) {
+ if (reason != GRPC_HTTP2_NO_ERROR || s->metadata_buffer[1].size == 0) {
char *message;
gpr_asprintf(&message, "Received RST_STREAM with error code %d", reason);
error = grpc_error_set_int(
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 28c6632695..a0e748e7b1 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -608,15 +608,14 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem **extra_headers,
+ size_t extra_headers_size,
grpc_metadata_batch *metadata,
const grpc_encode_header_options *options,
grpc_slice_buffer *outbuf) {
- framer_state st;
- grpc_linked_mdelem *l;
- gpr_timespec deadline;
-
GPR_ASSERT(options->stream_id != 0);
+ framer_state st;
st.seen_regular_header = 0;
st.stream_id = options->stream_id;
st.output = outbuf;
@@ -633,11 +632,14 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
if (c->advertise_table_size_change != 0) {
emit_advertise_table_size_change(c, &st);
}
+ for (size_t i = 0; i < extra_headers_size; ++i) {
+ hpack_enc(exec_ctx, c, *extra_headers[i], &st);
+ }
grpc_metadata_batch_assert_ok(metadata);
- for (l = metadata->list.head; l; l = l->next) {
+ for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) {
hpack_enc(exec_ctx, c, l->md, &st);
}
- deadline = metadata->deadline;
+ gpr_timespec deadline = metadata->deadline;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
deadline_enc(exec_ctx, c, deadline, &st);
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 84ab6dde2c..271192f894 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -85,6 +85,8 @@ typedef struct {
void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem **extra_headers,
+ size_t extra_headers_size,
grpc_metadata_batch *metadata,
const grpc_encode_header_options *options,
grpc_slice_buffer *outbuf);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 4209d66c98..a5b67e5aba 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -454,6 +454,7 @@ struct grpc_chttp2_stream {
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
+ bool *trailing_metadata_available;
grpc_byte_stream **recv_message;
grpc_closure *recv_message_ready;
grpc_metadata_batch *recv_trailing_metadata;
@@ -755,7 +756,7 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
@@ -771,8 +772,7 @@ void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
#endif
-//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
-#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+#ifndef NDEBUG
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 941260be9a..3c8b470b4f 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -681,9 +681,19 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
t->parser_data = &t->hpack_parser;
switch (s->header_frames_received) {
case 0:
- t->hpack_parser.on_header = on_initial_header;
+ if (t->is_client && t->header_eof) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing Trailers-Only"));
+ if (s->trailing_metadata_available != NULL) {
+ *s->trailing_metadata_available = true;
+ }
+ t->hpack_parser.on_header = on_trailing_header;
+ } else {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
+ t->hpack_parser.on_header = on_initial_header;
+ }
break;
case 1:
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
break;
case 2:
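
The parsing change above decides, on the very first HEADERS frame of a client-side stream, whether the metadata is genuine initial metadata or a Trailers-Only response (HEADERS carrying END_STREAM with no prior header frame). A compact sketch of just that decision; the names and the handler enum are illustrative:

#include <stdbool.h>
#include <stdio.h>

typedef enum { ON_INITIAL_HEADER, ON_TRAILING_HEADER } header_handler;

/* header_frames_received: HEADERS frames this stream has seen so far.
 * header_eof: the current HEADERS frame carries END_STREAM. */
static header_handler pick_header_handler(bool is_client, bool header_eof,
                                          int header_frames_received) {
  if (header_frames_received == 0) {
    /* First HEADERS frame: on a client, END_STREAM here means Trailers-Only. */
    return (is_client && header_eof) ? ON_TRAILING_HEADER : ON_INITIAL_HEADER;
  }
  return ON_TRAILING_HEADER; /* a second HEADERS frame is always trailing metadata */
}

int main(void) {
  printf("%d\n", pick_header_handler(true, true, 0));  /* Trailers-Only */
  printf("%d\n", pick_header_handler(true, false, 0)); /* normal initial metadata */
  return 0;
}
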
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 4db0fbb098..315f2a67a2 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -162,6 +162,20 @@ static uint32_t target_write_size(grpc_chttp2_transport *t) {
return 1024 * 1024;
}
+// Returns true if initial_metadata contains only default headers.
+//
+// TODO(roth): The fact that we hard-code these particular headers here
+// is fairly ugly. Need some better way to know which headers are
+// default, maybe via a bit in the static metadata table?
+static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
+ int num_default_fields =
+ (initial_metadata->idx.named.status != NULL) +
+ (initial_metadata->idx.named.content_type != NULL) +
+ (initial_metadata->idx.named.grpc_encoding != NULL) +
+ (initial_metadata->idx.named.grpc_accept_encoding != NULL);
+ return (size_t)num_default_fields == initial_metadata->list.count;
+}
+
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
@@ -218,31 +232,59 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
s->send_initial_metadata != NULL, s->announce_window));
+ grpc_mdelem *extra_headers_for_trailing_metadata[2];
+ size_t num_extra_headers_for_trailing_metadata = 0;
+
/* send initial metadata if it's available */
- if (!sent_initial_metadata && s->send_initial_metadata) {
- grpc_encode_header_options hopt = {
- .stream_id = s->id,
- .is_eof = false,
- .use_true_binary_metadata =
- t->settings
- [GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != 0,
- .max_frame_size = t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- .stats = &s->stats.outgoing};
- grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor,
- s->send_initial_metadata, &hopt, &t->outbuf);
+ if (!sent_initial_metadata && s->send_initial_metadata != NULL) {
+ // We skip this on the server side if there is no custom initial
+ // metadata, there are no messages to send, and we are also sending
+ // trailing metadata. This results in a Trailers-Only response,
+ // which is required for retries, as per:
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid
+ if (t->is_client || s->fetching_send_message != NULL ||
+ s->flow_controlled_buffer.length != 0 ||
+ s->send_trailing_metadata == NULL ||
+ !is_default_initial_metadata(s->send_initial_metadata)) {
+ grpc_encode_header_options hopt = {
+ .stream_id = s->id,
+ .is_eof = false,
+ .use_true_binary_metadata =
+ t->settings
+ [GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != 0,
+ .max_frame_size = t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ .stats = &s->stats.outgoing};
+ grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
+ s->send_initial_metadata, &hopt, &t->outbuf);
+ now_writing = true;
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ if (!t->is_client) {
+ t->ping_recv_state.last_ping_recv_time =
+ gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.ping_strikes = 0;
+ }
+ } else {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
+ // When sending Trailers-Only, we need to move the :status and
+ // content-type headers to the trailers.
+ if (s->send_initial_metadata->idx.named.status != NULL) {
+ extra_headers_for_trailing_metadata
+ [num_extra_headers_for_trailing_metadata++] =
+ &s->send_initial_metadata->idx.named.status->md;
+ }
+ if (s->send_initial_metadata->idx.named.content_type != NULL) {
+ extra_headers_for_trailing_metadata
+ [num_extra_headers_for_trailing_metadata++] =
+ &s->send_initial_metadata->idx.named.content_type->md;
+ }
+ }
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
- now_writing = true;
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
- if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
- t->ping_recv_state.ping_strikes = 0;
- }
}
/* send any window updates */
if (s->announce_window > 0) {
@@ -320,6 +362,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
&s->stats.outgoing, &t->outbuf);
@@ -337,6 +380,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor,
+ extra_headers_for_trailing_metadata,
+ num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
}
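
On the write side the mirror-image decision is made: a server may suppress the initial-metadata HEADERS frame entirely when the initial metadata holds only default headers, nothing remains to be sent, and trailing metadata is about to go out, in which case :status and content-type are re-homed into the trailers. A boolean sketch of that predicate with illustrative field names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
  bool is_client;
  bool has_pending_message;
  size_t buffered_bytes;
  bool will_send_trailing_metadata;
  bool initial_metadata_is_default_only; /* only :status/content-type/encodings */
} write_ctx;

/* True when the initial-metadata HEADERS frame can be skipped and the
 * response collapsed into Trailers-Only. */
static bool can_send_trailers_only(const write_ctx *w) {
  return !w->is_client && !w->has_pending_message && w->buffered_bytes == 0 &&
         w->will_send_trailing_metadata && w->initial_metadata_is_default_only;
}

int main(void) {
  write_ctx w = {.is_client = false,
                 .has_pending_message = false,
                 .buffered_bytes = 0,
                 .will_send_trailing_metadata = true,
                 .initial_metadata_is_default_only = true};
  printf("trailers-only: %d\n", can_send_trailers_only(&w));
  return 0;
}
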
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index ce72fc3d08..29dfa885de 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -766,20 +766,50 @@ static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
bool is_canceled_or_failed = stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED];
if (is_canceled_or_failed) {
- if (op_id == OP_SEND_INITIAL_METADATA) result = false;
- if (op_id == OP_SEND_MESSAGE) result = false;
- if (op_id == OP_SEND_TRAILING_METADATA) result = false;
- if (op_id == OP_CANCEL_ERROR) result = false;
+ if (op_id == OP_SEND_INITIAL_METADATA) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
+ if (op_id == OP_SEND_MESSAGE) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
+ if (op_id == OP_SEND_TRAILING_METADATA) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
+ if (op_id == OP_CANCEL_ERROR) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
/* already executed */
if (op_id == OP_RECV_INITIAL_METADATA &&
- stream_state->state_op_done[OP_RECV_INITIAL_METADATA])
+ stream_state->state_op_done[OP_RECV_INITIAL_METADATA]) {
+ CRONET_LOG(GPR_DEBUG, "Because");
result = false;
- if (op_id == OP_RECV_MESSAGE &&
- stream_state->state_op_done[OP_RECV_MESSAGE])
+ }
+ if (op_id == OP_RECV_MESSAGE && op_state->state_op_done[OP_RECV_MESSAGE]) {
+ CRONET_LOG(GPR_DEBUG, "Because");
result = false;
+ }
if (op_id == OP_RECV_TRAILING_METADATA &&
- stream_state->state_op_done[OP_RECV_TRAILING_METADATA])
+ stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
+ CRONET_LOG(GPR_DEBUG, "Because");
result = false;
+ }
+ /* ON_COMPLETE can be processed if one of the following conditions is met:
+ * 1. the stream failed
+ * 2. the stream is cancelled, and the callback is received
+ * 3. the stream succeeded before cancel is effective
+ * 4. the stream is cancelled, and the stream is never started */
+ if (op_id == OP_ON_COMPLETE &&
+ !(stream_state->state_callback_received[OP_FAILED] ||
+ stream_state->state_callback_received[OP_CANCELED] ||
+ stream_state->state_callback_received[OP_SUCCEEDED] ||
+ !stream_state->state_op_done[OP_SEND_INITIAL_METADATA])) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
} else if (op_id == OP_SEND_INITIAL_METADATA) {
/* already executed */
if (stream_state->state_op_done[OP_SEND_INITIAL_METADATA]) result = false;
@@ -868,7 +898,7 @@ static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
CRONET_LOG(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->recv_message &&
- !stream_state->state_op_done[OP_RECV_MESSAGE]) {
+ !op_state->state_op_done[OP_RECV_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->cancel_stream &&
@@ -1067,6 +1097,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->state_callback_received[OP_FAILED]) {
CRONET_LOG(GPR_DEBUG, "Stream failed.");
@@ -1074,6 +1105,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.read_stream_closed == true) {
/* No more data will be received */
@@ -1214,8 +1246,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_op->cancel_stream &&
op_can_be_run(stream_op, s, &oas->state, OP_CANCEL_ERROR)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_CANCEL_ERROR", oas);
- CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
if (s->cbs) {
+ CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
bidirectional_stream_cancel(s->cbs);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
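
The new ON_COMPLETE guard above encodes the four conditions listed in its comment as a single boolean. Restated as a standalone helper, with flag names chosen for readability here rather than taken from the cronet transport's state struct:

#include <stdbool.h>
#include <stdio.h>

/* ON_COMPLETE may run if the stream failed, was cancelled and the cancel
 * callback arrived, succeeded before the cancel took effect, or was cancelled
 * before it ever started (initial metadata never sent). */
static bool on_complete_can_run(bool failed, bool cancel_callback_received,
                                bool succeeded, bool send_initial_metadata_done) {
  return failed || cancel_callback_received || succeeded ||
         !send_initial_metadata_done;
}

int main(void) {
  /* Cancelled before the stream was ever started: ON_COMPLETE may run. */
  printf("%d\n", on_complete_can_run(false, false, false, false));
  /* Cancelled mid-flight, cancel callback not yet received: must wait. */
  printf("%d\n", on_complete_can_run(false, false, false, true));
  return 0;
}
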
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index b0559ad745..a80f8aa826 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -234,7 +234,7 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_stack *call_stack,
grpc_polling_entity *pollent);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_CALL_STACK_REF(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount, reason)
#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 34a77c3bf4..97c2886525 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -25,6 +25,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/security/transport/security_handshaker.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -157,7 +158,6 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
gpr_timespec deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint)) {
- grpc_channel_security_connector *sc = NULL;
on_done_closure *c = gpr_malloc(sizeof(*c));
const char *pem_root_certs = grpc_get_default_ssl_roots();
if (pem_root_certs == NULL) {
@@ -168,11 +168,13 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
}
c->func = on_done;
c->arg = arg;
- c->handshake_mgr = grpc_handshake_manager_create();
+ grpc_channel_security_connector *sc = NULL;
GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
exec_ctx, pem_root_certs, host, &sc) == GRPC_SECURITY_OK);
- grpc_channel_security_connector_add_handshakers(exec_ctx, sc,
- c->handshake_mgr);
+ grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base);
+ grpc_channel_args args = {1, &channel_arg};
+ c->handshake_mgr = grpc_handshake_manager_create();
+ grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, &args, c->handshake_mgr);
grpc_handshake_manager_do_handshake(
exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline,
NULL /* acceptor */, on_handshake_done, c /* user_data */);
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 719e2e8cee..e028e72ed6 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -24,7 +24,11 @@
#include "src/core/lib/profiling/timers.h"
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_closure = GRPC_TRACER_INITIALIZER(false);
+#endif
+
+#ifndef NDEBUG
grpc_closure *grpc_closure_init(const char *file, int line,
grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg,
@@ -37,7 +41,7 @@ grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
closure->cb = cb;
closure->cb_arg = cb_arg;
closure->scheduler = scheduler;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
closure->scheduled = false;
closure->file_initiated = NULL;
closure->line_initiated = 0;
@@ -112,7 +116,7 @@ static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
cb(exec_ctx, cb_arg, error);
}
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
grpc_closure *grpc_closure_create(const char *file, int line,
grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
@@ -123,7 +127,7 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
wrapped_closure *wc = gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
grpc_closure_init(file, line, &wc->wrapper, closure_wrapper, wc, scheduler);
#else
grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler);
@@ -131,7 +135,7 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
return &wc->wrapper;
}
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
grpc_closure *c, grpc_error *error) {
#else
@@ -140,7 +144,7 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
#endif
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
c->file_initiated = file;
c->line_initiated = line;
c->run = true;
@@ -153,7 +157,7 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_END("grpc_closure_run", 0);
}
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
grpc_closure *c, grpc_error *error) {
#else
@@ -162,7 +166,7 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
#endif
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
GPR_ASSERT(!c->scheduled);
c->scheduled = true;
c->file_initiated = file;
@@ -177,7 +181,7 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_END("grpc_closure_sched", 0);
}
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
void grpc_closure_list_sched(const char *file, int line,
grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
#else
@@ -186,7 +190,7 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
grpc_closure *c = list->head;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
GPR_ASSERT(!c->scheduled);
c->scheduled = true;
c->file_initiated = file;
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 3ecf9e947b..cd32a4ba38 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -26,9 +26,17 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/support/mpscq.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_closure;
+#endif
+
typedef struct grpc_closure_list {
grpc_closure *head;
grpc_closure *tail;
@@ -38,7 +46,9 @@ typedef struct grpc_closure_list {
*
* \param arg Arbitrary input.
* \param error GRPC_ERROR_NONE if no error occurred, otherwise some grpc_error
- * describing what went wrong */
+ * describing what went wrong.
+ * Error contract: it is not the cb's job to unref this error;
+ * the closure scheduler will do that after the cb returns */
typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -59,8 +69,6 @@ struct grpc_closure_scheduler {
const grpc_closure_scheduler_vtable *vtable;
};
-// #define GRPC_CLOSURE_RICH_DEBUG
-
/** A closure over a grpc_iomgr_cb_func. */
struct grpc_closure {
/** Once queued, next indicates the next queued closure; before then, scratch
@@ -89,7 +97,7 @@ struct grpc_closure {
// extra tracing and debugging for grpc_closure. This incurs a decent amount of
// overhead per closure, so it must be enabled at compile time.
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
bool scheduled;
bool run; // true = run, false = scheduled
const char *file_created;
@@ -100,7 +108,7 @@ struct grpc_closure {
};
/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
grpc_closure *grpc_closure_init(const char *file, int line,
grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg,
@@ -116,7 +124,7 @@ grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
#endif
/* Create a heap allocated closure: try to avoid except for very rare events */
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
grpc_closure *grpc_closure_create(const char *file, int line,
grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler);
@@ -153,7 +161,7 @@ bool grpc_closure_list_empty(grpc_closure_list list);
/** Run a closure directly. Caller ensures that no locks are being held above.
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error);
#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
@@ -166,7 +174,7 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
#endif
/** Schedule a closure to be run. Does not need to be run from a safe point. */
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error);
#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
@@ -180,7 +188,7 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
/** Schedule all closures in a list to be run. Does not need to be run from a
* safe point. */
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
void grpc_closure_list_sched(const char *file, int line,
grpc_exec_ctx *exec_ctx,
grpc_closure_list *closure_list);
@@ -193,4 +201,8 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
grpc_closure_list_sched(exec_ctx, closure_list)
#endif
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
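
The new comment on grpc_iomgr_cb_func above spells out an ownership rule: the error passed to a closure callback is borrowed, and the scheduler unrefs it after the callback returns, so a callback that wants to keep the error must take its own reference. A tiny sketch of that borrow-versus-retain contract using a toy refcounted error type (err_ref/err_unref are stand-ins, not the grpc_error API):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int refs; const char *msg; } err;

static err *err_ref(err *e) { if (e) ++e->refs; return e; }
static void err_unref(err *e) {
  if (e && --e->refs == 0) { printf("destroying '%s'\n", e->msg); free(e); }
}

static err *saved; /* set by a callback that wants the error past its own return */

static void callback(void *arg, err *error) {
  (void)arg;
  /* The callback must NOT unref 'error'; to keep it, it takes its own ref. */
  saved = err_ref(error);
}

static void run_closure(void (*cb)(void *, err *), void *arg, err *error) {
  cb(arg, error);
  err_unref(error); /* the scheduler, not the callback, drops the passed-in ref */
}

int main(void) {
  err *e = malloc(sizeof(*e));
  e->refs = 1;
  e->msg = "example error";
  run_closure(callback, NULL, e);
  err_unref(saved); /* last reference: the error is destroyed here */
  return 0;
}
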
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 750ff102ff..7f9c5d837f 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -102,12 +102,14 @@ static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
-#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
-#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
- "combiner[%p] %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
- gpr_atm_no_barrier_load(&lock->refs.count), \
- gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason);
+#ifndef NDEBUG
+#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
+ if (GRPC_TRACER_ON(grpc_combiner_trace)) { \
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
+ "C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
+ gpr_atm_no_barrier_load(&lock->refs.count), \
+ gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason); \
+ }
#else
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif
@@ -247,7 +249,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
grpc_error *cl_err = cl->error_data.error;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
cl->scheduled = false;
#endif
cl->cb(exec_ctx, cl->cb_arg, cl_err);
@@ -264,7 +266,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index a616113ca0..8e0434369d 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -35,8 +35,7 @@
// necessary
grpc_combiner *grpc_combiner_create(void);
-//#define GRPC_COMBINER_REFCOUNT_DEBUG
-#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_ARGS \
, const char *file, int line, const char *reason
#define GRPC_COMBINER_REF(combiner, reason) \
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 68884226b5..a95929a1fb 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -30,10 +30,15 @@
#include <grpc/support/log_windows.h>
#endif
+#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/error_internal.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_error_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
static const char *error_int_name(grpc_error_ints key) {
switch (key) {
case GRPC_ERROR_INT_ERRNO:
@@ -119,14 +124,14 @@ bool grpc_error_is_special(grpc_error *err) {
err == GRPC_ERROR_CANCELLED;
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
-grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
- const char *func) {
+#ifndef NDEBUG
+grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) {
if (grpc_error_is_special(err)) return err;
- gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
- gpr_atm_no_barrier_load(&err->atomics.refs.count),
- gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line,
- func);
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
+ gpr_atm_no_barrier_load(&err->atomics.refs.count),
+ gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line);
+ }
gpr_ref(&err->atomics.refs);
return err;
}
@@ -172,14 +177,14 @@ static void error_destroy(grpc_error *err) {
gpr_free(err);
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
-void grpc_error_unref(grpc_error *err, const char *file, int line,
- const char *func) {
+#ifndef NDEBUG
+void grpc_error_unref(grpc_error *err, const char *file, int line) {
if (grpc_error_is_special(err)) return;
- gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
- gpr_atm_no_barrier_load(&err->atomics.refs.count),
- gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line,
- func);
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
+ gpr_atm_no_barrier_load(&err->atomics.refs.count),
+ gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line);
+ }
if (gpr_unref(&err->atomics.refs)) {
error_destroy(err);
}
@@ -202,13 +207,17 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
return UINT8_MAX;
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_error *orig = *err;
#endif
*err = gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- if (*err != orig) gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (*err != orig) {
+ gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
+ }
+ }
#endif
}
uint8_t placement = (*err)->arena_size;
@@ -316,8 +325,10 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line);
+ }
#endif
err->arena_size = 0;
@@ -395,8 +406,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
+ }
#endif
// bulk memcpy of the rest of the struct.
size_t skip = sizeof(&out->atomics);
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index 1ce916f2ec..b362948691 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -26,6 +26,8 @@
#include <grpc/status.h>
#include <grpc/support/time.h>
+#include "src/core/lib/debug/trace.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -36,6 +38,10 @@ extern "C" {
typedef struct grpc_error grpc_error;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_error_refcount;
+#endif
+
typedef enum {
/// 'errno' from the operating system
GRPC_ERROR_INT_ERRNO,
@@ -149,15 +155,11 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_copied_string(desc), \
errs, count)
-// #define GRPC_ERROR_REFCOUNT_DEBUG
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
-grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
- const char *func);
-void grpc_error_unref(grpc_error *err, const char *file, int line,
- const char *func);
-#define GRPC_ERROR_REF(err) grpc_error_ref(err, __FILE__, __LINE__, __func__)
-#define GRPC_ERROR_UNREF(err) \
- grpc_error_unref(err, __FILE__, __LINE__, __func__)
+#ifndef NDEBUG
+grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line);
+void grpc_error_unref(grpc_error *err, const char *file, int line);
+#define GRPC_ERROR_REF(err) grpc_error_ref(err, __FILE__, __LINE__)
+#define GRPC_ERROR_UNREF(err) grpc_error_unref(err, __FILE__, __LINE__)
#else
grpc_error *grpc_error_ref(grpc_error *err);
void grpc_error_unref(grpc_error *err);
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c
index e0c05457e3..66ba601adb 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.c
@@ -194,8 +194,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
index 761e2ed5fe..2c91ad357c 100644
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
@@ -147,8 +147,7 @@ struct grpc_fd {
};
/* Reference counting for fds */
-// #define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -168,20 +167,18 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-//#define PI_REFCOUNT_DEBUG
-
-#ifdef PI_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(PI_REFCOUNT_DEBUG) */
+#else
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
+#endif
typedef struct worker_node {
struct worker_node *next;
@@ -313,21 +310,27 @@ gpr_atm g_epoll_sync;
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-#ifdef PI_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, old_cnt + 1, reason, file, line);
+ }
pi_add_ref(pi);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, (old_cnt - 1), reason, file, line);
+ }
pi_unref(exec_ctx, pi);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
#endif
@@ -792,14 +795,17 @@ static void polling_island_global_shutdown() {
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -808,18 +814,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
/* Add the fd to the freelist */
gpr_mu_lock(&fd_freelist_mu);
@@ -837,7 +844,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
/* Increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
@@ -905,8 +912,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
return new_fd;
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
index 0ab3594818..49be72c03e 100644
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
@@ -93,20 +93,18 @@ static void fd_global_shutdown(void);
* epoll set Declarations
*/
-//#define EPS_REFCOUNT_DEBUG
-
-#ifdef EPS_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define EPS_UNREF(exec_ctx, p, r) \
eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(EPS_REFCOUNT_DEBUG) */
+#else
#define EPS_ADD_REF(p, r) eps_add_ref((p))
#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_EPS_REF_COUNT_DEBUG) */
+#endif
typedef struct epoll_set {
/* Mutex poller should acquire to poll this. This enforces that only one
@@ -226,21 +224,27 @@ gpr_atm g_epoll_sync;
static void eps_add_ref(epoll_set *eps);
static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps);
-#ifdef EPS_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void eps_add_ref_dbg(epoll_set *eps, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ eps, old_cnt, old_cnt + 1, reason, file, line);
+ }
eps_add_ref(eps);
- gpr_log(GPR_DEBUG, "Add ref eps: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)eps, old_cnt, old_cnt + 1, reason, file, line);
}
static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ gpr_log(GPR_DEBUG, "Unref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ eps, old_cnt, (old_cnt - 1), reason, file, line);
+ }
eps_unref(exec_ctx, eps);
- gpr_log(GPR_DEBUG, "Unref eps: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)eps, old_cnt, (old_cnt - 1), reason, file, line);
}
#endif
diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c
index 949f8a845d..5574838187 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.c
+++ b/src/core/lib/iomgr/ev_epollex_linux.c
@@ -231,15 +231,18 @@ static bool append_error(grpc_error **composite, grpc_error *error,
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
@@ -264,18 +267,19 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_unlock(&fd_freelist_mu);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
const char *reason, const char *file, int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
grpc_schedule_on_exec_ctx),
@@ -328,8 +332,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
return new_fd;
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c
index fa4b4e8d0a..255e07010b 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.c
+++ b/src/core/lib/iomgr/ev_epollsig_linux.c
@@ -140,8 +140,7 @@ struct grpc_fd {
};
/* Reference counting for fds */
-// #define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -161,20 +160,18 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-//#define PI_REFCOUNT_DEBUG
-
-#ifdef PI_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
+#else
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
+#endif
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
@@ -287,21 +284,27 @@ gpr_atm g_epoll_sync;
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-#ifdef PI_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, old_cnt + 1, reason, file, line);
+ }
pi_add_ref(pi);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, (old_cnt - 1), reason, file, line);
+ }
pi_unref(exec_ctx, pi);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
#endif
@@ -720,14 +723,17 @@ static void polling_island_global_shutdown() {
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -736,18 +742,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
/* Add the fd to the freelist */
gpr_mu_lock(&fd_freelist_mu);
@@ -765,7 +772,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
/* Increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
@@ -833,9 +840,6 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
-#endif
gpr_free(fd_name);
return new_fd;
}
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index d1a27b8228..1f8d7eef26 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -134,9 +134,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
/* Return 1 if this fd is orphaned, 0 otherwise */
static bool fd_is_orphaned(grpc_fd *fd);
-/* Reference counting for fds */
-//#define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -263,14 +261,17 @@ cv_fd_table g_cvfds;
* fd_posix.c
*/
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- (int)gpr_atm_no_barrier_load(&fd->refst),
- (int)gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -279,18 +280,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- (int)gpr_atm_no_barrier_load(&fd->refst),
- (int)gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
gpr_mu_destroy(&fd->mu);
grpc_iomgr_unregister_object(&fd->iomgr_object);
@@ -321,9 +323,6 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_asprintf(&name2, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&r->iomgr_object, name2);
gpr_free(name2);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
-#endif
return r;
}
@@ -417,7 +416,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
/* increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index dacdb9f4da..2648df393d 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -41,6 +41,10 @@
grpc_tracer_flag grpc_polling_trace =
GRPC_TRACER_INITIALIZER(false); /* Disabled by default */
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_fd_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
/** Default poll() function - a pointer so that it can be overridden by some
* tests */
grpc_poll_function_type grpc_poll_function = poll;
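Condensed sketch of the pattern the poller files above converge on (assembled from the hunks in this diff, not literal code from any single file): the ref-counting helpers keep their debug signatures only in !NDEBUG builds, and even then they log nothing unless the fd_refcount tracer is switched on at runtime.
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG, "FD %d %p ref %d [%s; %s:%d]", fd->fd, fd, n, reason,
            file, line);
  }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  /* The actual refcount update is shared by both build modes. */
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}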
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 51c36216c5..833170ceed 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -62,7 +62,7 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
did_something = true;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
@@ -85,10 +85,21 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
closure->scheduled = false;
+ if (GRPC_TRACER_ON(grpc_trace_closure)) {
+ gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: %s [%s:%d]",
+ closure, closure->file_created, closure->line_created,
+ closure->run ? "run" : "scheduled", closure->file_initiated,
+ closure->line_initiated);
+ }
#endif
closure->cb(exec_ctx, closure->cb_arg, error);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_closure)) {
+ gpr_log(GPR_DEBUG, "closure %p finished", closure);
+ }
+#endif
GRPC_ERROR_UNREF(error);
}
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index fda274e797..7621a7fe75 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -58,7 +58,7 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
-#ifdef GRPC_CLOSURE_RICH_DEBUG
+#ifndef NDEBUG
c->scheduled = false;
#endif
c->cb(exec_ctx, c->cb_arg, error);
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 3ff878e596..a609a3877a 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_fd_refcount;
+#endif
+
/* A grpc_pollset is a set of file descriptors that a higher level item is
interested in. For example:
- a server will typically keep a pollset containing all connected channels,
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index 07c7c04559..1a54065a91 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -31,6 +31,12 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_uv.h"
+#include "src/core/lib/debug/trace.h"
+
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_fd_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
struct grpc_pollset {
uv_timer_t timer;
int shutting_down;
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index d5a03732cb..1bfc2a22a8 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -30,6 +30,10 @@
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_fd_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
gpr_mu grpc_polling_mu;
static grpc_pollset_worker *g_active_poller;
static grpc_pollset_worker g_global_root_worker;
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 66e81bf81f..5de2b0f4ee 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -163,15 +163,18 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
gpr_free(tcp);
}
-/*#define GRPC_TCP_REFCOUNT_DEBUG*/
-#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define TCP_UNREF(cl, tcp, reason) \
tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val - 1);
+ }
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
@@ -179,8 +182,12 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count + 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val + 1);
+ }
gpr_ref(&tcp->refcount);
}
#else
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index ab5bb9f7da..7c21b44e76 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -69,16 +69,18 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
gpr_free(tcp);
}
-/*#define GRPC_TCP_REFCOUNT_DEBUG*/
-#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define TCP_UNREF(exec_ctx, tcp, reason) \
tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "TCP unref %p : %s %" PRIiPTR " -> %" PRIiPTR, tcp, reason,
- tcp->refcount.count, tcp->refcount.count - 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val - 1);
+ }
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
@@ -86,9 +88,12 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "TCP ref %p : %s %" PRIiPTR " -> %" PRIiPTR, tcp, reason,
- tcp->refcount.count, tcp->refcount.count + 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val + 1);
+ }
gpr_ref(&tcp->refcount);
}
#else
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 161b397534..6704a158ce 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -48,6 +48,8 @@
#define GRPC_FIONBIO FIONBIO
#endif
+grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false);
+
static grpc_error *set_non_block(SOCKET sock) {
int status;
uint32_t param = 1;
@@ -115,15 +117,18 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
gpr_free(tcp);
}
-/*#define GRPC_TCP_REFCOUNT_DEBUG*/
-#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define TCP_UNREF(exec_ctx, tcp, reason) \
tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val - 1);
+ }
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
@@ -131,8 +136,12 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count + 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val + 1);
+ }
gpr_ref(&tcp->refcount);
}
#else
diff --git a/src/core/lib/security/context/security_context.c b/src/core/lib/security/context/security_context.c
index 02b6d0164b..dffe6d2e91 100644
--- a/src/core/lib/security/context/security_context.c
+++ b/src/core/lib/security/context/security_context.c
@@ -29,6 +29,11 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_auth_context_refcount =
+ GRPC_TRACER_INITIALIZER(false);
+#endif
+
/* --- grpc_call --- */
grpc_call_error grpc_call_set_credentials(grpc_call *call,
@@ -122,14 +127,17 @@ grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
return ctx;
}
-#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx,
const char *file, int line,
const char *reason) {
if (ctx == NULL) return NULL;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "AUTH_CONTEXT:%p ref %d -> %d %s", ctx, (int)ctx->refcount.count,
- (int)ctx->refcount.count + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "AUTH_CONTEXT:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", ctx, val,
+ val + 1, reason);
+ }
#else
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
if (ctx == NULL) return NULL;
@@ -138,13 +146,16 @@ grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
return ctx;
}
-#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_auth_context_unref(grpc_auth_context *ctx, const char *file, int line,
const char *reason) {
if (ctx == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "AUTH_CONTEXT:%p unref %d -> %d %s", ctx, (int)ctx->refcount.count,
- (int)ctx->refcount.count - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "AUTH_CONTEXT:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", ctx, val,
+ val - 1, reason);
+ }
#else
void grpc_auth_context_unref(grpc_auth_context *ctx) {
if (ctx == NULL) return;
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index 102f9d6e2f..0df39257a7 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -22,6 +22,10 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/security/credentials/credentials.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_auth_context_refcount;
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -50,7 +54,7 @@ struct grpc_auth_context {
grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained);
/* Refcounting. */
-#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_AUTH_CONTEXT_REF(p, r) \
grpc_auth_context_ref((p), __FILE__, __LINE__, (r))
#define GRPC_AUTH_CONTEXT_UNREF(p, r) \
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 140cf294ae..f4ed81db1a 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -75,18 +75,20 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
gpr_free(ep);
}
-/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/
-#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
#define SECURE_ENDPOINT_REF(ep, reason) \
secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
-static void secure_endpoint_unref(secure_endpoint *ep,
- grpc_closure_list *closure_list,
+static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d",
- ep, reason, ep->ref.count, ep->ref.count - 1);
+ if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECENDP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
+ val - 1);
+ }
if (gpr_unref(&ep->ref)) {
destroy(exec_ctx, ep);
}
@@ -94,8 +96,12 @@ static void secure_endpoint_unref(secure_endpoint *ep,
static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d",
- ep, reason, ep->ref.count, ep->ref.count + 1);
+ if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECENDP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
+ val + 1);
+ }
gpr_ref(&ep->ref);
}
#else
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 9934d57ee6..3c0c24254b 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -43,6 +43,11 @@
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security_adapter.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_security_connector_refcount =
+ GRPC_TRACER_INITIALIZER(false);
+#endif
+
/* -- Constants. -- */
#ifndef INSTALL_PREFIX
@@ -142,14 +147,17 @@ void grpc_channel_security_connector_check_call_host(
}
}
-#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_security_connector *grpc_security_connector_ref(
grpc_security_connector *sc, const char *file, int line,
const char *reason) {
if (sc == NULL) return NULL;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SECURITY_CONNECTOR:%p ref %d -> %d %s", sc,
- (int)sc->refcount.count, (int)sc->refcount.count + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECURITY_CONNECTOR:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", sc,
+ val, val + 1, reason);
+ }
#else
grpc_security_connector *grpc_security_connector_ref(
grpc_security_connector *sc) {
@@ -159,15 +167,18 @@ grpc_security_connector *grpc_security_connector_ref(
return sc;
}
-#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
const char *file, int line,
const char *reason) {
if (sc == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SECURITY_CONNECTOR:%p unref %d -> %d %s", sc,
- (int)sc->refcount.count, (int)sc->refcount.count - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECURITY_CONNECTOR:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", sc,
+ val, val - 1, reason);
+ }
#else
void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 24b1086ee3..1c0fe40045 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -29,6 +29,10 @@
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security_interface.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_security_connector_refcount;
+#endif
+
/* --- status enum. --- */
typedef enum { GRPC_SECURITY_OK = 0, GRPC_SECURITY_ERROR } grpc_security_status;
@@ -66,7 +70,7 @@ struct grpc_security_connector {
};
/* Refcounting. */
-#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_SECURITY_CONNECTOR_REF(p, r) \
grpc_security_connector_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \
diff --git a/src/core/lib/support/stack_lockfree.c b/src/core/lib/support/stack_lockfree.c
deleted file mode 100644
index 0fb64ed001..0000000000
--- a/src/core/lib/support/stack_lockfree.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/support/stack_lockfree.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-
-/* The lockfree node structure is a single architecture-level
- word that allows for an atomic CAS to set it up. */
-struct lockfree_node_contents {
- /* next thing to look at. Actual index for head, next index otherwise */
- uint16_t index;
-#ifdef GPR_ARCH_64
- uint16_t pad;
- uint32_t aba_ctr;
-#else
-#ifdef GPR_ARCH_32
- uint16_t aba_ctr;
-#else
-#error Unsupported bit width architecture
-#endif
-#endif
-};
-
-/* Use a union to make sure that these are in the same bits as an atm word */
-typedef union lockfree_node {
- gpr_atm atm;
- struct lockfree_node_contents contents;
-} lockfree_node;
-
-/* make sure that entries aligned to 8-bytes */
-#define ENTRY_ALIGNMENT_BITS 3
-/* reserve this entry as invalid */
-#define INVALID_ENTRY_INDEX ((1 << 16) - 1)
-
-struct gpr_stack_lockfree {
- lockfree_node *entries;
- lockfree_node head; /* An atomic entry describing curr head */
-};
-
-gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
- gpr_stack_lockfree *stack;
- stack = (gpr_stack_lockfree *)gpr_malloc(sizeof(*stack));
- /* Since we only allocate 16 bits to represent an entry number,
- * make sure that we are within the desired range */
- /* Reserve the highest entry number as a dummy */
- GPR_ASSERT(entries < INVALID_ENTRY_INDEX);
- stack->entries = (lockfree_node *)gpr_malloc_aligned(
- entries * sizeof(stack->entries[0]), ENTRY_ALIGNMENT_BITS);
- /* Clear out all entries */
- memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
- memset(&stack->head, 0, sizeof(stack->head));
-
- GPR_ASSERT(sizeof(stack->entries->atm) == sizeof(stack->entries->contents));
-
- /* Point the head at reserved dummy entry */
- stack->head.contents.index = INVALID_ENTRY_INDEX;
-/* Fill in the pad and aba_ctr to avoid confusing memcheck tools */
-#ifdef GPR_ARCH_64
- stack->head.contents.pad = 0;
-#endif
- stack->head.contents.aba_ctr = 0;
- return stack;
-}
-
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
- gpr_free_aligned(stack->entries);
- gpr_free(stack);
-}
-
-int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
- lockfree_node head;
- lockfree_node newhead;
- lockfree_node curent;
- lockfree_node newent;
-
- /* First fill in the entry's index and aba ctr for new head */
- newhead.contents.index = (uint16_t)entry;
-#ifdef GPR_ARCH_64
- /* Fill in the pad to avoid confusing memcheck tools */
- newhead.contents.pad = 0;
-#endif
-
- /* Also post-increment the aba_ctr */
- curent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
- newhead.contents.aba_ctr = ++curent.contents.aba_ctr;
- gpr_atm_no_barrier_store(&stack->entries[entry].atm, curent.atm);
-
- do {
- /* Atomically get the existing head value for use */
- head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
- /* Point to it */
- newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
- newent.contents.index = head.contents.index;
- gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
- } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
- /* Use rel_cas above to make sure that entry index is set properly */
- return head.contents.index == INVALID_ENTRY_INDEX;
-}
-
-int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
- lockfree_node head;
- lockfree_node newhead;
-
- do {
- head.atm = gpr_atm_acq_load(&(stack->head.atm));
- if (head.contents.index == INVALID_ENTRY_INDEX) {
- return -1;
- }
- newhead.atm =
- gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));
-
- } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
-
- return head.contents.index;
-}
diff --git a/src/core/lib/support/stack_lockfree.h b/src/core/lib/support/stack_lockfree.h
deleted file mode 100644
index 6324211b72..0000000000
--- a/src/core/lib/support/stack_lockfree.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H
-#define GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H
-
-#include <stddef.h>
-
-typedef struct gpr_stack_lockfree gpr_stack_lockfree;
-
-/* This stack must specify the maximum number of entries to track.
- The current implementation only allows up to 65534 entries */
-gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries);
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack);
-
-/* Pass in a valid entry number for the next stack entry */
-/* Returns 1 if this is the first element on the stack, 0 otherwise */
-int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry);
-
-/* Returns -1 on empty or the actual entry number */
-int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack);
-
-#endif /* GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H */
diff --git a/src/core/lib/support/time_precise.c b/src/core/lib/support/time_precise.c
index 1de373e002..6ce19e53cc 100644
--- a/src/core/lib/support/time_precise.c
+++ b/src/core/lib/support/time_precise.c
@@ -22,26 +22,26 @@
#ifdef GRPC_TIMERS_RDTSC
#if defined(__i386__)
-static void gpr_get_cycle_counter(long long int *clk) {
- long long int ret;
+static void gpr_get_cycle_counter(int64_t *clk) {
+ int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
*clk = ret;
}
// ----------------------------------------------------------------
#elif defined(__x86_64__) || defined(__amd64__)
-static void gpr_get_cycle_counter(long long int *clk) {
- unsigned long long low, high;
+static void gpr_get_cycle_counter(int64_t *clk) {
+ uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- *clk = (long long)(high << 32) | (long long)low;
+ *clk = (int64_t)(high << 32) | (int64_t)low;
}
#endif
static double cycles_per_second = 0;
-static long long int start_cycle;
+static int64_t start_cycle;
void gpr_precise_clock_init(void) {
time_t start;
- long long end_cycle;
+ int64_t end_cycle;
gpr_log(GPR_DEBUG, "Calibrating timers");
start = time(NULL);
while (time(NULL) == start)
@@ -55,7 +55,7 @@ void gpr_precise_clock_init(void) {
}
void gpr_precise_clock_now(gpr_timespec *clk) {
- long long int counter;
+ int64_t counter;
double secs;
gpr_get_cycle_counter(&counter);
secs = (double)(counter - start_cycle) / cycles_per_second;
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index b499219e17..c769866ceb 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -457,7 +457,7 @@ void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_REASON reason
#define REF_ARG , const char *reason
#else
@@ -929,33 +929,6 @@ static grpc_compression_algorithm decode_compression(grpc_mdelem md) {
return algorithm;
}
-static void recv_common_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_metadata_batch *b) {
- if (b->idx.named.grpc_status != NULL) {
- uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
- grpc_error *error =
- status_code == GRPC_STATUS_OK
- ? GRPC_ERROR_NONE
- : grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Error received from peer"),
- GRPC_ERROR_INT_GRPC_STATUS,
- (intptr_t)status_code);
-
- if (b->idx.named.grpc_message != NULL) {
- error = grpc_error_set_str(
- error, GRPC_ERROR_STR_GRPC_MESSAGE,
- grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md)));
- grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
- } else if (error != GRPC_ERROR_NONE) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
- grpc_empty_slice());
- }
-
- set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
- grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status);
- }
-}
-
static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
int is_trailing) {
if (b->list.count == 0) return;
@@ -980,8 +953,6 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch *b) {
- recv_common_filter(exec_ctx, call, b);
-
if (b->idx.named.grpc_encoding != NULL) {
GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
set_incoming_compression_algorithm(
@@ -989,7 +960,6 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
GPR_TIMER_END("incoming_compression_algorithm", 0);
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
}
-
if (b->idx.named.grpc_accept_encoding != NULL) {
GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
set_encodings_accepted_by_peer(exec_ctx, call,
@@ -997,14 +967,33 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding);
GPR_TIMER_END("encodings_accepted_by_peer", 0);
}
-
publish_app_metadata(call, b, false);
}
static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
grpc_metadata_batch *b) {
grpc_call *call = args;
- recv_common_filter(exec_ctx, call, b);
+ if (b->idx.named.grpc_status != NULL) {
+ uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
+ grpc_error *error =
+ status_code == GRPC_STATUS_OK
+ ? GRPC_ERROR_NONE
+ : grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Error received from peer"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ (intptr_t)status_code);
+ if (b->idx.named.grpc_message != NULL) {
+ error = grpc_error_set_str(
+ error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md)));
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
+ } else if (error != GRPC_ERROR_NONE) {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_empty_slice());
+ }
+ set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status);
+ }
publish_app_metadata(call, b, true);
}
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 60b661cf8c..185bfccb77 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -62,7 +62,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
const char *reason);
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 5647dff28b..5780a18ce8 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -345,7 +345,7 @@ grpc_call *grpc_channel_create_registered_call(
return call;
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_REASON reason
#define REF_ARG , const char *reason
#else
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 848debc7c5..528bb868e2 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -59,7 +59,7 @@ grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
size_t grpc_channel_get_call_size_estimate(grpc_channel *channel);
void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
const char *reason);
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 1a5c721214..b04aee6c73 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -38,6 +38,7 @@
grpc_tracer_flag grpc_trace_operation_failures = GRPC_TRACER_INITIALIZER(false);
#ifndef NDEBUG
grpc_tracer_flag grpc_trace_pending_tags = GRPC_TRACER_INITIALIZER(false);
+grpc_tracer_flag grpc_trace_cq_refcount = GRPC_TRACER_INITIALIZER(false);
#endif
typedef struct {
@@ -437,12 +438,16 @@ int grpc_get_cq_poll_num(grpc_completion_queue *cc) {
return cur_num_polls;
}
-#ifdef GRPC_CQ_REF_COUNT_DEBUG
+#ifndef NDEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
const char *file, int line) {
cq_data *cqd = &cc->data;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p ref %d -> %d %s", cc,
- (int)cqd->owning_refs.count, (int)cqd->owning_refs.count + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&cqd->owning_refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "CQ:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", cc, val, val + 1,
+ reason);
+ }
#else
void grpc_cq_internal_ref(grpc_completion_queue *cc) {
cq_data *cqd = &cc->data;
@@ -456,12 +461,16 @@ static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cc, "pollset_destroy");
}
-#ifdef GRPC_CQ_REF_COUNT_DEBUG
-void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason,
- const char *file, int line) {
+#ifndef NDEBUG
+void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ const char *reason, const char *file, int line) {
cq_data *cqd = &cc->data;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p unref %d -> %d %s", cc,
- (int)cqd->owning_refs.count, (int)cqd->owning_refs.count - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&cqd->owning_refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "CQ:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", cc, val, val - 1,
+ reason);
+ }
#else
void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cc) {
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 49097bac39..97ea9cae20 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -30,8 +30,10 @@
extern grpc_tracer_flag grpc_cq_pluck_trace;
extern grpc_tracer_flag grpc_cq_event_timeout_trace;
extern grpc_tracer_flag grpc_trace_operation_failures;
+
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_pending_tags;
+extern grpc_tracer_flag grpc_trace_cq_refcount;
#endif
#ifdef __cplusplus
@@ -52,9 +54,7 @@ typedef struct grpc_cq_completion {
uintptr_t next;
} grpc_cq_completion;
-//#define GRPC_CQ_REF_COUNT_DEBUG
-
-#ifdef GRPC_CQ_REF_COUNT_DEBUG
+#ifndef NDEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
const char *file, int line);
void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 01a1f33db2..14a86bfa0a 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -126,18 +126,23 @@ void grpc_init(void) {
grpc_register_tracer("channel_stack_builder",
&grpc_trace_channel_stack_builder);
grpc_register_tracer("http1", &grpc_http1_trace);
- grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
+ grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace); // default on
grpc_register_tracer("combiner", &grpc_combiner_trace);
grpc_register_tracer("server_channel", &grpc_server_channel_trace);
grpc_register_tracer("bdp_estimator", &grpc_bdp_estimator_trace);
- // Default pluck trace to 1
- grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
- // Default timeout trace to 1
+ grpc_register_tracer("queue_timeout",
+ &grpc_cq_event_timeout_trace); // default on
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
grpc_register_tracer("resource_quota", &grpc_resource_quota_trace);
grpc_register_tracer("call_error", &grpc_call_error_trace);
#ifndef NDEBUG
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
+ grpc_register_tracer("queue_refcount", &grpc_trace_cq_refcount);
+ grpc_register_tracer("closure", &grpc_trace_closure);
+ grpc_register_tracer("error_refcount", &grpc_trace_error_refcount);
+ grpc_register_tracer("stream_refcount", &grpc_trace_stream_refcount);
+ grpc_register_tracer("fd_refcount", &grpc_trace_fd_refcount);
+ grpc_register_tracer("metadata", &grpc_trace_metadata);
#endif
grpc_security_pre_init();
grpc_iomgr_init(&exec_ctx);
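A minimal usage sketch (not part of this change) for turning the newly registered tracers on at runtime; it assumes the public grpc_tracer_set_enabled() entry point and the GRPC_TRACE environment variable, both of which address tracers by the names registered above, and the refcount tracers only exist in !NDEBUG builds.
#include <grpc/grpc.h>
int main(void) {
  grpc_init();
  /* Enable a couple of the debug-only tracers registered in grpc_init();
     in a release build these names are never registered, so the calls
     should simply have no effect. Setting GRPC_TRACE=fd_refcount,closure
     in the environment is the usual alternative. */
  grpc_tracer_set_enabled("fd_refcount", 1);
  grpc_tracer_set_enabled("error_refcount", 1);
  /* ... application code ... */
  grpc_shutdown();
  return 0;
}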
diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.c
index fb6635716d..7dbea581d0 100644
--- a/src/core/lib/surface/init_secure.c
+++ b/src/core/lib/surface/init_secure.c
@@ -32,9 +32,19 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/tsi/transport_security_interface.h"
+#ifndef NDEBUG
+#include "src/core/lib/security/context/security_context.h"
+#endif
+
void grpc_security_pre_init(void) {
grpc_register_tracer("secure_endpoint", &grpc_trace_secure_endpoint);
grpc_register_tracer("transport_security", &tsi_tracing_enabled);
+#ifndef NDEBUG
+ grpc_register_tracer("auth_context_refcount",
+ &grpc_trace_auth_context_refcount);
+ grpc_register_tracer("security_connector_refcount",
+ &grpc_trace_security_connector_refcount);
+#endif
}
static bool maybe_prepend_client_auth_filter(
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 8a2616b027..84ddf74ab9 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -475,6 +475,7 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
*rc->data.registered.deadline = calld->deadline;
if (rc->data.registered.optional_payload) {
*rc->data.registered.optional_payload = calld->payload;
+ calld->payload = NULL;
}
break;
default:
@@ -878,6 +879,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->path);
}
grpc_metadata_array_destroy(&calld->initial_metadata);
+ grpc_byte_buffer_destroy(calld->payload);
gpr_mu_destroy(&calld->mu_state);
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 9491730719..87a2abf344 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -47,7 +47,8 @@
* used to determine which kind of element a pointer refers to.
*/
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_metadata = GRPC_TRACER_INITIALIZER(false);
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__)
@@ -144,15 +145,17 @@ static int is_mdelem_static(grpc_mdelem e) {
static void ref_md_locked(mdtab_shard *shard,
interned_metadata *md DEBUG_ARGS) {
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
@@ -243,13 +246,16 @@ grpc_mdelem grpc_mdelem_create(
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(allocated->key);
- char *value_str = grpc_slice_to_c_string(allocated->value);
- gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%zu: '%s' = '%s'", (void *)allocated,
- gpr_atm_no_barrier_load(&allocated->refcnt), key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(allocated->key);
+ char *value_str = grpc_slice_to_c_string(allocated->value);
+ gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'",
+ (void *)allocated, gpr_atm_no_barrier_load(&allocated->refcnt),
+ key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
return GRPC_MAKE_MDELEM(allocated, GRPC_MDELEM_STORAGE_ALLOCATED);
}
@@ -294,13 +300,15 @@ grpc_mdelem grpc_mdelem_create(
md->bucket_next = shard->elems[idx];
shard->elems[idx] = md;
gpr_mu_init(&md->mu_user_data);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(GPR_DEBUG, "ELM NEW:%p:%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
shard->count++;
@@ -356,15 +364,17 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
@@ -376,15 +386,17 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
@@ -404,15 +416,17 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(md->key),
grpc_slice_hash(md->value));
@@ -428,15 +442,17 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
GPR_ASSERT(prev_refcount >= 1);
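Every metadata.c hunk above makes the same substitution: ref-count logging that previously required rebuilding with -DGRPC_METADATA_REFCOUNT_DEBUG is now compiled into all #ifndef NDEBUG builds and enabled at runtime through the grpc_trace_metadata tracer flag. The self-contained sketch below illustrates that compile-time-plus-runtime gating; the tracer_flag type, trace_metadata variable, and log_ref() helper are simplified stand-ins for illustration, not gRPC's real API.

/* Sketch only: a debug-build log that is further gated by a runtime flag. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool enabled; /* in gRPC this is toggled via the GRPC_TRACE machinery */
} tracer_flag;

static tracer_flag trace_metadata = {false};

static void log_ref(const char *key, const char *value, long refs_before) {
#ifndef NDEBUG
  if (trace_metadata.enabled) {
    fprintf(stderr, "ELM REF:%ld->%ld: '%s' = '%s'\n", refs_before,
            refs_before + 1, key, value);
  }
#else
  (void)key;
  (void)value;
  (void)refs_before;
#endif
}

int main(void) {
  trace_metadata.enabled = true; /* no recompile needed to turn tracing on */
  log_ref("user-agent", "grpc-c/1.4.0", 1);
  return 0;
}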
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 5e1afecd2e..974469e436 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_metadata;
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -132,9 +136,7 @@ void *grpc_mdelem_get_user_data(grpc_mdelem md,
void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
void *user_data);
-/* Reference counting */
-//#define GRPC_METADATA_REFCOUNT_DEBUG
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
#define GRPC_MDELEM_UNREF(exec_ctx, s) \
grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__)
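The GRPC_MDELEM_REF / GRPC_MDELEM_UNREF macros above forward __FILE__ and __LINE__ to the ref/unref functions only in debug builds, which is how the 'ELM REF' / 'ELM UNREF' log lines in metadata.c can report their call site. A minimal sketch of that macro pattern follows, assuming nothing about gRPC itself; DEBUG_ARGS here is a simplified stand-in for the parameter-injection macro metadata.c actually uses, and elem / elem_ref / ELEM_REF are hypothetical names.

#include <stdio.h>

#ifndef NDEBUG
#define DEBUG_ARGS , const char *file, int line
#define ELEM_REF(e) elem_ref((e), __FILE__, __LINE__)
#else
#define DEBUG_ARGS
#define ELEM_REF(e) elem_ref((e))
#endif

typedef struct {
  int refcnt;
} elem;

static elem *elem_ref(elem *e DEBUG_ARGS) {
#ifndef NDEBUG
  /* only debug builds know (and pay for) the call-site information */
  fprintf(stderr, "%s:%d REF %d->%d\n", file, line, e->refcnt, e->refcnt + 1);
#endif
  e->refcnt++;
  return e;
}

int main(void) {
  elem e = {1};
  ELEM_REF(&e); /* expands to elem_ref(&e, __FILE__, __LINE__) in debug builds */
  return 0;
}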
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index c2afedec58..6a9eba110d 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -31,25 +31,33 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/transport_impl.h"
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_stream_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
+#ifndef NDEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
- gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
- refcount->object_type, refcount, refcount->destroy.cb_arg, val,
- val + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val + 1, reason);
+ }
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
gpr_ref_non_zero(&refcount->refs);
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
- gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
- refcount->object_type, refcount, refcount->destroy.cb_arg, val,
- val - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val - 1, reason);
+ }
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@@ -74,7 +82,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
offsetof(grpc_stream_refcount, slice_refcount)))
static void slice_stream_ref(void *p) {
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p), "slice");
#else
grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p));
@@ -82,7 +90,7 @@ static void slice_stream_ref(void *p) {
}
static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p), "slice");
#else
grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p));
@@ -102,7 +110,7 @@ static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {
.eq = grpc_slice_default_eq_impl,
.hash = grpc_slice_default_hash_impl};
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg,
const char *object_type) {
@@ -198,6 +206,11 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
return transport->vtable->get_endpoint(exec_ctx, transport);
}
+// grpc_transport_stream_op_batch_finish_with_failure must always unref
+// cancel_error after processing it. Although it lives in lib rather than in
+// a transport, it handles transport stream ops and therefore follows the
+// same error contract as the transports themselves.
+
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
grpc_error *error) {
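The comment just above records that grpc_transport_stream_op_batch_finish_with_failure must unref cancel_error, and the transport.h hunks that follow state the same ownership rule for cancel_stream, disconnect_with_error, and goaway_error: the error arrives with one reference that the receiving transport must release exactly once after acting on it. The toy sketch below models that contract with a hypothetical ref-counted error type; my_error, error_create, error_unref, and handle_cancel_stream exist only for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int refs;
  const char *msg;
} my_error;

static my_error *error_create(const char *msg) {
  my_error *e = (my_error *)malloc(sizeof(*e));
  assert(e != NULL);
  e->refs = 1;
  e->msg = msg;
  return e;
}

static void error_unref(my_error *e) {
  if (--e->refs == 0) free(e);
}

/* The transport receiving the op owns the ref it was handed and must
 * release it after processing, whatever the outcome. */
static void handle_cancel_stream(my_error *cancel_error) {
  fprintf(stderr, "cancelling stream: %s\n", cancel_error->msg);
  error_unref(cancel_error); /* contract: unref exactly once */
}

int main(void) {
  handle_cancel_stream(error_create("deadline exceeded"));
  return 0;
}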
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index d231157c87..84e53e683a 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -42,18 +42,20 @@ typedef struct grpc_transport grpc_transport;
for a stream. */
typedef struct grpc_stream grpc_stream;
-//#define GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_stream_refcount;
+#endif
typedef struct grpc_stream_refcount {
gpr_refcount refs;
grpc_closure destroy;
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
const char *object_type;
#endif
grpc_slice_refcount slice_refcount;
} grpc_stream_refcount;
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg,
const char *object_type);
@@ -165,6 +167,10 @@ struct grpc_transport_stream_op_batch_payload {
uint32_t *recv_flags;
/** Should be enqueued when initial metadata is ready to be processed. */
grpc_closure *recv_initial_metadata_ready;
+ // If not NULL, will be set to true if trailing metadata is
+ // immediately available. This may be a signal that we received a
+ // Trailers-Only response.
+ bool *trailing_metadata_available;
} recv_initial_metadata;
struct {
@@ -192,6 +198,8 @@ struct grpc_transport_stream_op_batch_payload {
grpc_chttp2_grpc_status_to_http2_error. Send a RST_STREAM with this
error. */
struct {
+ // Error contract: the transport that gets this op must cause cancel_error
+ // to be unref'ed after processing it
grpc_error *cancel_error;
} cancel_stream;
@@ -206,9 +214,13 @@ typedef struct grpc_transport_op {
/** connectivity monitoring - set connectivity_state to NULL to unsubscribe */
grpc_closure *on_connectivity_state_change;
grpc_connectivity_state *connectivity_state;
- /** should the transport be disconnected */
+ /** should the transport be disconnected
+ * Error contract: the transport that gets this op must cause
+ * disconnect_with_error to be unref'ed after processing it */
grpc_error *disconnect_with_error;
- /** what should the goaway contain? */
+ /** what should the goaway contain?
+ * Error contract: the transport that gets this op must cause
+ * goaway_error to be unref'ed after processing it */
grpc_error *goaway_error;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures.