author     Makarand Dharmapurikar <makarandd@google.com>   2017-03-27 09:18:21 -0700
committer  Makarand Dharmapurikar <makarandd@google.com>   2017-03-27 09:18:21 -0700
commit     a0649dde0ec92624d56f943d3b0fae09bcb65e5b (patch)
tree       8b0885a3e6d7584b79b5a31725932f4c606768a1 /src/core/ext
parent     3c7e460de68625c926b8e4581ed573e543a16a67 (diff)
parent     c4478a103b68100b86f506061cedcb0ce70016ba (diff)
Merge branch 'master' of https://github.com/grpc/grpc into grpcz_client
Diffstat (limited to 'src/core/ext')
-rw-r--r--  src/core/ext/census/grpc_filter.c | 4
-rw-r--r--  src/core/ext/client_channel/channel_connectivity.c | 4
-rw-r--r--  src/core/ext/client_channel/client_channel.c | 188
-rw-r--r--  src/core/ext/client_channel/client_channel_plugin.c | 3
-rw-r--r--  src/core/ext/client_channel/connector.h | 2
-rw-r--r--  src/core/ext/client_channel/http_connect_handshaker.c | 4
-rw-r--r--  src/core/ext/client_channel/proxy_mapper_registry.c | 12
-rw-r--r--  src/core/ext/client_channel/resolver_registry.c | 1
-rw-r--r--  src/core/ext/client_channel/retry_throttle.c | 210
-rw-r--r--  src/core/ext/client_channel/retry_throttle.h (renamed from src/core/ext/client_channel/initial_connect_string.c) | 43
-rw-r--r--  src/core/ext/client_channel/subchannel.c | 70
-rw-r--r--  src/core/ext/client_channel/subchannel.h | 18
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c | 22
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c | 27
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c | 29
-rw-r--r--  src/core/ext/load_reporting/load_reporting_filter.c | 6
-rw-r--r--  src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c | 350
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h (renamed from src/core/ext/client_channel/initial_connect_string.h) | 41
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c | 319
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c | 289
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h (renamed from src/core/ext/client_channel/default_initial_connect_string.c) | 33
-rw-r--r--  src/core/ext/resolver/dns/native/dns_resolver.c | 22
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c | 41
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 262
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_data.c | 17
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_goaway.c | 5
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 7
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.c | 11
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c | 73
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.c | 65
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.h | 18
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 48
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 115
39 files changed, 1921 insertions, 458 deletions
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index b80d831557..fc29dbd454 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -138,7 +138,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
@@ -160,7 +160,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
diff --git a/src/core/ext/client_channel/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c
index dd70bc2c6c..f6cb3b9115 100644
--- a/src/core/ext/client_channel/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -139,8 +139,8 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
error = GRPC_ERROR_NONE;
} else {
if (error == GRPC_ERROR_NONE) {
- error =
- GRPC_ERROR_CREATE("Timed out waiting for connection state change");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Timed out waiting for connection state change");
} else if (error == GRPC_ERROR_CANCELLED) {
error = GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index f2475cf6ae..435a3ab0fe 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -47,6 +47,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/client_channel/retry_throttle.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@@ -189,6 +190,8 @@ typedef struct client_channel_channel_data {
grpc_combiner *combiner;
/** currently active load balancer */
grpc_lb_policy *lb_policy;
+ /** retry throttle data */
+ grpc_server_retry_throttle_data *retry_throttle_data;
/** maps method names to method_parameters structs */
grpc_slice_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
@@ -284,6 +287,65 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
+typedef struct {
+ char *server_name;
+ grpc_server_retry_throttle_data *retry_throttle_data;
+} service_config_parsing_state;
+
+static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
+ service_config_parsing_state *parsing_state = arg;
+ if (strcmp(field->key, "retryThrottling") == 0) {
+ if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
+ if (field->type != GRPC_JSON_OBJECT) return;
+ int max_milli_tokens = 0;
+ int milli_token_ratio = 0;
+ for (grpc_json *sub_field = field->child; sub_field != NULL;
+ sub_field = sub_field->next) {
+ if (sub_field->key == NULL) return;
+ if (strcmp(sub_field->key, "maxTokens") == 0) {
+ if (max_milli_tokens != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
+ if (max_milli_tokens == -1) return;
+ max_milli_tokens *= 1000;
+ } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
+ if (milli_token_ratio != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ // We support up to 3 decimal digits.
+ size_t whole_len = strlen(sub_field->value);
+ uint32_t multiplier = 1;
+ uint32_t decimal_value = 0;
+ const char *decimal_point = strchr(sub_field->value, '.');
+ if (decimal_point != NULL) {
+ whole_len = (size_t)(decimal_point - sub_field->value);
+ multiplier = 1000;
+ size_t decimal_len = strlen(decimal_point + 1);
+ if (decimal_len > 3) decimal_len = 3;
+ if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
+ &decimal_value)) {
+ return;
+ }
+ uint32_t decimal_multiplier = 1;
+ for (size_t i = 0; i < (3 - decimal_len); ++i) {
+ decimal_multiplier *= 10;
+ }
+ decimal_value *= decimal_multiplier;
+ }
+ uint32_t whole_value;
+ if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
+ &whole_value)) {
+ return;
+ }
+ milli_token_ratio = (int)((whole_value * multiplier) + decimal_value);
+ if (milli_token_ratio <= 0) return;
+ }
+ }
+ parsing_state->retry_throttle_data =
+ grpc_retry_throttle_map_get_data_for_server(
+ parsing_state->server_name, max_milli_tokens, milli_token_ratio);
+ }
+}
+
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = arg;
@@ -293,8 +355,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_hash_table *method_params_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
- grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
+ grpc_error *state_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
char *service_config_json = NULL;
+ service_config_parsing_state parsing_state;
+ memset(&parsing_state, 0, sizeof(parsing_state));
if (chand->resolver_result != NULL) {
// Find LB policy name.
@@ -355,6 +420,19 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_service_config *service_config =
grpc_service_config_create(service_config_json);
if (service_config != NULL) {
+ channel_arg =
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(channel_arg != NULL);
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+ grpc_uri *uri =
+ grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ parsing_state.server_name =
+ uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ grpc_service_config_parse_global_params(
+ service_config, parse_retry_throttle_params, &parsing_state);
+ parsing_state.server_name = NULL;
+ grpc_uri_destroy(uri);
method_params_table = grpc_service_config_create_method_config_table(
exec_ctx, service_config, method_parameters_create_from_json,
&method_parameters_vtable);
@@ -386,6 +464,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
chand->info_service_config_json = service_config_json;
}
gpr_mu_unlock(&chand->info_mu);
+
+ if (chand->retry_throttle_data != NULL) {
+ grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
+ }
+ chand->retry_throttle_data = parsing_state.retry_throttle_data;
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
@@ -393,9 +476,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
if (lb_policy != NULL) {
grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
} else if (chand->resolver == NULL /* disconnected */) {
- grpc_closure_list_fail_all(
- &chand->waiting_for_config_closures,
- GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
+ grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Channel disconnected", &error, 1));
grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
@@ -423,8 +506,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_error *refs[] = {error, state_error};
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
- GPR_ARRAY_SIZE(refs)),
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Got config after disconnection", refs, GPR_ARRAY_SIZE(refs)),
"resolver_gone");
}
@@ -463,8 +546,9 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
- grpc_closure_sched(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("Ping with no load balancing"));
+ grpc_closure_sched(
+ exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
@@ -579,7 +663,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
if (proxy_name != NULL) gpr_free(proxy_name);
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
if (chand->resolver == NULL) {
- return GRPC_ERROR_CREATE("resolver creation failed");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
}
return GRPC_ERROR_NONE;
}
@@ -613,6 +697,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
gpr_free(chand->info_lb_policy_name);
gpr_free(chand->info_service_config_json);
+ if (chand->retry_throttle_data != NULL) {
+ grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
+ }
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
@@ -654,6 +741,7 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
+ grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
grpc_error *cancel_error;
@@ -661,6 +749,7 @@ typedef struct client_channel_call_data {
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
+ gpr_arena *arena;
subchannel_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
@@ -675,6 +764,9 @@ typedef struct client_channel_call_data {
grpc_call_stack *owning_call;
grpc_linked_mdelem lb_token_mdelem;
+
+ grpc_closure on_complete;
+ grpc_closure *original_on_complete;
} call_data;
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
@@ -727,7 +819,7 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
gpr_free(ops);
}
-// Sets calld->method_params.
+// Sets calld->method_params and calld->retry_throttle_data.
// If the method params specify a timeout, populates
// *per_method_deadline and returns true.
static bool set_call_method_params_from_service_config_locked(
@@ -735,6 +827,10 @@ static bool set_call_method_params_from_service_config_locked(
gpr_timespec *per_method_deadline) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
+ if (chand->retry_throttle_data != NULL) {
+ calld->retry_throttle_data =
+ grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
+ }
if (chand->method_params_table != NULL) {
calld->method_params = grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
@@ -780,12 +876,14 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
- fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
- "Failed to create subchannel", &error, 1));
+ fail_locked(exec_ctx, calld,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1));
} else if (GET_CALL(calld) == CANCELLED_CALL) {
/* already cancelled before subchannel became ready */
- grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING(
- "Cancelled before creating subchannel", &error, 1);
+ grpc_error *cancellation_error =
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Cancelled before creating subchannel", &error, 1);
/* if due to deadline, attach the deadline exceeded status to the error */
if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
cancellation_error =
@@ -796,9 +894,14 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
/* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
+ const grpc_connected_subchannel_call_args call_args = {
+ .pollent = calld->pollent,
+ .path = calld->path,
+ .start_time = calld->call_start_time,
+ .deadline = calld->deadline,
+ .arena = calld->arena};
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
- calld->call_start_time, calld->deadline, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@@ -882,9 +985,9 @@ static bool pick_subchannel_locked(
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
- grpc_closure_sched(
- exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, cpa->on_ready,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
}
}
GPR_TIMER_END("pick_subchannel", 0);
@@ -941,7 +1044,8 @@ static bool pick_subchannel_locked(
grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
GRPC_ERROR_NONE);
} else {
- grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
+ grpc_closure_sched(exec_ctx, on_ready,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
GPR_TIMER_END("pick_subchannel", 0);
@@ -1025,9 +1129,14 @@ static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
+ const grpc_connected_subchannel_call_args call_args = {
+ .pollent = calld->pollent,
+ .path = calld->path,
+ .start_time = calld->call_start_time,
+ .deadline = calld->deadline,
+ .arena = calld->arena};
grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
- calld->call_start_time, calld->deadline, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@@ -1045,6 +1154,26 @@ static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
add_waiting_locked(calld, op);
}
+static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_call_element *elem = arg;
+ call_data *calld = elem->call_data;
+ if (calld->retry_throttle_data != NULL) {
+ if (error == GRPC_ERROR_NONE) {
+ grpc_server_retry_throttle_data_record_success(
+ calld->retry_throttle_data);
+ } else {
+ // TODO(roth): In a subsequent PR, check the return value here and
+ // decide whether or not to retry. Note that we should only
+ // record failures whose statuses match the configured retryable
+ // or non-fatal status codes.
+ grpc_server_retry_throttle_data_record_failure(
+ calld->retry_throttle_data);
+ }
+ }
+ grpc_closure_run(exec_ctx, calld->original_on_complete,
+ GRPC_ERROR_REF(error));
+}
+
static void start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("start_transport_stream_op_locked", 0);
@@ -1053,6 +1182,14 @@ static void start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *elem = op->handler_private.args[0];
call_data *calld = elem->call_data;
+ if (op->recv_trailing_metadata != NULL) {
+ GPR_ASSERT(op->on_complete != NULL);
+ calld->original_on_complete = op->on_complete;
+ grpc_closure_init(&calld->on_complete, on_complete, elem,
+ grpc_schedule_on_exec_ctx);
+ op->on_complete = &calld->on_complete;
+ }
+
start_transport_stream_op_locked_inner(exec_ctx, op, elem);
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
@@ -1114,6 +1251,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->owning_call = args->call_stack;
+ calld->arena = args->arena;
grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
@@ -1122,7 +1260,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *and_free_memory) {
+ grpc_closure *then_schedule_closure) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
grpc_slice_unref_internal(exec_ctx, calld->path);
@@ -1132,6 +1270,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
+ grpc_subchannel_call_set_cleanup_closure(call, then_schedule_closure);
+ then_schedule_closure = NULL;
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
}
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
@@ -1141,7 +1281,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
"picked");
}
gpr_free(calld->waiting_ops);
- gpr_free(and_free_memory);
+ grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
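
For orientation only (not part of this commit): the "retryThrottling" object consumed by parse_retry_throttle_params above comes from the service config JSON returned by the resolver. A minimal sketch of such a config as a C string literal follows; only the "retryThrottling", "maxTokens" and "tokenRatio" keys are taken from the parser itself, and everything else about the surrounding config is an assumption. Note how the parser rescales both fields into milli-token units.

/* Hypothetical service config fragment; field names match the parser above. */
static const char *example_service_config_json =
    "{"
    "  \"retryThrottling\": {"
    "    \"maxTokens\": 10,"     /* parser stores 10 * 1000 = 10000 milli-tokens */
    "    \"tokenRatio\": 0.316"  /* up to 3 decimal digits: milli_token_ratio = 316 */
    "  }"
    "}";

With these example values, each failed RPC drains 1000 milli-tokens, each success refills 316, and retries are disallowed once the bucket falls to 5000 (max_milli_tokens / 2), per the retry_throttle.c implementation added later in this diff.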
diff --git a/src/core/ext/client_channel/client_channel_plugin.c b/src/core/ext/client_channel/client_channel_plugin.c
index 28d3b63f99..f51277d0b2 100644
--- a/src/core/ext/client_channel/client_channel_plugin.c
+++ b/src/core/ext/client_channel/client_channel_plugin.c
@@ -43,6 +43,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/client_channel/retry_throttle.h"
#include "src/core/ext/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
@@ -82,6 +83,7 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
grpc_resolver_registry_init();
+ grpc_retry_throttle_map_init();
grpc_proxy_mapper_registry_init();
grpc_register_http_proxy_mapper();
grpc_subchannel_index_init();
@@ -96,6 +98,7 @@ void grpc_client_channel_shutdown(void) {
grpc_subchannel_index_shutdown();
grpc_channel_init_shutdown();
grpc_proxy_mapper_registry_shutdown();
+ grpc_retry_throttle_map_shutdown();
grpc_resolver_registry_shutdown();
grpc_lb_policy_registry_shutdown();
}
diff --git a/src/core/ext/client_channel/connector.h b/src/core/ext/client_channel/connector.h
index 9bff41f003..94b5fb5c9e 100644
--- a/src/core/ext/client_channel/connector.h
+++ b/src/core/ext/client_channel/connector.h
@@ -48,8 +48,6 @@ struct grpc_connector {
typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
- /** initial connect string to send */
- grpc_slice initial_connect_string;
/** deadline for connection */
gpr_timespec deadline;
/** channel arguments (to be passed to transport) */
diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index 58ab233f1b..778d76c39f 100644
--- a/src/core/ext/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -116,7 +116,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
// If we were shut down after an endpoint operation succeeded but
// before the endpoint callback was invoked, we need to generate our
// own error.
- error = GRPC_ERROR_CREATE("Handshaker shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
}
if (!handshaker->shutdown) {
// TODO(ctiller): It is currently necessary to shutdown endpoints
@@ -226,7 +226,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
char* msg;
gpr_asprintf(&msg, "HTTP proxy returned response code %d",
handshaker->http_response.status);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
handshake_failed_locked(exec_ctx, handshaker, error);
goto done;
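
As an aside (not part of the diff): the two hunks above show the split this merge applies throughout the tree. String literals move to GRPC_ERROR_CREATE_FROM_STATIC_STRING, while messages built at runtime with gpr_asprintf move to GRPC_ERROR_CREATE_FROM_COPIED_STRING, which copies the buffer so it can be freed immediately. A minimal sketch of the two call shapes, reusing only macros and helpers that appear in this diff (the include path for the error API is assumed from the tree layout of this era):

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"

/* Sketch only: builds one error of each flavour, mirroring the hunks above. */
static grpc_error *example_handshake_error(int http_status) {
  /* Static string: the error records the literal without copying it. */
  grpc_error *shutdown_err =
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
  /* Formatted string: the error copies msg, so it is safe to free right away. */
  char *msg;
  gpr_asprintf(&msg, "HTTP proxy returned response code %d", http_status);
  grpc_error *proxy_err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
  gpr_free(msg);
  return grpc_error_add_child(shutdown_err, proxy_err);
}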
diff --git a/src/core/ext/client_channel/proxy_mapper_registry.c b/src/core/ext/client_channel/proxy_mapper_registry.c
index 2c44b9d490..0935ddbdbd 100644
--- a/src/core/ext/client_channel/proxy_mapper_registry.c
+++ b/src/core/ext/client_channel/proxy_mapper_registry.c
@@ -94,6 +94,14 @@ static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) {
grpc_proxy_mapper_destroy(list->list[i]);
}
gpr_free(list->list);
+ // Clean up in case we re-initialize later.
+ // TODO(ctiller): This should ideally live in
+ // grpc_proxy_mapper_registry_init(). However, if we did this there,
+ // then we would do it AFTER we start registering proxy mappers from
+ // third-party plugins, so they'd never show up (and would leak memory).
+ // We probably need some sort of dependency system for plugins to fix
+ // this.
+ memset(list, 0, sizeof(*list));
}
//
@@ -102,9 +110,7 @@ static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) {
static grpc_proxy_mapper_list g_proxy_mapper_list;
-void grpc_proxy_mapper_registry_init() {
- memset(&g_proxy_mapper_list, 0, sizeof(g_proxy_mapper_list));
-}
+void grpc_proxy_mapper_registry_init() {}
void grpc_proxy_mapper_registry_shutdown() {
grpc_proxy_mapper_list_destroy(&g_proxy_mapper_list);
diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index 3c5a6fb3ff..0f074a3386 100644
--- a/src/core/ext/client_channel/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -93,7 +93,6 @@ static grpc_resolver_factory *lookup_factory(const char *name) {
return g_all_of_the_resolvers[i];
}
}
-
return NULL;
}
diff --git a/src/core/ext/client_channel/retry_throttle.c b/src/core/ext/client_channel/retry_throttle.c
new file mode 100644
index 0000000000..8926c3d782
--- /dev/null
+++ b/src/core/ext/client_channel/retry_throttle.c
@@ -0,0 +1,210 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/client_channel/retry_throttle.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/avl.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+
+//
+// server_retry_throttle_data
+//
+
+struct grpc_server_retry_throttle_data {
+ gpr_refcount refs;
+ int max_milli_tokens;
+ int milli_token_ratio;
+ gpr_atm milli_tokens;
+ // A pointer to the replacement for this grpc_server_retry_throttle_data
+ // entry. If non-NULL, then this entry is stale and must not be used.
+ // We hold a reference to the replacement.
+ gpr_atm replacement;
+};
+
+static void get_replacement_throttle_data_if_needed(
+ grpc_server_retry_throttle_data** throttle_data) {
+ while (true) {
+ grpc_server_retry_throttle_data* new_throttle_data =
+ (grpc_server_retry_throttle_data*)gpr_atm_acq_load(
+ &(*throttle_data)->replacement);
+ if (new_throttle_data == NULL) return;
+ *throttle_data = new_throttle_data;
+ }
+}
+
+bool grpc_server_retry_throttle_data_record_failure(
+ grpc_server_retry_throttle_data* throttle_data) {
+ // First, check if we are stale and need to be replaced.
+ get_replacement_throttle_data_if_needed(&throttle_data);
+ // We decrement milli_tokens by 1000 (1 token) for each failure.
+ const int new_value = (int)gpr_atm_no_barrier_clamped_add(
+ &throttle_data->milli_tokens, (gpr_atm)-1000, (gpr_atm)0,
+ (gpr_atm)throttle_data->max_milli_tokens);
+ // Retries are allowed as long as the new value is above the threshold
+ // (max_milli_tokens / 2).
+ return new_value > throttle_data->max_milli_tokens / 2;
+}
+
+void grpc_server_retry_throttle_data_record_success(
+ grpc_server_retry_throttle_data* throttle_data) {
+ // First, check if we are stale and need to be replaced.
+ get_replacement_throttle_data_if_needed(&throttle_data);
+ // We increment milli_tokens by milli_token_ratio for each success.
+ gpr_atm_no_barrier_clamped_add(
+ &throttle_data->milli_tokens, (gpr_atm)throttle_data->milli_token_ratio,
+ (gpr_atm)0, (gpr_atm)throttle_data->max_milli_tokens);
+}
+
+grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref(
+ grpc_server_retry_throttle_data* throttle_data) {
+ gpr_ref(&throttle_data->refs);
+ return throttle_data;
+}
+
+void grpc_server_retry_throttle_data_unref(
+ grpc_server_retry_throttle_data* throttle_data) {
+ if (gpr_unref(&throttle_data->refs)) {
+ grpc_server_retry_throttle_data* replacement =
+ (grpc_server_retry_throttle_data*)gpr_atm_acq_load(
+ &throttle_data->replacement);
+ if (replacement != NULL) {
+ grpc_server_retry_throttle_data_unref(replacement);
+ }
+ gpr_free(throttle_data);
+ }
+}
+
+static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
+ int max_milli_tokens, int milli_token_ratio,
+ grpc_server_retry_throttle_data* old_throttle_data) {
+ grpc_server_retry_throttle_data* throttle_data =
+ gpr_malloc(sizeof(*throttle_data));
+ memset(throttle_data, 0, sizeof(*throttle_data));
+ gpr_ref_init(&throttle_data->refs, 1);
+ throttle_data->max_milli_tokens = max_milli_tokens;
+ throttle_data->milli_token_ratio = milli_token_ratio;
+ int initial_milli_tokens = max_milli_tokens;
+ // If there was a pre-existing entry for this server name, initialize
+ // the token count by scaling proportionately to the old data. This
+ // ensures that if we're already throttling retries on the old scale,
+ // we will start out doing the same thing on the new one.
+ if (old_throttle_data != NULL) {
+ double token_fraction =
+ (int)gpr_atm_acq_load(&old_throttle_data->milli_tokens) /
+ (double)old_throttle_data->max_milli_tokens;
+ initial_milli_tokens = (int)(token_fraction * max_milli_tokens);
+ }
+ gpr_atm_rel_store(&throttle_data->milli_tokens,
+ (gpr_atm)initial_milli_tokens);
+ // If there was a pre-existing entry, mark it as stale and give it a
+ // pointer to the new entry, which is its replacement.
+ if (old_throttle_data != NULL) {
+ grpc_server_retry_throttle_data_ref(throttle_data);
+ gpr_atm_rel_store(&old_throttle_data->replacement, (gpr_atm)throttle_data);
+ }
+ return throttle_data;
+}
+
+//
+// avl vtable for string -> server_retry_throttle_data map
+//
+
+static void* copy_server_name(void* key) { return gpr_strdup(key); }
+
+static long compare_server_name(void* key1, void* key2) {
+ return strcmp(key1, key2);
+}
+
+static void destroy_server_retry_throttle_data(void* value) {
+ grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data_unref(throttle_data);
+}
+
+static void* copy_server_retry_throttle_data(void* value) {
+ grpc_server_retry_throttle_data* throttle_data = value;
+ return grpc_server_retry_throttle_data_ref(throttle_data);
+}
+
+static const gpr_avl_vtable avl_vtable = {
+ gpr_free /* destroy_key */, copy_server_name, compare_server_name,
+ destroy_server_retry_throttle_data, copy_server_retry_throttle_data};
+
+//
+// server_retry_throttle_map
+//
+
+static gpr_mu g_mu;
+static gpr_avl g_avl;
+
+void grpc_retry_throttle_map_init() {
+ gpr_mu_init(&g_mu);
+ g_avl = gpr_avl_create(&avl_vtable);
+}
+
+void grpc_retry_throttle_map_shutdown() {
+ gpr_mu_destroy(&g_mu);
+ gpr_avl_unref(g_avl);
+}
+
+grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
+ const char* server_name, int max_milli_tokens, int milli_token_ratio) {
+ gpr_mu_lock(&g_mu);
+ grpc_server_retry_throttle_data* throttle_data =
+ gpr_avl_get(g_avl, (char*)server_name);
+ if (throttle_data == NULL) {
+ // Entry not found. Create a new one.
+ throttle_data = grpc_server_retry_throttle_data_create(
+ max_milli_tokens, milli_token_ratio, NULL);
+ g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data);
+ } else {
+ if (throttle_data->max_milli_tokens != max_milli_tokens ||
+ throttle_data->milli_token_ratio != milli_token_ratio) {
+ // Entry found but with old parameters. Create a new one based on
+ // the original one.
+ throttle_data = grpc_server_retry_throttle_data_create(
+ max_milli_tokens, milli_token_ratio, throttle_data);
+ g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data);
+ } else {
+ // Entry found. Increase refcount.
+ grpc_server_retry_throttle_data_ref(throttle_data);
+ }
+ }
+ gpr_mu_unlock(&g_mu);
+ return throttle_data;
+}
diff --git a/src/core/ext/client_channel/initial_connect_string.c b/src/core/ext/client_channel/retry_throttle.h
index 8ebd06c458..f9971faf65 100644
--- a/src/core/ext/client_channel/initial_connect_string.c
+++ b/src/core/ext/client_channel/retry_throttle.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,22 +31,35 @@
*
*/
-#include "src/core/ext/client_channel/initial_connect_string.h"
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RETRY_THROTTLE_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RETRY_THROTTLE_H
-#include <stddef.h>
+#include <stdbool.h>
-extern void grpc_set_default_initial_connect_string(
- grpc_resolved_address **addr, grpc_slice *initial_str);
+/// Tracks retry throttling data for an individual server name.
+typedef struct grpc_server_retry_throttle_data grpc_server_retry_throttle_data;
-static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
- grpc_set_default_initial_connect_string;
+/// Records a failure. Returns true if it's okay to send a retry.
+bool grpc_server_retry_throttle_data_record_failure(
+ grpc_server_retry_throttle_data* throttle_data);
+/// Records a success.
+void grpc_server_retry_throttle_data_record_success(
+ grpc_server_retry_throttle_data* throttle_data);
-void grpc_test_set_initial_connect_string_function(
- grpc_set_initial_connect_string_func func) {
- g_set_initial_connect_string_func = func;
-}
+grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref(
+ grpc_server_retry_throttle_data* throttle_data);
+void grpc_server_retry_throttle_data_unref(
+ grpc_server_retry_throttle_data* throttle_data);
-void grpc_set_initial_connect_string(grpc_resolved_address **addr,
- grpc_slice *initial_str) {
- g_set_initial_connect_string_func(addr, initial_str);
-}
+/// Initializes global map of failure data for each server name.
+void grpc_retry_throttle_map_init();
+/// Shuts down global map of failure data for each server name.
+void grpc_retry_throttle_map_shutdown();
+
+/// Returns a reference to the failure data for \a server_name, creating
+/// a new entry if needed.
+/// Caller must eventually unref via \a grpc_server_retry_throttle_data_unref().
+grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
+ const char* server_name, int max_milli_tokens, int milli_token_ratio);
+
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RETRY_THROTTLE_H */
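
A hedged usage sketch of the API declared above (not part of the commit; the server name and numbers are illustrative). It exercises the token-bucket behaviour implemented in retry_throttle.c: each recorded failure drains 1000 milli-tokens, each success refills milli_token_ratio, and record_failure() keeps returning true only while the bucket stays above max_milli_tokens / 2.

#include <stdbool.h>
#include <stdio.h>
#include "src/core/ext/client_channel/retry_throttle.h"

int main(void) {
  grpc_retry_throttle_map_init();
  /* Hypothetical server; 10000 milli-tokens max, 316 milli-tokens per success
     (i.e. maxTokens=10, tokenRatio=0.316 in the service config). */
  grpc_server_retry_throttle_data *throttle =
      grpc_retry_throttle_map_get_data_for_server("example.server.com", 10000,
                                                  316);
  for (int i = 1; i <= 7; ++i) {
    /* Returns false from the 5th failure on, once the bucket hits 5000. */
    bool retry_ok = grpc_server_retry_throttle_data_record_failure(throttle);
    printf("failure %d -> retry allowed: %s\n", i, retry_ok ? "yes" : "no");
  }
  /* A success adds milli_token_ratio (316) back, capped at max_milli_tokens. */
  grpc_server_retry_throttle_data_record_success(throttle);
  grpc_server_retry_throttle_data_unref(throttle);
  grpc_retry_throttle_map_shutdown();
  return 0;
}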
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index 5df0a9060d..063c0badff 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -41,7 +41,6 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/client_channel/client_channel.h"
-#include "src/core/ext/client_channel/initial_connect_string.h"
#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/client_channel/subchannel_index.h"
@@ -103,9 +102,6 @@ struct grpc_subchannel {
grpc_subchannel_key *key;
- /** initial string to send to peer */
- grpc_slice initial_connect_string;
-
/** set during connection */
grpc_connect_out_args connecting_result;
@@ -148,6 +144,7 @@ struct grpc_subchannel {
struct grpc_subchannel_call {
grpc_connected_subchannel *connection;
+ grpc_closure *schedule_closure_after_destroy;
};
#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
@@ -214,7 +211,6 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_subchannel *c = arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
- grpc_slice_unref_internal(exec_ctx, c->initial_connect_string);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
grpc_pollset_set_destroy(exec_ctx, c->pollset_set);
@@ -273,8 +269,9 @@ static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected);
c->disconnected = true;
- grpc_connector_shutdown(exec_ctx, c->connector,
- GRPC_ERROR_CREATE("Subchannel disconnected"));
+ grpc_connector_shutdown(
+ exec_ctx, c->connector,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Subchannel disconnected"));
con = GET_CONNECTED_SUBCHANNEL(c, no_barrier);
if (con != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "connection");
@@ -332,7 +329,6 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
c->pollset_set = grpc_pollset_set_create();
grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
- grpc_set_initial_connect_string(&addr, &c->initial_connect_string);
grpc_resolved_address *new_address = NULL;
grpc_channel_args *new_args = NULL;
if (grpc_proxy_mappers_map_address(exec_ctx, addr, args->args, &new_address,
@@ -340,17 +336,15 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(new_address != NULL);
gpr_free(addr);
addr = new_address;
- if (new_args != NULL) c->args = new_args;
- }
- if (c->args == NULL) {
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
- grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
- c->args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &new_arg,
- 1);
- gpr_free(new_arg.value.string);
}
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
+ grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
gpr_free(addr);
+ c->args = grpc_channel_args_copy_and_add_and_remove(
+ new_args != NULL ? new_args : args->args, keys_to_remove,
+ GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1);
+ gpr_free(new_arg.value.string);
+ if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
grpc_closure_init(&c->connected, subchannel_connected, c,
@@ -405,7 +399,6 @@ static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
args.interested_parties = c->pollset_set;
args.deadline = c->next_attempt;
args.channel_args = c->args;
- args.initial_connect_string = c->initial_connect_string;
grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
@@ -445,7 +438,8 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
- error = GRPC_ERROR_CREATE_REFERENCING("Disconnected", &error, 1);
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
+ &error, 1);
} else {
GRPC_ERROR_REF(error);
}
@@ -696,9 +690,9 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
} else {
grpc_connectivity_state_set(
exec_ctx, &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_REFERENCING("Connect Failed", &error, 1),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+ grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Connect Failed", &error, 1),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
const char *errmsg = grpc_error_string(error);
@@ -719,13 +713,22 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
grpc_subchannel_call *c = call;
+ GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
- grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL, c);
+ grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL,
+ c->schedule_closure_after_destroy);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call");
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
+void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call *call,
+ grpc_closure *closure) {
+ GPR_ASSERT(call->schedule_closure_after_destroy == NULL);
+ GPR_ASSERT(closure != NULL);
+ call->schedule_closure_after_destroy = closure;
+}
+
void grpc_subchannel_call_ref(
grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
@@ -761,15 +764,22 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
- gpr_timespec deadline, grpc_subchannel_call **call) {
+ const grpc_connected_subchannel_call_args *args,
+ grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = gpr_zalloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
+ *call = gpr_arena_alloc(
+ args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = con; // Ref is added below.
- grpc_error *error =
- grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
- NULL, NULL, path, start_time, deadline, callstk);
+ const grpc_call_element_args call_args = {.call_stack = callstk,
+ .server_transport_data = NULL,
+ .context = NULL,
+ .path = args->path,
+ .start_time = args->start_time,
+ .deadline = args->deadline,
+ .arena = args->arena};
+ grpc_error *error = grpc_call_stack_init(
+ exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
@@ -778,7 +788,7 @@ grpc_error *grpc_connected_subchannel_create_call(
return error;
}
GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
- grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, pollent);
+ grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, args->pollent);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h
index 6a70a76467..3e64a2507c 100644
--- a/src/core/ext/client_channel/subchannel.h
+++ b/src/core/ext/client_channel/subchannel.h
@@ -37,6 +37,7 @@
#include "src/core/ext/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
@@ -112,10 +113,18 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a subchannel call */
+typedef struct {
+ grpc_polling_entity *pollent;
+ grpc_slice path;
+ gpr_timespec start_time;
+ gpr_timespec deadline;
+ gpr_arena *arena;
+} grpc_connected_subchannel_call_args;
+
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
- gpr_timespec deadline, grpc_subchannel_call **subchannel_call);
+ const grpc_connected_subchannel_call_args *args,
+ grpc_subchannel_call **subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
@@ -154,6 +163,11 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call);
+/** Must be called once per call. Sets the 'then_schedule_closure' argument for
+ call stack destruction. */
+void grpc_subchannel_call_set_cleanup_closure(
+ grpc_subchannel_call *subchannel_call, grpc_closure *closure);
+
grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call);
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index d612591f2e..601b0e643b 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -925,7 +925,7 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "glb_shutdown");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
/* We need a copy of the lb_call pointer because we can't cancel the call
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
@@ -965,9 +965,9 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
} else {
pp->next = glb_policy->pending_picks;
glb_policy->pending_picks = pp;
@@ -989,9 +989,9 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_closure_sched(
- exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
} else {
pp->next = glb_policy->pending_picks;
glb_policy->pending_picks = pp;
@@ -1023,10 +1023,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, on_complete,
- GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
- "won't work without it. Failing"));
+ grpc_closure_sched(exec_ctx, on_complete,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "No mdelem storage for the LB token. Load reporting "
+ "won't work without it. Failing"));
return 0;
}
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index e2a66d16bd..fc65dfdcb9 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -101,7 +101,7 @@ static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
p->pending_picks = NULL;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
@@ -131,9 +131,9 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -156,9 +156,9 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -325,8 +325,8 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING("Pick first exhausted channels",
- &error, 1),
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick first exhausted channels", &error, 1),
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
@@ -373,7 +373,8 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}
@@ -423,11 +424,13 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
"This LB policy doesn't support user data. It will be ignored");
}
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args =
- grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
+ 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index f2d1d46179..a62082a2ff 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -320,13 +320,14 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE("Channel Shutdown"));
+ grpc_closure_sched(
+ exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp);
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@@ -345,9 +346,9 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -371,9 +372,9 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -661,8 +662,8 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
- grpc_closure_sched(exec_ctx, closure,
- GRPC_ERROR_CREATE("Round Robin not connected"));
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Round Robin not connected"));
}
}
@@ -709,11 +710,13 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
/* Skip balancer addresses, since we only know how to handle backends. */
if (addresses->addresses[i].is_balancer) continue;
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args =
- grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
+ 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index c2750634a5..ea57c85c3a 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -78,8 +78,8 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md));
calld->have_service_method = true;
} else {
- err =
- grpc_error_add_child(err, GRPC_ERROR_CREATE("Missing :path header"));
+ err = grpc_error_add_child(
+ err, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing :path header"));
}
if (calld->recv_initial_metadata->idx.named.lb_token != NULL) {
calld->initial_md_string = grpc_slice_ref_internal(
@@ -123,7 +123,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *calld = elem->call_data;
/* TODO(dgq): do something with the data
diff --git a/src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c
new file mode 100644
index 0000000000..f27da231f5
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c
@@ -0,0 +1,350 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#if GRPC_ARES == 1 && !defined(GRPC_UV)
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/support/backoff.h"
+#include "src/core/lib/support/env.h"
+#include "src/core/lib/support/string.h"
+
+#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
+#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define GRPC_DNS_RECONNECT_JITTER 0.2
+
+typedef struct {
+ /** base class: must be first */
+ grpc_resolver base;
+ /** name to resolve (usually the same as target_name) */
+ char *name_to_resolve;
+ /** default port to use */
+ char *default_port;
+ /** channel args. */
+ grpc_channel_args *channel_args;
+ /** pollset_set to drive the name resolution process */
+ grpc_pollset_set *interested_parties;
+
+ /** Closures used by the combiner */
+ grpc_closure dns_ares_on_retry_timer_locked;
+ grpc_closure dns_ares_on_resolved_locked;
+
+ /** Combiner guarding the rest of the state */
+ grpc_combiner *combiner;
+ /** are we currently resolving? */
+ bool resolving;
+ /** which version of the result have we published? */
+ int published_version;
+ /** which version of the result is current? */
+ int resolved_version;
+ /** pending next completion, or NULL */
+ grpc_closure *next_completion;
+ /** target result address for next completion */
+ grpc_channel_args **target_result;
+ /** current (fully resolved) result */
+ grpc_channel_args *resolved_result;
+ /** retry timer */
+ bool have_retry_timer;
+ grpc_timer retry_timer;
+ /** retry backoff state */
+ gpr_backoff backoff_state;
+
+ /** currently resolving addresses */
+ grpc_resolved_addresses *addresses;
+} ares_dns_resolver;
+
+static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+
+static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r);
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r);
+
+static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete);
+
+static const grpc_resolver_vtable dns_ares_resolver_vtable = {
+ dns_ares_destroy, dns_ares_shutdown_locked,
+ dns_ares_channel_saw_error_locked, dns_ares_next_locked};
+
+static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ if (r->have_retry_timer) {
+ grpc_timer_cancel(exec_ctx, &r->retry_timer);
+ }
+ if (r->next_completion != NULL) {
+ *r->target_result = NULL;
+ grpc_closure_sched(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
+ r->next_completion = NULL;
+ }
+}
+
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ if (!r->resolving) {
+ gpr_backoff_reset(&r->backoff_state);
+ dns_ares_start_resolving_locked(exec_ctx, r);
+ }
+}
+
+static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ ares_dns_resolver *r = arg;
+ r->have_retry_timer = false;
+ if (error == GRPC_ERROR_NONE) {
+ if (!r->resolving) {
+ dns_ares_start_resolving_locked(exec_ctx, r);
+ }
+ }
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
+}
+
+static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ ares_dns_resolver *r = arg;
+ grpc_channel_args *result = NULL;
+ GPR_ASSERT(r->resolving);
+ r->resolving = false;
+ if (r->addresses != NULL) {
+ grpc_lb_addresses *addresses = grpc_lb_addresses_create(
+ r->addresses->naddrs, NULL /* user_data_vtable */);
+ for (size_t i = 0; i < r->addresses->naddrs; ++i) {
+ grpc_lb_addresses_set_address(
+ addresses, i, &r->addresses->addrs[i].addr,
+ r->addresses->addrs[i].len, false /* is_balancer */,
+ NULL /* balancer_name */, NULL /* user_data */);
+ }
+ grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
+ result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
+ grpc_resolved_addresses_destroy(r->addresses);
+ grpc_lb_addresses_destroy(exec_ctx, addresses);
+ } else {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
+ gpr_timespec timeout = gpr_time_sub(next_try, now);
+ gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
+ grpc_error_string(error));
+ GPR_ASSERT(!r->have_retry_timer);
+ r->have_retry_timer = true;
+ GRPC_RESOLVER_REF(&r->base, "retry-timer");
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
+ timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "retrying immediately");
+ }
+ grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
+ &r->dns_ares_on_retry_timer_locked, now);
+ }
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+ }
+ r->resolved_result = result;
+ r->resolved_version++;
+ dns_ares_maybe_finish_next_locked(exec_ctx, r);
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
+}
+
+static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete) {
+ gpr_log(GPR_DEBUG, "dns_ares_next is called.");
+ ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ GPR_ASSERT(!r->next_completion);
+ r->next_completion = on_complete;
+ r->target_result = target_result;
+ if (r->resolved_version == 0 && !r->resolving) {
+ gpr_backoff_reset(&r->backoff_state);
+ dns_ares_start_resolving_locked(exec_ctx, r);
+ } else {
+ dns_ares_maybe_finish_next_locked(exec_ctx, r);
+ }
+}
+
+static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r) {
+ GRPC_RESOLVER_REF(&r->base, "dns-resolving");
+ GPR_ASSERT(!r->resolving);
+ r->resolving = true;
+ r->addresses = NULL;
+ grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
+ r->interested_parties, &r->dns_ares_on_resolved_locked,
+ &r->addresses);
+}
+
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r) {
+ if (r->next_completion != NULL &&
+ r->resolved_version != r->published_version) {
+ *r->target_result = r->resolved_result == NULL
+ ? NULL
+ : grpc_channel_args_copy(r->resolved_result);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ r->next_completion = NULL;
+ r->published_version = r->resolved_version;
+ }
+}
+
+static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+ gpr_log(GPR_DEBUG, "dns_ares_destroy");
+ ares_dns_resolver *r = (ares_dns_resolver *)gr;
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+ }
+ grpc_pollset_set_destroy(exec_ctx, r->interested_parties);
+ gpr_free(r->name_to_resolve);
+ gpr_free(r->default_port);
+ grpc_channel_args_destroy(exec_ctx, r->channel_args);
+ gpr_free(r);
+}
+
+static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
+ grpc_resolver_args *args,
+ const char *default_port) {
+ // Get name from args.
+ const char *path = args->uri->path;
+ if (0 != strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based dns uri's not supported");
+ return NULL;
+ }
+ if (path[0] == '/') ++path;
+ // Create resolver.
+ ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
+ grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
+ r->name_to_resolve = gpr_strdup(path);
+ r->default_port = gpr_strdup(default_port);
+ r->channel_args = grpc_channel_args_copy(args->args);
+ r->interested_parties = grpc_pollset_set_create();
+ if (args->pollset_set != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
+ args->pollset_set);
+ }
+ gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_closure_init(&r->dns_ares_on_retry_timer_locked,
+ dns_ares_on_retry_timer_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false));
+ grpc_closure_init(&r->dns_ares_on_resolved_locked,
+ dns_ares_on_resolved_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false));
+ return &r->base;
+}
+
+/*
+ * FACTORY
+ */
+
+static void dns_ares_factory_ref(grpc_resolver_factory *factory) {}
+
+static void dns_ares_factory_unref(grpc_resolver_factory *factory) {}
+
+static grpc_resolver *dns_factory_create_resolver(
+ grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
+ grpc_resolver_args *args) {
+ return dns_ares_create(exec_ctx, args, "https");
+}
+
+static char *dns_ares_factory_get_default_host_name(
+ grpc_resolver_factory *factory, grpc_uri *uri) {
+ const char *path = uri->path;
+ if (path[0] == '/') ++path;
+ return gpr_strdup(path);
+}
+
+static const grpc_resolver_factory_vtable dns_ares_factory_vtable = {
+ dns_ares_factory_ref, dns_ares_factory_unref, dns_factory_create_resolver,
+ dns_ares_factory_get_default_host_name, "dns"};
+static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable};
+
+static grpc_resolver_factory *dns_ares_resolver_factory_create() {
+ return &dns_resolver_factory;
+}
+
+void grpc_resolver_dns_ares_init(void) {
+ char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ /* TODO(zyc): Turn on c-ares based resolver by default after the address
+ sorter and the CNAME support are added. */
+ if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
+ grpc_error *error = grpc_ares_init();
+ if (error != GRPC_ERROR_NONE) {
+ GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
+ return;
+ }
+ grpc_resolve_address = grpc_resolve_address_ares;
+ grpc_register_resolver_type(dns_ares_resolver_factory_create());
+ }
+ gpr_free(resolver);
+}
+
+void grpc_resolver_dns_ares_shutdown(void) {
+ char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
+ grpc_ares_cleanup();
+ }
+ gpr_free(resolver);
+}
+
+#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */
+
+void grpc_resolver_dns_ares_init(void) {}
+
+void grpc_resolver_dns_ares_shutdown(void) {}
+
+#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
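Note: as grpc_resolver_dns_ares_init() above shows, the c-ares resolver is only registered when the GRPC_DNS_RESOLVER environment variable equals "ares" (case-insensitively); otherwise the native resolver later in this diff remains in charge. A minimal sketch of how an application might opt in — the target comment is illustrative and setenv() is the POSIX call, not part of this change:

#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  /* Must be set before grpc_init(), which runs the resolver plugin
     initializers (including grpc_resolver_dns_ares_init above). */
  setenv("GRPC_DNS_RESOLVER", "ares", 1 /* overwrite */);
  grpc_init();
  /* ... channels created against "dns:///..." targets now resolve via the
     c-ares based resolver ... */
  grpc_shutdown();
  return 0;
}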
diff --git a/src/core/ext/client_channel/initial_connect_string.h b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 876abea40e..334feaa2ab 100644
--- a/src/core/ext/client_channel/initial_connect_string.h
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,20 +31,35 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
-#define GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
+#ifndef GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
+#define GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
-#include <grpc/slice.h>
-#include "src/core/lib/iomgr/resolve_address.h"
+#include <ares.h>
-typedef void (*grpc_set_initial_connect_string_func)(
- grpc_resolved_address **addr, grpc_slice *initial_str);
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/pollset_set.h"
-void grpc_test_set_initial_connect_string_function(
- grpc_set_initial_connect_string_func func);
+typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
-/** Set a string to be sent once connected. Optionally reset addr. */
-void grpc_set_initial_connect_string(grpc_resolved_address **addr,
- grpc_slice *connect_string);
+/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
+ done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
+ bound to its ares_channel when necessary. */
+void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver);
-#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H */
+/* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
+ \a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
+ query. */
+ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver);
+
+/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
+ created successfully. */
+grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
+ grpc_pollset_set *pollset_set);
+
+/* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver
+ will be cancelled and their on_done callbacks will be invoked with a status
+ of ARES_ECANCELLED. */
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
+
+#endif /* GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H */
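Read together, this header implies a lifecycle: create a driver bound to a pollset_set, bind c-ares queries to its channel, start it, and destroy it once the callbacks have run. A rough sketch of that sequence, mirroring grpc_resolve_address_ares_impl later in this diff; on_done_cb and arg are caller-supplied placeholders, and the snippet assumes the same includes as grpc_ares_wrapper.c:

/* Sketch only: error handling trimmed. */
static grpc_ares_ev_driver *start_lookup(grpc_exec_ctx *exec_ctx,
                                         grpc_pollset_set *pollset_set,
                                         const char *host,
                                         ares_host_callback on_done_cb,
                                         void *arg) {
  grpc_ares_ev_driver *ev_driver;
  grpc_error *err = grpc_ares_ev_driver_create(&ev_driver, pollset_set);
  if (err != GRPC_ERROR_NONE) {
    GRPC_LOG_IF_ERROR("grpc_ares_ev_driver_create() failed", err);
    return NULL;
  }
  /* Bind the query to the driver's ares_channel, then let the driver pump IO
     until the query completes. */
  ares_channel *channel = grpc_ares_ev_driver_get_channel(ev_driver);
  ares_gethostbyname(*channel, host, AF_INET, on_done_cb, arg);
  grpc_ares_ev_driver_start(exec_ctx, ev_driver);
  /* grpc_ares_ev_driver_destroy() is deferred until on_done_cb has run for
     every bound query (see grpc_ares_request_unref below). */
  return ev_driver;
}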
diff --git a/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
new file mode 100644
index 0000000000..fab4f0c977
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
@@ -0,0 +1,319 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
+#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
+
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h"
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/support/string.h"
+
+typedef struct fd_node {
+ /** the owner of this fd node */
+ grpc_ares_ev_driver *ev_driver;
+ /** the grpc_fd owned by this fd node */
+ grpc_fd *grpc_fd;
+ /** a closure wrapping on_readable_cb, which should be invoked when the
+ grpc_fd in this node becomes readable. */
+ grpc_closure read_closure;
+ /** a closure wrapping on_writable_cb, which should be invoked when the
+ grpc_fd in this node becomes writable. */
+ grpc_closure write_closure;
+ /** next fd node in the list */
+ struct fd_node *next;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** if the readable closure has been registered */
+ bool readable_registered;
+ /** if the writable closure has been registered */
+ bool writable_registered;
+} fd_node;
+
+struct grpc_ares_ev_driver {
+ /** the ares_channel owned by this event driver */
+ ares_channel channel;
+ /** pollset set for driving the IO events of the channel */
+ grpc_pollset_set *pollset_set;
+ /** refcount of the event driver */
+ gpr_refcount refs;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** a list of grpc_fd that this event driver is currently using. */
+ fd_node *fds;
+ /** is this event driver currently working? */
+ bool working;
+ /** is this event driver being shut down */
+ bool shutting_down;
+};
+
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver);
+
+static grpc_ares_ev_driver *grpc_ares_ev_driver_ref(
+ grpc_ares_ev_driver *ev_driver) {
+ gpr_log(GPR_DEBUG, "Ref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
+ gpr_ref(&ev_driver->refs);
+ return ev_driver;
+}
+
+static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
+ gpr_log(GPR_DEBUG, "Unref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
+ if (gpr_unref(&ev_driver->refs)) {
+ gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
+ GPR_ASSERT(ev_driver->fds == NULL);
+ gpr_mu_destroy(&ev_driver->mu);
+ ares_destroy(ev_driver->channel);
+ gpr_free(ev_driver);
+ }
+}
+
+static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+ gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ GPR_ASSERT(!fdn->readable_registered);
+ GPR_ASSERT(!fdn->writable_registered);
+ gpr_mu_destroy(&fdn->mu);
+ grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->grpc_fd);
+ grpc_fd_shutdown(exec_ctx, fdn->grpc_fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("fd node destroyed"));
+ grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL, NULL, "c-ares query finished");
+ gpr_free(fdn);
+}
+
+grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
+ grpc_pollset_set *pollset_set) {
+ *ev_driver = gpr_malloc(sizeof(grpc_ares_ev_driver));
+ int status = ares_init(&(*ev_driver)->channel);
+ gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
+ if (status != ARES_SUCCESS) {
+ char *err_msg;
+ gpr_asprintf(&err_msg, "Failed to init ares channel. C-ares error: %s",
+ ares_strerror(status));
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
+ gpr_free(err_msg);
+ gpr_free(*ev_driver);
+ return err;
+ }
+ gpr_mu_init(&(*ev_driver)->mu);
+ gpr_ref_init(&(*ev_driver)->refs, 1);
+ (*ev_driver)->pollset_set = pollset_set;
+ (*ev_driver)->fds = NULL;
+ (*ev_driver)->working = false;
+ (*ev_driver)->shutting_down = false;
+ return GRPC_ERROR_NONE;
+}
+
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
+ // It's not safe to shut down the remaining fds here directly, because
+ // ares_host_callback does not provide an exec_ctx. We mark the event driver
+ // as being shut down. If the event driver is working,
+ // grpc_ares_notify_on_event_locked will shut down the fds; if it's not
+ // working, there are no fds to shut down.
+ gpr_mu_lock(&ev_driver->mu);
+ ev_driver->shutting_down = true;
+ gpr_mu_unlock(&ev_driver->mu);
+ grpc_ares_ev_driver_unref(ev_driver);
+}
+
+// Search for fd in the fd_node list pointed to by head. This is an O(n)
+// search; the maximum possible value of n is ARES_GETSOCK_MAXNUM (16), and n
+// is typically 1-2 in our tests.
+static fd_node *pop_fd_node(fd_node **head, int fd) {
+ fd_node dummy_head;
+ dummy_head.next = *head;
+ fd_node *node = &dummy_head;
+ while (node->next != NULL) {
+ if (grpc_fd_wrapped_fd(node->next->grpc_fd) == fd) {
+ fd_node *ret = node->next;
+ node->next = node->next->next;
+ *head = dummy_head.next;
+ return ret;
+ }
+ node = node->next;
+ }
+ return NULL;
+}
+
+static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ fd_node *fdn = arg;
+ grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+ gpr_mu_lock(&fdn->mu);
+ fdn->readable_registered = false;
+ gpr_mu_unlock(&fdn->mu);
+
+ gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ if (error == GRPC_ERROR_NONE) {
+ ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->grpc_fd),
+ ARES_SOCKET_BAD);
+ } else {
+ // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
+ // timed out. The pending lookups made on this ev_driver will be cancelled
+ // by the following ares_cancel() and the on_done callbacks will be invoked
+ // with a status of ARES_ECANCELLED. The remaining file descriptors in this
+ // ev_driver will be cleaned up in the following
+ // grpc_ares_notify_on_event_locked().
+ ares_cancel(ev_driver->channel);
+ }
+ gpr_mu_lock(&ev_driver->mu);
+ grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+ gpr_mu_unlock(&ev_driver->mu);
+ grpc_ares_ev_driver_unref(ev_driver);
+}
+
+static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ fd_node *fdn = arg;
+ grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+ gpr_mu_lock(&fdn->mu);
+ fdn->writable_registered = false;
+ gpr_mu_unlock(&fdn->mu);
+
+ gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ if (error == GRPC_ERROR_NONE) {
+ ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
+ grpc_fd_wrapped_fd(fdn->grpc_fd));
+ } else {
+ // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
+ // timed out. The pending lookups made on this ev_driver will be cancelled
+ // by the following ares_cancel() and the on_done callbacks will be invoked
+ // with a status of ARES_ECANCELLED. The remaining file descriptors in this
+ // ev_driver will be cleaned up in the following
+ // grpc_ares_notify_on_event_locked().
+ ares_cancel(ev_driver->channel);
+ }
+ gpr_mu_lock(&ev_driver->mu);
+ grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+ gpr_mu_unlock(&ev_driver->mu);
+ grpc_ares_ev_driver_unref(ev_driver);
+}
+
+ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver) {
+ return &ev_driver->channel;
+}
+
+// Get the file descriptors used by the ev_driver's ares channel, and register
+// the read/write closures with those file descriptors.
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver) {
+ fd_node *new_list = NULL;
+ if (!ev_driver->shutting_down) {
+ ares_socket_t socks[ARES_GETSOCK_MAXNUM];
+ int socks_bitmask =
+ ares_getsock(ev_driver->channel, socks, ARES_GETSOCK_MAXNUM);
+ for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
+ if (ARES_GETSOCK_READABLE(socks_bitmask, i) ||
+ ARES_GETSOCK_WRITABLE(socks_bitmask, i)) {
+ fd_node *fdn = pop_fd_node(&ev_driver->fds, socks[i]);
+ // Create a new fd_node if socks[i] is not in the fd_node list.
+ if (fdn == NULL) {
+ char *fd_name;
+ gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
+ fdn = gpr_malloc(sizeof(fd_node));
+ gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
+ fdn->grpc_fd = grpc_fd_create(socks[i], fd_name);
+ fdn->ev_driver = ev_driver;
+ fdn->readable_registered = false;
+ fdn->writable_registered = false;
+ gpr_mu_init(&fdn->mu);
+ grpc_closure_init(&fdn->read_closure, on_readable_cb, fdn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&fdn->write_closure, on_writable_cb, fdn,
+ grpc_schedule_on_exec_ctx);
+ grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set,
+ fdn->grpc_fd);
+ gpr_free(fd_name);
+ }
+ fdn->next = new_list;
+ new_list = fdn;
+ gpr_mu_lock(&fdn->mu);
+ // Register read_closure if the socket is readable and read_closure has
+ // not been registered with this socket.
+ if (ARES_GETSOCK_READABLE(socks_bitmask, i) &&
+ !fdn->readable_registered) {
+ grpc_ares_ev_driver_ref(ev_driver);
+ gpr_log(GPR_DEBUG, "notify read on: %d",
+ grpc_fd_wrapped_fd(fdn->grpc_fd));
+ grpc_fd_notify_on_read(exec_ctx, fdn->grpc_fd, &fdn->read_closure);
+ fdn->readable_registered = true;
+ }
+ // Register write_closure if the socket is writable and write_closure
+ // has not been registered with this socket.
+ if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
+ !fdn->writable_registered) {
+ gpr_log(GPR_DEBUG, "notify write on: %d",
+ grpc_fd_wrapped_fd(fdn->grpc_fd));
+ grpc_ares_ev_driver_ref(ev_driver);
+ grpc_fd_notify_on_write(exec_ctx, fdn->grpc_fd, &fdn->write_closure);
+ fdn->writable_registered = true;
+ }
+ gpr_mu_unlock(&fdn->mu);
+ }
+ }
+ }
+ // Any remaining fds in ev_driver->fds were not returned by ares_getsock() and
+ // are therefore no longer in use, so they can be shut down and removed from
+ // the list.
+ while (ev_driver->fds != NULL) {
+ fd_node *cur = ev_driver->fds;
+ ev_driver->fds = ev_driver->fds->next;
+ fd_node_destroy(exec_ctx, cur);
+ }
+ ev_driver->fds = new_list;
+ // If the ev driver has no working fd, all the tasks are done.
+ if (new_list == NULL) {
+ ev_driver->working = false;
+ gpr_log(GPR_DEBUG, "ev driver stop working");
+ }
+}
+
+void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver) {
+ gpr_mu_lock(&ev_driver->mu);
+ if (!ev_driver->working) {
+ ev_driver->working = true;
+ grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+ }
+ gpr_mu_unlock(&ev_driver->mu);
+}
+
+#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET) */
diff --git a/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c
new file mode 100644
index 0000000000..3eee8e3513
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -0,0 +1,289 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#if GRPC_ARES == 1 && !defined(GRPC_UV)
+
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils_posix.h"
+
+#include <string.h>
+#include <sys/types.h>
+
+#include <ares.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h"
+#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/support/string.h"
+
+static gpr_once g_basic_init = GPR_ONCE_INIT;
+static gpr_mu g_init_mu;
+
+typedef struct grpc_ares_request {
+ /** following members are set in grpc_resolve_address_ares_impl */
+ /** host to resolve, parsed from the name to resolve */
+ char *host;
+ /** port to fill in sockaddr_in, parsed from the name to resolve */
+ char *port;
+ /** default port to use */
+ char *default_port;
+ /** closure to call when the request completes */
+ grpc_closure *on_done;
+ /** the pointer to receive the resolved addresses */
+ grpc_resolved_addresses **addrs_out;
+ /** the event driver used by this request */
+ grpc_ares_ev_driver *ev_driver;
+ /** number of ongoing queries */
+ gpr_refcount pending_queries;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** whether at least one query has succeeded; set in on_done_cb */
+ bool success;
+ /** the errors explaining the request failure, set in on_done_cb */
+ grpc_error *error;
+} grpc_ares_request;
+
+static void do_basic_init(void) { gpr_mu_init(&g_init_mu); }
+
+static uint16_t strhtons(const char *port) {
+ if (strcmp(port, "http") == 0) {
+ return htons(80);
+ } else if (strcmp(port, "https") == 0) {
+ return htons(443);
+ }
+ return htons((unsigned short)atoi(port));
+}
+
+static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
+ grpc_ares_request *r) {
+ /* If there are no pending queries, invoke the on_done callback and destroy
+ the request */
+ if (gpr_unref(&r->pending_queries)) {
+ /* TODO(zyc): Sort results with RFC6724 before invoking on_done. */
+ if (exec_ctx == NULL) {
+ /* A new exec_ctx is created here, as the c-ares interface does not
+ provide one in ares_host_callback. It's safe to schedule on_done with
+ the newly created exec_ctx, since the caller has been warned not to
+ acquire locks in on_done. ares_dns_resolver uses a combiner to protect
+ the resources needed by on_done. */
+ grpc_exec_ctx new_exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure_sched(&new_exec_ctx, r->on_done, r->error);
+ grpc_exec_ctx_finish(&new_exec_ctx);
+ } else {
+ grpc_closure_sched(exec_ctx, r->on_done, r->error);
+ }
+ gpr_mu_destroy(&r->mu);
+ grpc_ares_ev_driver_destroy(r->ev_driver);
+ gpr_free(r->host);
+ gpr_free(r->port);
+ gpr_free(r->default_port);
+ gpr_free(r);
+ }
+}
+
+static void on_done_cb(void *arg, int status, int timeouts,
+ struct hostent *hostent) {
+ grpc_ares_request *r = (grpc_ares_request *)arg;
+ gpr_mu_lock(&r->mu);
+ if (status == ARES_SUCCESS) {
+ GRPC_ERROR_UNREF(r->error);
+ r->error = GRPC_ERROR_NONE;
+ r->success = true;
+ grpc_resolved_addresses **addresses = r->addrs_out;
+ if (*addresses == NULL) {
+ *addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
+ (*addresses)->naddrs = 0;
+ (*addresses)->addrs = NULL;
+ }
+ size_t prev_naddr = (*addresses)->naddrs;
+ size_t i;
+ for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
+ }
+ (*addresses)->naddrs += i;
+ (*addresses)->addrs =
+ gpr_realloc((*addresses)->addrs,
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ for (i = prev_naddr; i < (*addresses)->naddrs; i++) {
+ memset(&(*addresses)->addrs[i], 0, sizeof(grpc_resolved_address));
+ if (hostent->h_addrtype == AF_INET6) {
+ (*addresses)->addrs[i].len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 *addr =
+ (struct sockaddr_in6 *)&(*addresses)->addrs[i].addr;
+ addr->sin6_family = (sa_family_t)hostent->h_addrtype;
+ addr->sin6_port = strhtons(r->port);
+
+ char output[INET6_ADDRSTRLEN];
+ memcpy(&addr->sin6_addr, hostent->h_addr_list[i - prev_naddr],
+ sizeof(struct in6_addr));
+ ares_inet_ntop(AF_INET6, &addr->sin6_addr, output, INET6_ADDRSTRLEN);
+ gpr_log(GPR_DEBUG,
+ "c-ares resolver gets a AF_INET6 result: \n"
+ " addr: %s\n port: %s\n sin6_scope_id: %d\n",
+ output, r->port, addr->sin6_scope_id);
+ } else {
+ (*addresses)->addrs[i].len = sizeof(struct sockaddr_in);
+ struct sockaddr_in *addr =
+ (struct sockaddr_in *)&(*addresses)->addrs[i].addr;
+ memcpy(&addr->sin_addr, hostent->h_addr_list[i - prev_naddr],
+ sizeof(struct in_addr));
+ addr->sin_family = (sa_family_t)hostent->h_addrtype;
+ addr->sin_port = strhtons(r->port);
+
+ char output[INET_ADDRSTRLEN];
+ ares_inet_ntop(AF_INET, &addr->sin_addr, output, INET_ADDRSTRLEN);
+ gpr_log(GPR_DEBUG,
+ "c-ares resolver gets a AF_INET result: \n"
+ " addr: %s\n port: %s\n",
+ output, r->port);
+ }
+ }
+ } else if (!r->success) {
+ char *error_msg;
+ gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
+ ares_strerror(status));
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ gpr_free(error_msg);
+ if (r->error == GRPC_ERROR_NONE) {
+ r->error = error;
+ } else {
+ r->error = grpc_error_add_child(error, r->error);
+ }
+ }
+ gpr_mu_unlock(&r->mu);
+ grpc_ares_request_unref(NULL, r);
+}
+
+void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ /* TODO(zyc): Enable tracing after #9603 is checked in */
+ /* if (grpc_dns_trace) {
+ gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
+ name, default_port);
+ } */
+
+ /* parse name, splitting it into host and port parts */
+ char *host;
+ char *port;
+ gpr_split_host_port(name, &host, &port);
+ if (host == NULL) {
+ grpc_error *err = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("unparseable host:port"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
+ grpc_closure_sched(exec_ctx, on_done, err);
+ goto error_cleanup;
+ } else if (port == NULL) {
+ if (default_port == NULL) {
+ grpc_error *err = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("no port in name"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
+ grpc_closure_sched(exec_ctx, on_done, err);
+ goto error_cleanup;
+ }
+ port = gpr_strdup(default_port);
+ }
+
+ grpc_ares_ev_driver *ev_driver;
+ grpc_error *err = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
+ if (err != GRPC_ERROR_NONE) {
+ GRPC_LOG_IF_ERROR("grpc_ares_ev_driver_create() failed", err);
+ goto error_cleanup;
+ }
+
+ grpc_ares_request *r = gpr_malloc(sizeof(grpc_ares_request));
+ gpr_mu_init(&r->mu);
+ r->ev_driver = ev_driver;
+ r->on_done = on_done;
+ r->addrs_out = addrs;
+ r->default_port = gpr_strdup(default_port);
+ r->port = port;
+ r->host = host;
+ r->success = false;
+ r->error = GRPC_ERROR_NONE;
+ ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ gpr_ref_init(&r->pending_queries, 2);
+ if (grpc_ipv6_loopback_available()) {
+ gpr_ref(&r->pending_queries);
+ ares_gethostbyname(*channel, r->host, AF_INET6, on_done_cb, r);
+ }
+ ares_gethostbyname(*channel, r->host, AF_INET, on_done_cb, r);
+ /* TODO(zyc): Handle CNAME records here. */
+ grpc_ares_ev_driver_start(exec_ctx, r->ev_driver);
+ grpc_ares_request_unref(exec_ctx, r);
+ return;
+
+error_cleanup:
+ gpr_free(host);
+ gpr_free(port);
+}
+
+void (*grpc_resolve_address_ares)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+
+grpc_error *grpc_ares_init(void) {
+ gpr_once_init(&g_basic_init, do_basic_init);
+ gpr_mu_lock(&g_init_mu);
+ int status = ares_library_init(ARES_LIB_INIT_ALL);
+ gpr_mu_unlock(&g_init_mu);
+
+ if (status != ARES_SUCCESS) {
+ char *error_msg;
+ gpr_asprintf(&error_msg, "ares_library_init failed: %s",
+ ares_strerror(status));
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ gpr_free(error_msg);
+ return error;
+ }
+ return GRPC_ERROR_NONE;
+}
+
+void grpc_ares_cleanup(void) {
+ gpr_mu_lock(&g_init_mu);
+ ares_library_cleanup();
+ gpr_mu_unlock(&g_init_mu);
+}
+
+#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
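The lifetime of a grpc_ares_request above hinges on pending_queries: it starts at 2 (the AF_INET query plus a reference held by grpc_resolve_address_ares_impl itself), gains one more when the AF_INET6 query is issued, and each on_done_cb as well as the trailing grpc_ares_request_unref() drops one; whoever drops the last reference schedules on_done and frees the request. A condensed sketch of that counting pattern with gpr_refcount — launch_query() and finalize() are hypothetical stand-ins, and <grpc/support/sync.h> is assumed:

static void resolve_with_n_queries(gpr_refcount *refs, size_t n) {
  gpr_ref_init(refs, 1); /* one reference held by this function */
  for (size_t i = 0; i < n; i++) {
    gpr_ref(refs); /* one reference per in-flight query */
    /* launch_query(i, on_query_done);  -- each completion calls gpr_unref() */
  }
  /* Drop this function's reference; if every query has already completed,
     gpr_unref() returns nonzero here and we finalize, otherwise the last
     completing query does. */
  if (gpr_unref(refs)) {
    /* finalize(); */
  }
}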
diff --git a/src/core/ext/client_channel/default_initial_connect_string.c b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h
index 6db82d84ef..ab00a26b36 100644
--- a/src/core/ext/client_channel/default_initial_connect_string.c
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,33 @@
*
*/
-#include <grpc/slice.h>
+#ifndef GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
+#define GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
-void grpc_set_default_initial_connect_string(grpc_resolved_address **addr,
- grpc_slice *initial_str) {}
+/* Asynchronously resolve \a addr. Use \a default_port if a port isn't
+ designated in \a addr, otherwise use the port in \a addr. grpc_ares_init()
+ must be called at least once before this function. \a on_done may be called
+ directly in this function without being scheduled with \a exec_ctx, so it
+ must not try to acquire locks that are held by the caller. */
+extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
+ const char *addr,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addresses);
+
+/* Initializes the gRPC c-ares wrapper. Must be called at least once before
+ grpc_resolve_address_ares(). */
+grpc_error *grpc_ares_init(void);
+
+/* Uninitializes the gRPC c-ares wrapper. If there has been more than one call
+ to grpc_ares_init(), this function tears the wrapper down only once it has
+ been called the same number of times as grpc_ares_init(). */
+void grpc_ares_cleanup(void);
+
+#endif /* GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H */
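A rough sketch of how a caller might drive this API with a grpc_closure, in the spirit of dns_ares_start_resolving_locked() earlier in this diff; my_resolver_state, on_resolved() and the "localhost:50051" target are illustrative only, and the snippet assumes grpc_ares_init() has already succeeded:

typedef struct {
  grpc_closure on_resolved;
  grpc_resolved_addresses *addresses;
} my_resolver_state;

static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_error *error) {
  my_resolver_state *state = arg;
  if (error == GRPC_ERROR_NONE && state->addresses != NULL) {
    gpr_log(GPR_DEBUG, "resolved %" PRIuPTR " addresses",
            state->addresses->naddrs);
    grpc_resolved_addresses_destroy(state->addresses);
  }
}

static void start_resolving(grpc_exec_ctx *exec_ctx, my_resolver_state *state,
                            grpc_pollset_set *interested_parties) {
  grpc_closure_init(&state->on_resolved, on_resolved, state,
                    grpc_schedule_on_exec_ctx);
  state->addresses = NULL;
  grpc_resolve_address_ares(exec_ctx, "localhost:50051", "https",
                            interested_parties, &state->on_resolved,
                            &state->addresses);
}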
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index d227c19c43..97cd0486a9 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -44,6 +44,7 @@
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
+#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
@@ -113,8 +114,9 @@ static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_closure_sched(exec_ctx, r->next_completion,
- GRPC_ERROR_CREATE("Resolver Shutdown"));
+ grpc_closure_sched(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
}
}
@@ -303,7 +305,21 @@ static grpc_resolver_factory *dns_resolver_factory_create() {
}
void grpc_resolver_dns_native_init(void) {
- grpc_register_resolver_type(dns_resolver_factory_create());
+ char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) {
+ gpr_log(GPR_DEBUG, "Using native dns resolver");
+ grpc_register_resolver_type(dns_resolver_factory_create());
+ } else {
+ grpc_resolver_factory *existing_factory =
+ grpc_resolver_factory_lookup("dns");
+ if (existing_factory == NULL) {
+ gpr_log(GPR_DEBUG, "Using native dns resolver");
+ grpc_register_resolver_type(dns_resolver_factory_create());
+ } else {
+ grpc_resolver_factory_unref(existing_factory);
+ }
+ }
+ gpr_free(resolver);
}
void grpc_resolver_dns_native_shutdown(void) {}
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index eae0145ecc..2b226c1bf7 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -63,8 +63,6 @@ typedef struct {
grpc_closure *notify;
grpc_connect_in_args args;
grpc_connect_out_args *result;
- grpc_closure initial_string_sent;
- grpc_slice_buffer initial_string_buffer;
grpc_endpoint *endpoint; // Non-NULL until handshaking starts.
@@ -82,7 +80,6 @@ static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx,
grpc_connector *con) {
chttp2_connector *c = (chttp2_connector *)con;
if (gpr_unref(&c->refs)) {
- /* c->initial_string_buffer does not need to be destroyed */
gpr_mu_destroy(&c->mu);
// If handshaking is not yet in progress, destroy the endpoint.
// Otherwise, the handshaker will do this for us.
@@ -116,7 +113,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
- error = GRPC_ERROR_CREATE("connector shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
// TODO(ctiller): It is currently necessary to shutdown endpoints
@@ -160,28 +157,6 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
c->endpoint = NULL; // Endpoint handed off to handshake manager.
}
-static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- chttp2_connector *c = arg;
- gpr_mu_lock(&c->mu);
- if (error != GRPC_ERROR_NONE || c->shutdown) {
- if (error == GRPC_ERROR_NONE) {
- error = GRPC_ERROR_CREATE("connector shutdown");
- } else {
- error = GRPC_ERROR_REF(error);
- }
- memset(c->result, 0, sizeof(*c->result));
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_closure_sched(exec_ctx, notify, error);
- gpr_mu_unlock(&c->mu);
- chttp2_connector_unref(exec_ctx, arg);
- } else {
- start_handshake_locked(exec_ctx, c);
- gpr_mu_unlock(&c->mu);
- }
-}
-
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
chttp2_connector *c = arg;
gpr_mu_lock(&c->mu);
@@ -189,7 +164,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
c->connecting = false;
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
- error = GRPC_ERROR_CREATE("connector shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
} else {
error = GRPC_ERROR_REF(error);
}
@@ -204,17 +179,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
chttp2_connector_unref(exec_ctx, arg);
} else {
GPR_ASSERT(c->endpoint != NULL);
- if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
- grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
- c, grpc_schedule_on_exec_ctx);
- grpc_slice_buffer_init(&c->initial_string_buffer);
- grpc_slice_buffer_add(&c->initial_string_buffer,
- c->args.initial_connect_string);
- grpc_endpoint_write(exec_ctx, c->endpoint, &c->initial_string_buffer,
- &c->initial_string_sent);
- } else {
- start_handshake_locked(exec_ctx, c);
- }
+ start_handshake_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
}
}
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index 837b9fd03d..6433968ca5 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -256,7 +256,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
+ err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);
goto error;
} else if (count != naddrs) {
@@ -264,7 +264,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&msg, "Only %" PRIuPTR
" addresses added out of total %" PRIuPTR " resolved",
count, naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
+ err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);
const char *warning_message = grpc_error_string(err);
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index cb2b3f5502..88c813f3be 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -61,7 +61,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
3, (server, addr, creds));
// Create security context.
if (creds == NULL) {
- err = GRPC_ERROR_CREATE(
+ err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No credentials specified for secure server port (creds==NULL)");
goto done;
}
@@ -72,7 +72,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
gpr_asprintf(&msg,
"Unable to create secure server with credentials of type %s.",
creds->type);
- err = grpc_error_set_int(GRPC_ERROR_CREATE(msg),
+ err = grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg),
GRPC_ERROR_INT_SECURITY_STATUS, status);
gpr_free(msg);
goto done;
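The hunks above show the convention this change applies across the tree: GRPC_ERROR_CREATE_FROM_STATIC_STRING for string literals that outlive the error, and GRPC_ERROR_CREATE_FROM_COPIED_STRING for messages formatted at runtime, which can then be freed immediately. A condensed sketch of the two paths (the helper names are hypothetical; assumes the error, <grpc/support/alloc.h> and <grpc/support/string_util.h> headers):

static grpc_error *shutdown_error(void) {
  /* The literal lives for the life of the program, so no copy is taken. */
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
}

static grpc_error *credentials_error(const char *creds_type) {
  char *msg;
  gpr_asprintf(&msg,
               "Unable to create secure server with credentials of type %s.",
               creds_type);
  /* The error copies the formatted buffer, so it is safe to free it now. */
  grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
  gpr_free(msg);
  return err;
}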
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index a3684535ff..8676a3752e 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -187,7 +187,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
GRPC_COMBINER_UNREF(exec_ctx, t->combiner, "chttp2_transport");
- cancel_pings(exec_ctx, t, GRPC_ERROR_CREATE("Transport destroyed"));
+ cancel_pings(exec_ctx, t,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
while (t->write_cb_pool) {
grpc_chttp2_write_cb *next = t->write_cb_pool->next;
@@ -494,8 +495,9 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
- grpc_error_set_int(GRPC_ERROR_CREATE("Transport destroyed"),
- GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy");
}
@@ -518,7 +520,8 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) {
if (t->close_transport_on_writes_finished == NULL) {
t->close_transport_on_writes_finished =
- GRPC_ERROR_CREATE("Delayed close due to in-progress write");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Delayed close due to in-progress write");
}
t->close_transport_on_writes_finished =
grpc_error_add_child(t->close_transport_on_writes_finished, error);
@@ -575,7 +578,7 @@ void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) {
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data) {
+ const void *server_data, gpr_arena *arena) {
GPR_TIMER_BEGIN("init_stream", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
@@ -588,8 +591,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_ref_init(&s->active_streams, 1);
GRPC_CHTTP2_STREAM_REF(s, "chttp2");
- grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1]);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0], arena);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@@ -665,16 +668,17 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GPR_TIMER_END("destroy_stream", 0);
- gpr_free(s->destroy_stream_arg);
+ grpc_closure_sched(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, void *and_free_memory) {
+ grpc_stream *gs,
+ grpc_closure *then_schedule_closure) {
GPR_TIMER_BEGIN("destroy_stream", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
- s->destroy_stream_arg = and_free_memory;
+ s->destroy_stream_arg = then_schedule_closure;
grpc_closure_sched(
exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
grpc_combiner_scheduler(t->combiner, false)),
@@ -833,7 +837,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
- close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE("goaway sent"));
+ close_transport_locked(
+ exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("goaway sent"));
}
}
@@ -897,22 +902,19 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
grpc_slice goaway_text) {
- char *msg = grpc_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- GRPC_CHTTP2_IF_TRACING(
- gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
- grpc_slice_unref_internal(exec_ctx, goaway_text);
+ // GRPC_CHTTP2_IF_TRACING(
+ // gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
t->seen_goaway = 1;
/* lie: use transient failure from the transport to indicate goaway has been
* received */
connectivity_state_set(
exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_str(
- grpc_error_set_int(GRPC_ERROR_CREATE("GOAWAY received"),
- GRPC_ERROR_INT_HTTP2_ERROR,
- (intptr_t)goaway_error),
- GRPC_ERROR_STR_RAW_BYTES, msg),
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
+ GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)goaway_error),
+ GRPC_ERROR_STR_RAW_BYTES, goaway_text),
"got_goaway");
- gpr_free(msg);
}
static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
@@ -935,9 +937,10 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
t->next_stream_id += 2;
if (t->next_stream_id >= MAX_CLIENT_STREAM_ID) {
- connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE("Stream IDs exhausted"),
- "no_more_stream_ids");
+ connectivity_state_set(
+ exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"),
+ "no_more_stream_ids");
}
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
@@ -951,9 +954,9 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
- grpc_error_set_int(GRPC_ERROR_CREATE("Stream IDs exhausted"),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
}
@@ -1002,11 +1005,11 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
- closure->error_data.error =
- GRPC_ERROR_CREATE("Error in HTTP transport completing operation");
- closure->error_data.error =
- grpc_error_set_str(closure->error_data.error,
- GRPC_ERROR_STR_TARGET_ADDRESS, t->peer_string);
+ closure->error_data.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Error in HTTP transport completing operation");
+ closure->error_data.error = grpc_error_set_str(
+ closure->error_data.error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(t->peer_string));
}
closure->error_data.error =
grpc_error_add_child(closure->error_data.error, error);
@@ -1181,10 +1184,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
- grpc_error_set_int(
- GRPC_ERROR_CREATE("to-be-sent initial metadata size "
- "exceeds peer limit"),
- GRPC_ERROR_INT_SIZE, (intptr_t)metadata_size),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "to-be-sent initial metadata size "
+ "exceeds peer limit"),
+ GRPC_ERROR_INT_SIZE,
+ (intptr_t)metadata_size),
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
@@ -1200,9 +1204,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else {
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
- grpc_error_set_int(GRPC_ERROR_CREATE("Transport closed"),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
} else {
GPR_ASSERT(s->id != 0);
@@ -1214,7 +1218,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
- GRPC_ERROR_CREATE_REFERENCING(
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Attempt to send initial metadata after stream was closed",
&s->write_closed_error, 1),
"send_initial_metadata_finished");
@@ -1228,7 +1232,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (s->write_closed) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE_REFERENCING(
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Attempt to send message after stream was closed",
&s->write_closed_error, 1),
"fetching_send_message_finished");
@@ -1276,10 +1280,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
- grpc_error_set_int(
- GRPC_ERROR_CREATE("to-be-sent trailing metadata size "
- "exceeds peer limit"),
- GRPC_ERROR_INT_SIZE, (intptr_t)metadata_size),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "to-be-sent trailing metadata size "
+ "exceeds peer limit"),
+ GRPC_ERROR_INT_SIZE,
+ (intptr_t)metadata_size),
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
@@ -1292,8 +1297,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
exec_ctx, t, s, &s->send_trailing_metadata_finished,
grpc_metadata_batch_is_empty(op->send_trailing_metadata)
? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE("Attempt to send trailing metadata after "
- "stream was closed"),
+ : GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Attempt to send trailing metadata after "
+ "stream was closed"),
"send_trailing_metadata_finished");
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
@@ -1408,11 +1414,11 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
grpc_http2_error_code http_error;
- const char *msg;
- grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL, &msg,
- &http_error);
+ grpc_slice slice;
+ grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL,
+ &slice, &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
- grpc_slice_from_copied_string(msg), &t->qbuf);
+ grpc_slice_ref_internal(slice), &t->qbuf);
grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
GRPC_ERROR_UNREF(error);
}
@@ -1572,7 +1578,7 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
close_transport_locked(
exec_ctx, t,
- GRPC_ERROR_CREATE_REFERENCING(
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Last stream closed after sending GOAWAY", &error, 1));
}
}
@@ -1613,8 +1619,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_status_code status;
- const char *msg;
- grpc_error_get_status(error, s->deadline, &status, &msg, NULL);
+ grpc_slice slice;
+ grpc_error_get_status(error, s->deadline, &status, &slice, NULL);
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
@@ -1629,15 +1635,19 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
s->recv_trailing_metadata_finished != NULL) {
char status_string[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status, status_string);
- grpc_chttp2_incoming_metadata_buffer_replace_or_add(
- exec_ctx, &s->metadata_buffer[1],
- grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS,
- grpc_slice_from_copied_string(status_string)));
- if (msg != NULL) {
- grpc_chttp2_incoming_metadata_buffer_replace_or_add(
- exec_ctx, &s->metadata_buffer[1],
- grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
- grpc_slice_from_copied_string(msg)));
+ GRPC_LOG_IF_ERROR("add_status",
+ grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ exec_ctx, &s->metadata_buffer[1],
+ grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_GRPC_STATUS,
+ grpc_slice_from_copied_string(status_string))));
+ if (!GRPC_SLICE_IS_EMPTY(slice)) {
+ GRPC_LOG_IF_ERROR(
+ "add_status_message",
+ grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ exec_ctx, &s->metadata_buffer[1],
+ grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_slice_ref_internal(slice))));
}
s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
@@ -1666,7 +1676,8 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
add_error(extra_error, refs, &nrefs);
grpc_error *error = GRPC_ERROR_NONE;
if (nrefs > 0) {
- error = GRPC_ERROR_CREATE_REFERENCING(master_error_msg, refs, nrefs);
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(master_error_msg,
+ refs, nrefs);
}
GRPC_ERROR_UNREF(extra_error);
return error;
@@ -1760,12 +1771,13 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_slice hdr;
grpc_slice status_hdr;
+ grpc_slice http_status_hdr;
grpc_slice message_pfx;
uint8_t *p;
uint32_t len = 0;
grpc_status_code grpc_status;
- const char *msg;
- grpc_error_get_status(error, s->deadline, &grpc_status, &msg, NULL);
+ grpc_slice slice;
+ grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
@@ -1775,6 +1787,26 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
It's complicated by the fact that our send machinery would be dead by
the time we got around to sending this, so instead we ignore HPACK
compression and just write the uncompressed bytes onto the wire. */
+ if (!s->sent_initial_metadata) {
+ http_status_hdr = grpc_slice_malloc(13);
+ p = GRPC_SLICE_START_PTR(http_status_hdr);
+ *p++ = 0x00;
+ *p++ = 7;
+ *p++ = ':';
+ *p++ = 's';
+ *p++ = 't';
+ *p++ = 'a';
+ *p++ = 't';
+ *p++ = 'u';
+ *p++ = 's';
+ *p++ = 3;
+ *p++ = '2';
+ *p++ = '0';
+ *p++ = '0';
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(http_status_hdr));
+ len += (uint32_t)GRPC_SLICE_LENGTH(http_status_hdr);
+ }
+
status_hdr = grpc_slice_malloc(15 + (grpc_status >= 10));
p = GRPC_SLICE_START_PTR(status_hdr);
*p++ = 0x00; /* literal header, not indexed */
@@ -1801,32 +1833,30 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(p == GRPC_SLICE_END_PTR(status_hdr));
len += (uint32_t)GRPC_SLICE_LENGTH(status_hdr);
- if (msg != NULL) {
- size_t msg_len = strlen(msg);
- GPR_ASSERT(msg_len <= UINT32_MAX);
- uint32_t msg_len_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)msg_len, 0);
- message_pfx = grpc_slice_malloc(14 + msg_len_len);
- p = GRPC_SLICE_START_PTR(message_pfx);
- *p++ = 0x00; /* literal header, not indexed */
- *p++ = 12; /* len(grpc-message) */
- *p++ = 'g';
- *p++ = 'r';
- *p++ = 'p';
- *p++ = 'c';
- *p++ = '-';
- *p++ = 'm';
- *p++ = 'e';
- *p++ = 's';
- *p++ = 's';
- *p++ = 'a';
- *p++ = 'g';
- *p++ = 'e';
- GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 0, 0, p, (uint32_t)msg_len_len);
- p += msg_len_len;
- GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
- len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
- len += (uint32_t)msg_len;
- }
+ size_t msg_len = GRPC_SLICE_LENGTH(slice);
+ GPR_ASSERT(msg_len <= UINT32_MAX);
+ uint32_t msg_len_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)msg_len, 1);
+ message_pfx = grpc_slice_malloc(14 + msg_len_len);
+ p = GRPC_SLICE_START_PTR(message_pfx);
+ *p++ = 0x00; /* literal header, not indexed */
+ *p++ = 12; /* len(grpc-message) */
+ *p++ = 'g';
+ *p++ = 'r';
+ *p++ = 'p';
+ *p++ = 'c';
+ *p++ = '-';
+ *p++ = 'm';
+ *p++ = 'e';
+ *p++ = 's';
+ *p++ = 's';
+ *p++ = 'a';
+ *p++ = 'g';
+ *p++ = 'e';
+ GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 1, 0, p, (uint32_t)msg_len_len);
+ p += msg_len_len;
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
+ len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
+ len += (uint32_t)msg_len;
hdr = grpc_slice_malloc(9);
p = GRPC_SLICE_START_PTR(hdr);
@@ -1842,11 +1872,12 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr));
grpc_slice_buffer_add(&t->qbuf, hdr);
- grpc_slice_buffer_add(&t->qbuf, status_hdr);
- if (msg != NULL) {
- grpc_slice_buffer_add(&t->qbuf, message_pfx);
- grpc_slice_buffer_add(&t->qbuf, grpc_slice_from_copied_string(msg));
+ if (!s->sent_initial_metadata) {
+ grpc_slice_buffer_add(&t->qbuf, http_status_hdr);
}
+ grpc_slice_buffer_add(&t->qbuf, status_hdr);
+ grpc_slice_buffer_add(&t->qbuf, message_pfx);
+ grpc_slice_buffer_add(&t->qbuf, grpc_slice_ref_internal(slice));
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
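
close_from_api() above now also emits a synthetic ":status: 200" response header when initial metadata was never sent, writing it as raw, uncompressed HPACK bytes because the normal encoder path may already be torn down: a 0x00 prefix octet (literal header field without indexing, new name), a length octet with the Huffman bit clear, the name bytes, then the same for the value. A small self-contained sketch of that wire shape (illustrative only, not part of the patch), restricted to names and values shorter than 127 bytes so each length fits in a single octet:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t append_literal_header(uint8_t *out, const char *name,
                                    const char *value) {
  size_t name_len = strlen(name);
  size_t value_len = strlen(value);
  assert(name_len < 127 && value_len < 127);
  uint8_t *p = out;
  *p++ = 0x00;               /* literal header field, not indexed, new name */
  *p++ = (uint8_t)name_len;  /* name length, Huffman bit (0x80) clear */
  memcpy(p, name, name_len);
  p += name_len;
  *p++ = (uint8_t)value_len; /* value length, Huffman bit clear */
  memcpy(p, value, value_len);
  p += value_len;
  return (size_t)(p - out);  /* bytes written */
}

int main(void) {
  uint8_t buf[64];
  size_t n = append_literal_header(buf, ":status", "200");
  /* 1 prefix byte + 1 + 7 name bytes + 1 + 3 value bytes = 13, matching the
     13-byte http_status_hdr slice in the hunk above. */
  assert(n == 13);
  return 0;
}
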
@@ -1921,9 +1952,9 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
if (parse_error == GRPC_ERROR_NONE &&
(parse_error = grpc_http_parser_eof(&parser)) == GRPC_ERROR_NONE) {
error = grpc_error_set_int(
- grpc_error_set_int(
- GRPC_ERROR_CREATE("Trying to connect an http1.x server"),
- GRPC_ERROR_INT_HTTP_STATUS, response.status),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Trying to connect an http1.x server"),
+ GRPC_ERROR_INT_HTTP_STATUS, response.status),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
}
GRPC_ERROR_UNREF(parse_error);
@@ -1944,9 +1975,10 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *err = error;
if (err != GRPC_ERROR_NONE) {
- err = grpc_error_set_int(
- GRPC_ERROR_CREATE_REFERENCING("Endpoint read failed", &err, 1),
- GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state);
+ err = grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Endpoint read failed", &err, 1),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
+ t->write_state);
}
GPR_SWAP(grpc_error *, err, error);
GRPC_ERROR_UNREF(err);
@@ -1967,8 +1999,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (errors[1] != GRPC_ERROR_NONE) {
errors[2] = try_http_parsing(exec_ctx, t);
GRPC_ERROR_UNREF(error);
- error = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
- GPR_ARRAY_SIZE(errors));
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed parsing HTTP/2", errors, GPR_ARRAY_SIZE(errors));
}
for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
GRPC_ERROR_UNREF(errors[i]);
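
The hunk above folds the transport parse error, the per-stream errors, and the best-effort HTTP/1.x detection into one referencing error. GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING takes its own references on the child errors, which is why the loop still unrefs the caller's copies afterwards. A minimal sketch of that contract, assuming the in-tree error.h and useful.h headers (the helper name and messages are illustrative):

#include <stddef.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/error.h"

static grpc_error *combine_parse_errors(grpc_error *transport_err,
                                        grpc_error *http_err) {
  grpc_error *children[] = {transport_err, http_err};
  grpc_error *combined = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
      "Combined parse failure", children, GPR_ARRAY_SIZE(children));
  /* The referencing error holds its own refs on the children, so we drop
     the references the caller handed in, just as the loop above does. */
  for (size_t i = 0; i < GPR_ARRAY_SIZE(children); i++) {
    GRPC_ERROR_UNREF(children[i]);
  }
  return combined;
}

int main(void) {
  grpc_error *e = combine_parse_errors(
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("transport parse failure"),
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("http1.x probe failure"));
  GRPC_ERROR_UNREF(e);
  return 0;
}
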
@@ -1993,7 +2025,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_BEGIN("post_reading_action_locked", 0);
bool keep_reading = false;
if (error == GRPC_ERROR_NONE && t->closed) {
- error = GRPC_ERROR_CREATE("Transport closed");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed");
}
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@@ -2128,8 +2160,8 @@ static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
- close_transport_locked(exec_ctx, t,
- GRPC_ERROR_CREATE("keepalive watchdog timeout"));
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "keepalive watchdog timeout"));
}
} else {
/** The watchdog timer should have been cancelled by
@@ -2328,7 +2360,8 @@ void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bs->slice_mu);
if (bs->remaining_bytes < GRPC_SLICE_LENGTH(slice)) {
incoming_byte_stream_publish_error(
- exec_ctx, bs, GRPC_ERROR_CREATE("Too many bytes in stream"));
+ exec_ctx, bs,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream"));
} else {
bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
if (bs->on_next != NULL) {
@@ -2348,7 +2381,7 @@ void grpc_chttp2_incoming_byte_stream_finished(
if (error == GRPC_ERROR_NONE) {
gpr_mu_lock(&bs->slice_mu);
if (bs->remaining_bytes != 0) {
- error = GRPC_ERROR_CREATE("Truncated message");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
}
gpr_mu_unlock(&bs->slice_mu);
}
@@ -2428,9 +2461,9 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
t->peer_string);
}
send_goaway(exec_ctx, t,
- grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
- GRPC_ERROR_INT_HTTP2_ERROR,
- GRPC_HTTP2_ENHANCE_YOUR_CALM));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
@@ -2457,9 +2490,10 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
s->id);
}
grpc_chttp2_cancel_stream(
- exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
- GRPC_ERROR_INT_HTTP2_ERROR,
- GRPC_HTTP2_ENHANCE_YOUR_CALM));
+ exec_ctx, t, s,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR,
+ GRPC_HTTP2_ENHANCE_YOUR_CALM));
if (n > 1) {
/* Since we cancel one stream per destructive reclamation, if
there are more streams left, we can immediately post a new
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index f9b9e1b309..6e9258ee7e 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -54,7 +54,8 @@ void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
- exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
+ exec_ctx, parser->parsing_frame,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Parser destroyed"));
}
GRPC_ERROR_UNREF(parser->error);
}
@@ -65,8 +66,9 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
if (flags & ~GRPC_CHTTP2_DATA_FLAG_END_STREAM) {
char *msg;
gpr_asprintf(&msg, "unsupported data flags: 0x%02x", flags);
- grpc_error *err = grpc_error_set_int(
- GRPC_ERROR_CREATE(msg), GRPC_ERROR_INT_STREAM_ID, (intptr_t)stream_id);
+ grpc_error *err =
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg),
+ GRPC_ERROR_INT_STREAM_ID, (intptr_t)stream_id);
gpr_free(msg);
return err;
}
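
This commit consistently splits error construction into two macros: GRPC_ERROR_CREATE_FROM_STATIC_STRING for string literals (only the pointer is stored, so it must stay valid for the life of the process) and GRPC_ERROR_CREATE_FROM_COPIED_STRING for gpr_asprintf-formatted text (the bytes are duplicated, so the scratch buffer is freed immediately, as in the hunk above). A minimal sketch of the pattern, assuming the in-tree error.h header (the helper and message text are illustrative):

#include <stdint.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"

static grpc_error *bad_flags_error(uint8_t flags) {
  if (flags == 0) {
    /* string literal: no allocation, the pointer itself is stored */
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING("flags unexpectedly zero");
  }
  char *msg;
  gpr_asprintf(&msg, "unsupported flags: 0x%02x", flags);
  /* formatted text: the error keeps its own copy of the bytes... */
  grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
  gpr_free(msg); /* ...so the scratch buffer can be freed right away */
  return err;
}

int main(void) {
  grpc_error *err = bad_flags_error(0x40);
  GRPC_ERROR_UNREF(err);
  return 0;
}
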
@@ -173,13 +175,13 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
break;
default:
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
- p->error = GRPC_ERROR_CREATE(msg);
+ p->error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)s->id);
gpr_free(msg);
msg = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- p->error =
- grpc_error_set_str(p->error, GRPC_ERROR_STR_RAW_BYTES, msg);
+ p->error = grpc_error_set_str(p->error, GRPC_ERROR_STR_RAW_BYTES,
+ grpc_slice_from_copied_string(msg));
gpr_free(msg);
p->error =
grpc_error_set_int(p->error, GRPC_ERROR_INT_OFFSET, cur - beg);
@@ -265,7 +267,8 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
}
}
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index d99d486c1b..001271dd22 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -54,7 +54,7 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
if (length < 8) {
char *msg;
gpr_asprintf(&msg, "goaway frame too short (%d bytes)", length);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -156,7 +156,8 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
}
return GRPC_ERROR_NONE;
}
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 9b4b1a7b84..de8462a17e 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -71,7 +71,7 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
if (flags & 0xfe || length != 8) {
char *msg;
gpr_asprintf(&msg, "invalid ping: length=%d, flags=%02x", length, flags);
- grpc_error *error = GRPC_ERROR_CREATE(msg);
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index cb017e75f0..225f15c77c 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -76,7 +76,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
char *msg;
gpr_asprintf(&msg, "invalid rst_stream: length=%d, flags=%02x", length,
flags);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -112,8 +112,9 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
char *message;
gpr_asprintf(&message, "Received RST_STREAM with error code %d", reason);
error = grpc_error_set_int(
- grpc_error_set_str(GRPC_ERROR_CREATE("RST_STREAM"),
- GRPC_ERROR_STR_GRPC_MESSAGE, message),
+ grpc_error_set_str(GRPC_ERROR_CREATE_FROM_STATIC_STRING("RST_STREAM"),
+ GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_from_copied_string(message)),
GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
gpr_free(message);
}
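
grpc_error_set_str() likewise now takes a grpc_slice for the attached string, so formatted messages such as the RST_STREAM description above are wrapped with grpc_slice_from_copied_string() and the scratch buffer is freed once the error owns its copy. A standalone sketch of that call shape, assuming the in-tree error.h header (the helper name is illustrative):

#include <stdint.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"

static grpc_error *rst_stream_error(int http2_code) {
  char *message;
  gpr_asprintf(&message, "Received RST_STREAM with error code %d", http2_code);
  grpc_error *err = grpc_error_set_int(
      grpc_error_set_str(GRPC_ERROR_CREATE_FROM_STATIC_STRING("RST_STREAM"),
                         GRPC_ERROR_STR_GRPC_MESSAGE,
                         grpc_slice_from_copied_string(message)),
      GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)http2_code);
  gpr_free(message); /* the error now owns a copied slice of the message */
  return err;
}

int main(void) {
  grpc_error *err = rst_stream_error(2);
  GRPC_ERROR_UNREF(err);
  return 0;
}
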
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 82290e34cd..16881c0707 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -131,13 +131,16 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
if (flags == GRPC_CHTTP2_FLAG_ACK) {
parser->is_ack = 1;
if (length != 0) {
- return GRPC_ERROR_CREATE("non-empty settings ack frame received");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "non-empty settings ack frame received");
}
return GRPC_ERROR_NONE;
} else if (flags != 0) {
- return GRPC_ERROR_CREATE("invalid flags on settings frame");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "invalid flags on settings frame");
} else if (length % 6 != 0) {
- return GRPC_ERROR_CREATE("settings frames must be a multiple of six bytes");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "settings frames must be a multiple of six bytes");
} else {
return GRPC_ERROR_NONE;
}
@@ -229,7 +232,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 8fa0bb471a..b76b6f6f47 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -70,7 +70,7 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
char *msg;
gpr_asprintf(&msg, "invalid window update: length=%d, flags=%02x", length,
flags);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -102,7 +102,7 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
if (received_update == 0 || (received_update & 0x80000000u)) {
char *msg;
gpr_asprintf(&msg, "invalid window update bytes: %d", p->amount);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 40f5120308..5099d736bf 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -693,7 +693,7 @@ static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
}
if (p->on_header == NULL) {
GRPC_MDELEM_UNREF(exec_ctx, md);
- return GRPC_ERROR_CREATE("on_header callback not set");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("on_header callback not set");
}
p->on_header(exec_ctx, p->on_header_user_data, md);
return GRPC_ERROR_NONE;
@@ -810,7 +810,8 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (GRPC_MDISNULL(md)) {
return grpc_error_set_int(
- grpc_error_set_int(GRPC_ERROR_CREATE("Invalid HPACK index received"),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Invalid HPACK index received"),
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
@@ -1074,7 +1075,7 @@ static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"More than two max table size changes in a single frame"));
}
p->dynamic_table_update_allowed--;
@@ -1092,7 +1093,7 @@ static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"More than two max table size changes in a single frame"));
}
p->dynamic_table_update_allowed--;
@@ -1126,7 +1127,7 @@ static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(cur != end);
char *msg;
gpr_asprintf(&msg, "Illegal hpack op code %d", *cur);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1246,7 +1247,7 @@ error:
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x on byte 5",
*p->parsing.value, *cur);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1275,7 +1276,7 @@ static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x sometime after byte 5",
*p->parsing.value, *cur);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1333,8 +1334,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte0;
p->base64_buffer = bits << 18;
@@ -1348,8 +1350,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte1;
p->base64_buffer |= bits << 12;
@@ -1363,8 +1366,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte2;
p->base64_buffer |= bits << 6;
@@ -1378,8 +1382,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte3;
p->base64_buffer |= bits;
@@ -1391,7 +1396,8 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
goto b64_byte0;
}
GPR_UNREACHABLE_CODE(return parse_error(
- exec_ctx, p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here")));
}
static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
@@ -1406,16 +1412,16 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
case B64_BYTE0:
break;
case B64_BYTE1:
- return parse_error(
- exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("illegal base64 encoding")); /* illegal encoding */
+ return parse_error(exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "illegal base64 encoding")); /* illegal encoding */
case B64_BYTE2:
bits = p->base64_buffer;
if (bits & 0xffff) {
char *msg;
gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%04x",
bits & 0xffff);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1428,7 +1434,7 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%02x",
bits & 0xff);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1550,7 +1556,8 @@ static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
grpc_mdelem elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (GRPC_MDISNULL(elem)) {
return grpc_error_set_int(
- grpc_error_set_int(GRPC_ERROR_CREATE("Invalid HPACK index received"),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Invalid HPACK index received"),
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
@@ -1620,13 +1627,18 @@ void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
grpc_slice slice) {
- /* TODO(ctiller): limit the distance of end from beg, and perform multiple
- steps in the event of a large chunk of data to limit
- stack space usage when no tail call optimization is
- available */
+/* max number of bytes to parse at a time... limits call stack depth on
+ * compilers without TCO */
+#define MAX_PARSE_LENGTH 1024
p->current_slice_refcount = slice.refcount;
- grpc_error *error = p->state(exec_ctx, p, GRPC_SLICE_START_PTR(slice),
- GRPC_SLICE_END_PTR(slice));
+ uint8_t *start = GRPC_SLICE_START_PTR(slice);
+ uint8_t *end = GRPC_SLICE_END_PTR(slice);
+ grpc_error *error = GRPC_ERROR_NONE;
+ while (start != end && error == GRPC_ERROR_NONE) {
+ uint8_t *target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
+ error = p->state(exec_ctx, p, start, target);
+ start = target;
+ }
p->current_slice_refcount = NULL;
return error;
}
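
The loop above replaces the old single p->state() call over the whole slice with a bounded-chunk driver: the HPACK parser states tail-call one another, so without tail-call optimization the stack could grow with the size of the incoming slice; dispatching at most MAX_PARSE_LENGTH bytes per call caps that depth. A self-contained toy version of the same driver pattern (illustrative only, not gRPC code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK 1024

typedef int (*parse_state_fn)(const uint8_t *begin, const uint8_t *end,
                              void *user_data);

/* Toy state: just counts bytes; a real parser would advance a state machine. */
static int count_bytes(const uint8_t *begin, const uint8_t *end, void *ud) {
  *(size_t *)ud += (size_t)(end - begin);
  return 0; /* 0 == success */
}

static int parse_in_chunks(parse_state_fn state, const uint8_t *start,
                           const uint8_t *end, void *user_data) {
  int err = 0;
  while (start != end && err == 0) {
    /* feed at most CHUNK bytes per dispatch (the GPR_MIN step above) */
    const uint8_t *target = (end - start > CHUNK) ? start + CHUNK : end;
    err = state(start, target, user_data);
    start = target;
  }
  return err;
}

int main(void) {
  uint8_t buf[5000] = {0};
  size_t seen = 0;
  parse_in_chunks(count_bytes, buf, buf + sizeof(buf), &seen);
  printf("parsed %zu bytes in <=%d-byte chunks\n", seen, CHUNK);
  return 0;
}
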
@@ -1670,7 +1682,7 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (is_last) {
if (parser->is_boundary && parser->state != parse_begin) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
- return GRPC_ERROR_CREATE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"end of header frame not aligned with a hpack record boundary");
}
/* need to check for null stream: this can occur if we receive an invalid
@@ -1678,7 +1690,8 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (s != NULL) {
if (parser->is_boundary) {
if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) {
- return GRPC_ERROR_CREATE("Too many trailer frames");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Too many trailer frames");
}
s->published_metadata[s->header_frames_received] =
GRPC_METADATA_PUBLISHED_FROM_WIRE;
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 62dd1b8cab..9dd41fdbe1 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -280,7 +280,7 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&msg,
"Attempt to make hpack table %d bytes when max is %d bytes",
bytes, tbl->max_bytes);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -317,7 +317,7 @@ grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
"HPACK max table size reduced to %d but not reflected by hpack "
"stream (still at %d)",
tbl->max_bytes, tbl->current_table_bytes);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
index c91b019aa0..da0a34d32a 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
@@ -41,69 +41,48 @@
#include <grpc/support/log.h>
void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer *buffer) {
- buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) {
+ buffer->arena = arena;
+ grpc_metadata_batch_init(&buffer->batch);
+ buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer) {
- size_t i;
- if (!buffer->published) {
- for (i = 0; i < buffer->count; i++) {
- GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
- }
- }
- gpr_free(buffer->elems);
+ grpc_metadata_batch_destroy(exec_ctx, &buffer->batch);
}
-void grpc_chttp2_incoming_metadata_buffer_add(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem) {
- GPR_ASSERT(!buffer->published);
- if (buffer->capacity == buffer->count) {
- buffer->capacity = GPR_MAX(8, 2 * buffer->capacity);
- buffer->elems =
- gpr_realloc(buffer->elems, sizeof(*buffer->elems) * buffer->capacity);
- }
- buffer->elems[buffer->count++].md = elem;
+grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
+ return grpc_metadata_batch_add_tail(
+ exec_ctx, &buffer->batch,
+ gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)), elem);
}
-void grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_mdelem elem) {
- for (size_t i = 0; i < buffer->count; i++) {
- if (grpc_slice_eq(GRPC_MDKEY(buffer->elems[i].md), GRPC_MDKEY(elem))) {
- GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
- buffer->elems[i].md = elem;
- return;
+ for (grpc_linked_mdelem *l = buffer->batch.list.head; l != NULL;
+ l = l->next) {
+ if (grpc_slice_eq(GRPC_MDKEY(l->md), GRPC_MDKEY(elem))) {
+ GRPC_MDELEM_UNREF(exec_ctx, l->md);
+ l->md = elem;
+ return GRPC_ERROR_NONE;
}
}
- grpc_chttp2_incoming_metadata_buffer_add(buffer, elem);
+ return grpc_chttp2_incoming_metadata_buffer_add(exec_ctx, buffer, elem);
}
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
- GPR_ASSERT(!buffer->published);
- buffer->deadline = deadline;
+ buffer->batch.deadline = deadline;
}
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_metadata_batch *batch) {
- GPR_ASSERT(!buffer->published);
- buffer->published = 1;
- if (buffer->count > 0) {
- size_t i;
- for (i = 0; i < buffer->count; i++) {
- /* TODO(ctiller): do something better here */
- if (!GRPC_LOG_IF_ERROR("grpc_chttp2_incoming_metadata_buffer_publish",
- grpc_metadata_batch_link_tail(
- exec_ctx, batch, &buffer->elems[i]))) {
- GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
- }
- }
- } else {
- batch->list.head = batch->list.tail = NULL;
- }
- batch->deadline = buffer->deadline;
+ *batch = buffer->batch;
+ grpc_metadata_batch_init(&buffer->batch);
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index 1eac6fc150..288c917e65 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -37,28 +37,26 @@
#include "src/core/lib/transport/transport.h"
typedef struct {
- grpc_linked_mdelem *elems;
- size_t count;
- size_t capacity;
- gpr_timespec deadline;
- int published;
+ gpr_arena *arena;
+ grpc_metadata_batch batch;
size_t size; // total size of metadata
} grpc_chttp2_incoming_metadata_buffer;
/** assumes everything initially zeroed */
void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer *buffer);
+ grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena);
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_metadata_batch *batch);
-void grpc_chttp2_incoming_metadata_buffer_add(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem);
-void grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
- grpc_mdelem elem);
+ grpc_mdelem elem) GRPC_MUST_USE_RESULT;
+grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_mdelem elem) GRPC_MUST_USE_RESULT;
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
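
The incoming-metadata buffer is now a thin wrapper over an arena-backed grpc_metadata_batch: links are carved out of the call's gpr_arena instead of a realloc'd elems array, publishing becomes a cheap struct move, and the add functions return a grpc_error * marked GRPC_MUST_USE_RESULT so callers must handle a failed append (the parsing.c hunks below cancel the stream; the transport and cronet code wrap the call in GRPC_LOG_IF_ERROR). A minimal sketch of the underlying arena allocation pattern, assuming the in-tree src/core/lib/support/arena.h helpers (the toy type is illustrative):

#include <string.h>
#include "src/core/lib/support/arena.h"

typedef struct toy_link {
  struct toy_link *next;
  char name[32];
} toy_link;

int main(void) {
  gpr_arena *arena = gpr_arena_create(1024);
  toy_link *head = NULL;
  for (int i = 0; i < 3; i++) {
    /* one bump-pointer allocation per element; no matching free is needed */
    toy_link *l = gpr_arena_alloc(arena, sizeof(*l));
    memset(l, 0, sizeof(*l));
    l->next = head;
    head = l;
  }
  (void)head;
  gpr_arena_destroy(arena); /* releases every element at once */
  return 0;
}
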
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index d26812ad6b..3c56c21599 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -425,7 +425,7 @@ struct grpc_chttp2_stream {
grpc_stream_refcount *refcount;
grpc_closure destroy_stream;
- void *destroy_stream_arg;
+ grpc_closure *destroy_stream_arg;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index e7f2597f89..7e457ced27 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -116,7 +116,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
(int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
*cur, (int)*cur, t->deframe_state);
- err = GRPC_ERROR_CREATE(msg);
+ err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -219,7 +219,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
t->incoming_frame_size,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]);
- err = GRPC_ERROR_CREATE(msg);
+ err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -278,7 +278,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
gpr_asprintf(
&msg, "Expected SETTINGS frame as the first frame, got frame type %d",
t->incoming_frame_type);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -288,7 +288,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "Expected CONTINUATION frame, got frame type %02x",
t->incoming_frame_type);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -299,7 +299,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
"Expected CONTINUATION frame for grpc_chttp2_stream %08x, got "
"grpc_chttp2_stream %08x",
t->expect_continuation_stream_id, t->incoming_stream_id);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -311,7 +311,8 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FRAME_HEADER:
return init_header_frame_parser(exec_ctx, t, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
- return GRPC_ERROR_CREATE("Unexpected CONTINUATION frame");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Unexpected CONTINUATION frame");
case GRPC_CHTTP2_FRAME_RST_STREAM:
return init_rst_stream_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_SETTINGS:
@@ -371,7 +372,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
t->incoming_frame_size, t->incoming_window);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -409,7 +410,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
s->incoming_window_delta +
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -542,13 +543,21 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
grpc_error_set_int(
- GRPC_ERROR_CREATE("received initial metadata size exceeds limit"),
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "received initial metadata size exceeds limit"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
s->seen_error = true;
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md);
+ grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add(
+ exec_ctx, &s->metadata_buffer[0], md);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
+ GRPC_MDELEM_UNREF(exec_ctx, md);
+ }
}
}
@@ -591,14 +600,22 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
new_size, metadata_size_limit);
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
- grpc_error_set_int(
- GRPC_ERROR_CREATE("received trailing metadata size exceeds limit"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "received trailing metadata size exceeds limit"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_RESOURCE_EXHAUSTED));
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
s->seen_error = true;
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md);
+ grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add(
+ exec_ctx, &s->metadata_buffer[1], md);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
+ GRPC_MDELEM_UNREF(exec_ctx, md);
+ }
}
GPR_TIMER_END("on_trailing_header", 0);
@@ -757,7 +774,8 @@ static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
if (t->incoming_stream_id != 0) {
- return GRPC_ERROR_CREATE("Settings frame received for grpc_chttp2_stream");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Settings frame received for grpc_chttp2_stream");
}
grpc_error *err = grpc_chttp2_settings_parser_begin_frame(
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index fabfaf8a27..952690eba7 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -128,6 +128,7 @@ struct read_state {
int received_bytes;
int remaining_bytes;
int length_field;
+ bool compressed;
char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES];
char *payload_field;
bool read_stream_closed;
@@ -184,6 +185,7 @@ struct op_storage {
};
struct stream_obj {
+ gpr_arena *arena;
struct op_and_state *oas;
grpc_transport_stream_op *curr_op;
grpc_cronet_transport *curr_ct;
@@ -272,7 +274,7 @@ static void maybe_flush_read(stream_obj *s) {
/* Whenever the evaluation of any of the two condition is changed, we check
* whether we should enter the flush read state. */
if (s->state.pending_recv_trailing_metadata && s->state.fail_state) {
- if (!s->state.flush_read) {
+ if (!s->state.flush_read && !s->state.rs.read_stream_closed) {
CRONET_LOG(GPR_DEBUG, "%p: Flush read", s);
s->state.flush_read = true;
null_and_maybe_free_read_buffer(s);
@@ -288,7 +290,7 @@ static void maybe_flush_read(stream_obj *s) {
}
static grpc_error *make_error_with_desc(int error_code, const char *desc) {
- grpc_error *error = GRPC_ERROR_CREATE(desc);
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error_code);
return error;
}
@@ -483,18 +485,31 @@ static void on_response_headers_received(
CRONET_LOG(GPR_DEBUG, "R: on_response_headers_received(%p, %p, %s)", stream,
headers, negotiated_protocol);
stream_obj *s = (stream_obj *)stream->annotation;
+
+ /* Identify if this is a header or a trailer (in a trailer-only response case)
+ */
+ for (size_t i = 0; i < headers->count; i++) {
+ if (0 == strcmp("grpc-status", headers->headers[i].key)) {
+ on_response_trailers_received(stream, headers);
+ return;
+ }
+ }
+
gpr_mu_lock(&s->mu);
memset(&s->state.rs.initial_metadata, 0,
sizeof(s->state.rs.initial_metadata));
- grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
+ s->arena);
for (size_t i = 0; i < headers->count; i++) {
- grpc_chttp2_incoming_metadata_buffer_add(
- &s->state.rs.initial_metadata,
- grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
- headers->headers[i].key)),
- grpc_slice_intern(
- grpc_slice_from_static_string(headers->headers[i].value))));
+ GRPC_LOG_IF_ERROR(
+ "on_response_headers_received",
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &exec_ctx, &s->state.rs.initial_metadata,
+ grpc_mdelem_from_slices(
+ &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].key)),
+ grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].value)))));
}
s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
@@ -503,6 +518,7 @@ static void on_response_headers_received(
is closed */
GPR_ASSERT(s->state.rs.length_field_received == false);
s->state.rs.read_buffer = s->state.rs.grpc_header_bytes;
+ s->state.rs.compressed = false;
s->state.rs.received_bytes = 0;
s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
@@ -586,17 +602,20 @@ static void on_response_trailers_received(
memset(&s->state.rs.trailing_metadata, 0,
sizeof(s->state.rs.trailing_metadata));
s->state.rs.trailing_metadata_valid = false;
- grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata,
+ s->arena);
for (size_t i = 0; i < trailers->count; i++) {
CRONET_LOG(GPR_DEBUG, "trailer key=%s, value=%s", trailers->headers[i].key,
trailers->headers[i].value);
- grpc_chttp2_incoming_metadata_buffer_add(
- &s->state.rs.trailing_metadata,
- grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
- trailers->headers[i].key)),
- grpc_slice_intern(
- grpc_slice_from_static_string(trailers->headers[i].value))));
+ GRPC_LOG_IF_ERROR(
+ "on_response_trailers_received",
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &exec_ctx, &s->state.rs.trailing_metadata,
+ grpc_mdelem_from_slices(
+ &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].key)),
+ grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].value)))));
s->state.rs.trailing_metadata_valid = true;
if (0 == strcmp(trailers->headers[i].key, "grpc-status") &&
0 != strcmp(trailers->headers[i].value, "0")) {
@@ -634,7 +653,7 @@ static void on_response_trailers_received(
*/
static void create_grpc_frame(grpc_slice_buffer *write_slice_buffer,
char **pp_write_buffer,
- size_t *p_write_buffer_size) {
+ size_t *p_write_buffer_size, uint32_t flags) {
grpc_slice slice = grpc_slice_buffer_take_first(write_slice_buffer);
size_t length = GRPC_SLICE_LENGTH(slice);
*p_write_buffer_size = length + GRPC_HEADER_SIZE_IN_BYTES;
@@ -643,7 +662,9 @@ static void create_grpc_frame(grpc_slice_buffer *write_slice_buffer,
*pp_write_buffer = write_buffer;
uint8_t *p = (uint8_t *)write_buffer;
/* Append 5 byte header */
- *p++ = 0;
+ /* Compressed flag */
+ *p++ = (flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0;
+ /* Message length */
*p++ = (uint8_t)(length >> 24);
*p++ = (uint8_t)(length >> 16);
*p++ = (uint8_t)(length >> 8);
@@ -721,14 +742,16 @@ static void convert_metadata_to_cronet_headers(
*p_num_headers = (size_t)num_headers;
}
-static int parse_grpc_header(const uint8_t *data) {
+static void parse_grpc_header(const uint8_t *data, int *length,
+ bool *compressed) {
+ const uint8_t c = *data;
const uint8_t *p = data + 1;
- int length = 0;
- length |= ((uint8_t)*p++) << 24;
- length |= ((uint8_t)*p++) << 16;
- length |= ((uint8_t)*p++) << 8;
- length |= ((uint8_t)*p++);
- return length;
+ *compressed = ((c & 0x01) == 0x01);
+ *length = 0;
+ *length |= ((uint8_t)*p++) << 24;
+ *length |= ((uint8_t)*p++) << 16;
+ *length |= ((uint8_t)*p++) << 8;
+ *length |= ((uint8_t)*p++);
}
static bool header_has_authority(grpc_linked_mdelem *head) {
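
parse_grpc_header() above now also surfaces the compressed flag of the 5-byte gRPC message prefix: one flag octet (bit 0 = compressed) followed by a big-endian 32-bit payload length, which is exactly what create_grpc_frame() writes on the send path. A self-contained sketch of that framing (illustrative only, not gRPC code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define FRAME_HEADER_SIZE 5

static void write_frame_header(uint8_t *p, bool compressed, uint32_t length) {
  p[0] = compressed ? 1 : 0;        /* compressed flag */
  p[1] = (uint8_t)(length >> 24);   /* big-endian message length */
  p[2] = (uint8_t)(length >> 16);
  p[3] = (uint8_t)(length >> 8);
  p[4] = (uint8_t)(length);
}

static void read_frame_header(const uint8_t *p, bool *compressed,
                              uint32_t *length) {
  *compressed = (p[0] & 0x01) == 0x01;
  *length = ((uint32_t)p[1] << 24) | ((uint32_t)p[2] << 16) |
            ((uint32_t)p[3] << 8) | (uint32_t)p[4];
}

int main(void) {
  uint8_t hdr[FRAME_HEADER_SIZE];
  write_frame_header(hdr, true, 1234567);
  bool compressed;
  uint32_t length;
  read_frame_header(hdr, &compressed, &length);
  assert(compressed && length == 1234567);
  return 0;
}
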
@@ -781,7 +804,8 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
else if (!stream_state->state_callback_received[OP_SEND_INITIAL_METADATA])
result = false;
/* we haven't received headers yet. */
- else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA])
+ else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA] &&
+ !stream_state->state_op_done[OP_RECV_TRAILING_METADATA])
result = false;
} else if (op_id == OP_SEND_MESSAGE) {
/* already executed (note we're checking op specific state, not stream
@@ -794,7 +818,8 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
/* already executed */
if (op_state->state_op_done[OP_RECV_MESSAGE]) result = false;
/* we haven't received headers yet. */
- else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA])
+ else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA] &&
+ !stream_state->state_op_done[OP_RECV_TRAILING_METADATA])
result = false;
} else if (op_id == OP_RECV_TRAILING_METADATA) {
/* already executed */
@@ -948,12 +973,6 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&write_slice_buffer);
grpc_byte_stream_next(NULL, stream_op->send_message, &slice,
stream_op->send_message->length, NULL);
- /* Check that compression flag is OFF. We don't support compression yet.
- */
- if (stream_op->send_message->flags != 0) {
- gpr_log(GPR_ERROR, "Compression is not supported");
- GPR_ASSERT(stream_op->send_message->flags == 0);
- }
grpc_slice_buffer_add(&write_slice_buffer, slice);
if (write_slice_buffer.count != 1) {
/* Empty request not handled yet */
@@ -963,7 +982,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
if (write_slice_buffer.count > 0) {
size_t write_buffer_size;
create_grpc_frame(&write_slice_buffer, &stream_state->ws.write_buffer,
- &write_buffer_size);
+ &write_buffer_size, stream_op->send_message->flags);
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, %p)", s->cbs,
stream_state->ws.write_buffer);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
@@ -1015,6 +1034,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_state->state_callback_received[OP_FAILED]) {
grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
GRPC_ERROR_NONE);
+ } else if (stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
} else {
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &oas->s->state.rs.initial_metadata,
@@ -1059,8 +1081,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.remaining_bytes == 0) {
/* Start a read operation for data */
stream_state->rs.length_field_received = true;
- stream_state->rs.length_field = stream_state->rs.remaining_bytes =
- parse_grpc_header((const uint8_t *)stream_state->rs.read_buffer);
+ parse_grpc_header((const uint8_t *)stream_state->rs.read_buffer,
+ &stream_state->rs.length_field,
+ &stream_state->rs.compressed);
CRONET_LOG(GPR_DEBUG, "length field = %d",
stream_state->rs.length_field);
if (stream_state->rs.length_field > 0) {
@@ -1082,6 +1105,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
&stream_state->rs.read_slice_buffer, 0);
+ if (stream_state->rs.compressed) {
+ stream_state->rs.sbs.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
+ }
*((grpc_byte_buffer **)stream_op->recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
@@ -1093,6 +1119,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
+ stream_state->rs.compressed = false;
stream_state->rs.length_field_received = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
@@ -1107,6 +1134,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
+ stream_state->rs.compressed = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
@@ -1130,6 +1158,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
read_data_slice);
grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
&stream_state->rs.read_slice_buffer, 0);
+ if (stream_state->rs.compressed) {
+ stream_state->rs.sbs.base.flags = GRPC_WRITE_INTERNAL_COMPRESS;
+ }
*((grpc_byte_buffer **)stream_op->recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
@@ -1139,6 +1170,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
/* Do an extra read to trigger on_succeeded() callback in case connection
is closed */
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
+ stream_state->rs.compressed = false;
stream_state->rs.received_bytes = 0;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.length_field_received = false;
@@ -1215,7 +1247,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data) {
+ const void *server_data, gpr_arena *arena) {
stream_obj *s = (stream_obj *)gs;
memset(&s->storage, 0, sizeof(s->storage));
s->storage.head = NULL;
@@ -1237,6 +1269,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->curr_gs = gs;
s->curr_ct = (grpc_cronet_transport *)gt;
+ s->arena = arena;
gpr_mu_init(&s->mu);
return 0;
@@ -1273,10 +1306,12 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, void *and_free_memory) {
+ grpc_stream *gs,
+ grpc_closure *then_schedule_closure) {
stream_obj *s = (stream_obj *)gs;
null_and_maybe_free_read_buffer(s);
GRPC_ERROR_UNREF(s->state.cancel_error);
+ grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}