Diffstat (limited to 'src/core/ext/filters')
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc | 9
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc | 14
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 199
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.cc | 6
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.cc | 16
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.cc | 21
-rw-r--r--  src/core/ext/filters/client_channel/http_proxy.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc | 7
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h | 4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc | 19
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 255
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc | 5
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc | 5
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc | 15
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc | 54
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 43
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 139
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc | 38
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.cc | 15
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.cc | 18
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.cc | 4
-rw-r--r--  src/core/ext/filters/client_channel/resolver.cc | 58
-rw-r--r--  src/core/ext/filters/client_channel/resolver.h | 153
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 501
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc | 10
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc | 58
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc | 449
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc | 309
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h | 78
-rw-r--r--  src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc | 252
-rw-r--r--  src/core/ext/filters/client_channel/resolver_factory.cc | 40
-rw-r--r--  src/core/ext/filters/client_channel/resolver_factory.h | 65
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.cc | 200
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.h | 85
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.cc | 51
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc | 58
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 4
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.cc | 84
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.cc | 6
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.cc | 45
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.cc | 49
-rw-r--r--  src/core/ext/filters/http/http_filters_plugin.cc | 5
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.cc | 53
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.cc | 43
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.cc | 20
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc | 3
-rw-r--r--  src/core/ext/filters/max_age/max_age_filter.cc | 204
-rw-r--r--  src/core/ext/filters/message_size/message_size_filter.cc | 32
-rw-r--r--  src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc | 8
-rw-r--r--  src/core/ext/filters/workarounds/workaround_utils.cc | 8
52 files changed, 2062 insertions, 1761 deletions
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index 906a72b662..ee90b499eb 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -80,7 +80,7 @@ static void backup_poller_shutdown_unref(backup_poller* p) {
}
static void done_poller(void* arg, grpc_error* error) {
- backup_poller_shutdown_unref((backup_poller*)arg);
+ backup_poller_shutdown_unref(static_cast<backup_poller*>(arg));
}
static void g_poller_unref() {
@@ -102,7 +102,7 @@ static void g_poller_unref() {
}
static void run_poller(void* arg, grpc_error* error) {
- backup_poller* p = (backup_poller*)arg;
+ backup_poller* p = static_cast<backup_poller*>(arg);
if (error != GRPC_ERROR_NONE) {
if (error != GRPC_ERROR_CANCELLED) {
GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
@@ -133,8 +133,9 @@ void grpc_client_channel_start_backup_polling(
}
gpr_mu_lock(&g_poller_mu);
if (g_poller == nullptr) {
- g_poller = (backup_poller*)gpr_zalloc(sizeof(backup_poller));
- g_poller->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+ g_poller = static_cast<backup_poller*>(gpr_zalloc(sizeof(backup_poller)));
+ g_poller->pollset =
+ static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
g_poller->shutting_down = false;
grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
gpr_ref_init(&g_poller->refs, 0);
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index a827aa30ec..31a5c31124 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -89,7 +89,7 @@ static void delete_state_watcher(state_watcher* w) {
static void finished_completion(void* pw, grpc_cq_completion* ignored) {
bool should_delete = false;
- state_watcher* w = (state_watcher*)pw;
+ state_watcher* w = static_cast<state_watcher*>(pw);
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@@ -162,11 +162,11 @@ static void partly_done(state_watcher* w, bool due_to_completion,
}
static void watch_complete(void* pw, grpc_error* error) {
- partly_done((state_watcher*)pw, true, GRPC_ERROR_REF(error));
+ partly_done(static_cast<state_watcher*>(pw), true, GRPC_ERROR_REF(error));
}
static void timeout_complete(void* pw, grpc_error* error) {
- partly_done((state_watcher*)pw, false, GRPC_ERROR_REF(error));
+ partly_done(static_cast<state_watcher*>(pw), false, GRPC_ERROR_REF(error));
}
int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
@@ -182,7 +182,7 @@ typedef struct watcher_timer_init_arg {
} watcher_timer_init_arg;
static void watcher_timer_init(void* arg, grpc_error* error_ignored) {
- watcher_timer_init_arg* wa = (watcher_timer_init_arg*)arg;
+ watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>(arg);
grpc_timer_init(&wa->w->alarm, grpc_timespec_to_millis_round_up(wa->deadline),
&wa->w->on_timeout);
@@ -201,7 +201,7 @@ void grpc_channel_watch_connectivity_state(
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_core::ExecCtx exec_ctx;
- state_watcher* w = (state_watcher*)gpr_malloc(sizeof(*w));
+ state_watcher* w = static_cast<state_watcher*>(gpr_malloc(sizeof(*w)));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -227,8 +227,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = nullptr;
- watcher_timer_init_arg* wa =
- (watcher_timer_init_arg*)gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>(
+ gpr_malloc(sizeof(watcher_timer_init_arg)));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index a8a7a37be0..50d562f946 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -29,7 +29,6 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
@@ -89,10 +88,10 @@ static void method_parameters_unref(method_parameters* method_params) {
// Wrappers to pass to grpc_service_config_create_method_config_table().
static void* method_parameters_ref_wrapper(void* value) {
- return method_parameters_ref((method_parameters*)value);
+ return method_parameters_ref(static_cast<method_parameters*>(value));
}
static void method_parameters_unref_wrapper(void* value) {
- method_parameters_unref((method_parameters*)value);
+ method_parameters_unref(static_cast<method_parameters*>(value));
}
static bool parse_wait_for_ready(grpc_json* field,
@@ -120,7 +119,7 @@ static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
gpr_free(buf);
return false;
}
- int num_digits = (int)strlen(decimal_point + 1);
+ int num_digits = static_cast<int>(strlen(decimal_point + 1));
if (num_digits > 9) { // We don't accept greater precision than nanos.
gpr_free(buf);
return false;
@@ -150,7 +149,7 @@ static void* method_parameters_create_from_json(const grpc_json* json) {
}
}
method_parameters* value =
- (method_parameters*)gpr_malloc(sizeof(method_parameters));
+ static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -165,7 +164,7 @@ struct external_connectivity_watcher;
typedef struct client_channel_channel_data {
/** resolver for this channel */
- grpc_resolver* resolver;
+ grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
/** have we started resolving this channel */
bool started_resolving;
/** is deadline checking enabled? */
@@ -261,7 +260,8 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
}
static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
- lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
+ lb_policy_connectivity_watcher* w =
+ static_cast<lb_policy_connectivity_watcher*>(arg);
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
if (grpc_client_channel_trace.enabled()) {
@@ -282,7 +282,7 @@ static void watch_lb_policy_locked(channel_data* chand,
grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher* w =
- (lb_policy_connectivity_watcher*)gpr_malloc(sizeof(*w));
+ static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -300,8 +300,8 @@ static void start_resolving_locked(channel_data* chand) {
GPR_ASSERT(!chand->started_resolving);
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next_locked(chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
+ chand->resolver->NextLocked(&chand->resolver_result,
+ &chand->on_resolver_result_changed);
}
typedef struct {
@@ -311,7 +311,7 @@ typedef struct {
static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
service_config_parsing_state* parsing_state =
- (service_config_parsing_state*)arg;
+ static_cast<service_config_parsing_state*>(arg);
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -335,7 +335,7 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
uint32_t decimal_value = 0;
const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != nullptr) {
- whole_len = (size_t)(decimal_point - sub_field->value);
+ whole_len = static_cast<size_t>(decimal_point - sub_field->value);
multiplier = 1000;
size_t decimal_len = strlen(decimal_point + 1);
if (decimal_len > 3) decimal_len = 3;
@@ -354,7 +354,8 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
&whole_value)) {
return;
}
- milli_token_ratio = (int)((whole_value * multiplier) + decimal_value);
+ milli_token_ratio =
+ static_cast<int>((whole_value * multiplier) + decimal_value);
if (milli_token_ratio <= 0) return;
}
}
@@ -365,7 +366,8 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
}
static void request_reresolution_locked(void* arg, grpc_error* error) {
- reresolution_request_args* args = (reresolution_request_args*)arg;
+ reresolution_request_args* args =
+ static_cast<reresolution_request_args*>(arg);
channel_data* chand = args->chand;
// If this invocation is for a stale LB policy, treat it as an LB shutdown
// signal.
@@ -378,13 +380,13 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
}
- grpc_resolver_channel_saw_error_locked(chand->resolver);
+ chand->resolver->RequestReresolutionLocked();
// Give back the closure to the LB policy.
grpc_lb_policy_set_reresolve_closure_locked(chand->lb_policy, &args->closure);
}
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
@@ -413,7 +415,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses* addresses =
- (grpc_lb_addresses*)channel_arg->value.pointer.p;
+ static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -459,7 +461,8 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
lb_policy_name);
} else {
reresolution_request_args* args =
- (reresolution_request_args*)gpr_zalloc(sizeof(*args));
+ static_cast<reresolution_request_args*>(
+ gpr_zalloc(sizeof(*args)));
args->chand = chand;
args->lb_policy = new_lb_policy;
GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
@@ -568,9 +571,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
}
- grpc_resolver_shutdown_locked(chand->resolver);
- GRPC_RESOLVER_UNREF(chand->resolver, "channel");
- chand->resolver = nullptr;
+ chand->resolver.reset();
}
set_channel_connectivity_state_locked(
chand, GRPC_CHANNEL_SHUTDOWN,
@@ -606,17 +607,17 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
set_channel_connectivity_state_locked(
chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
}
- grpc_resolver_next_locked(chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
+ chand->resolver->NextLocked(&chand->resolver_result,
+ &chand->on_resolver_result_changed);
GRPC_ERROR_UNREF(state_error);
}
}
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
- grpc_transport_op* op = (grpc_transport_op*)arg;
+ grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
grpc_channel_element* elem =
- (grpc_channel_element*)op->handler_private.extra_arg;
- channel_data* chand = (channel_data*)elem->channel_data;
+ static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (op->on_connectivity_state_change != nullptr) {
grpc_connectivity_state_notify_on_state_change(
@@ -648,9 +649,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
set_channel_connectivity_state_locked(
chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
- grpc_resolver_shutdown_locked(chand->resolver);
- GRPC_RESOLVER_UNREF(chand->resolver, "channel");
- chand->resolver = nullptr;
+ chand->resolver.reset();
if (!chand->started_resolving) {
grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
GRPC_ERROR_REF(op->disconnect_with_error));
@@ -673,7 +672,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
static void cc_start_transport_op(grpc_channel_element* elem,
grpc_transport_op* op) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != nullptr) {
@@ -690,7 +689,7 @@ static void cc_start_transport_op(grpc_channel_element* elem,
static void cc_get_channel_info(grpc_channel_element* elem,
const grpc_channel_info* info) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != nullptr) {
*info->lb_policy_name = chand->info_lb_policy_name == nullptr
@@ -709,7 +708,7 @@ static void cc_get_channel_info(grpc_channel_element* elem,
/* Constructor for channel_data */
static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -741,9 +740,9 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(
- (grpc_client_channel_factory*)arg->value.pointer.p);
+ static_cast<grpc_client_channel_factory*>(arg->value.pointer.p));
chand->client_channel_factory =
- (grpc_client_channel_factory*)arg->value.pointer.p;
+ static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == nullptr) {
@@ -759,7 +758,7 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
&proxy_name, &new_args);
// Instantiate resolver.
- chand->resolver = grpc_resolver_create(
+ chand->resolver = grpc_core::ResolverRegistry::CreateResolver(
proxy_name != nullptr ? proxy_name : arg->value.string,
new_args != nullptr ? new_args : args->channel_args,
chand->interested_parties, chand->combiner);
@@ -774,17 +773,16 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
}
static void shutdown_resolver_locked(void* arg, grpc_error* error) {
- grpc_resolver* resolver = (grpc_resolver*)arg;
- grpc_resolver_shutdown_locked(resolver);
- GRPC_RESOLVER_UNREF(resolver, "channel");
+ grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg);
+ resolver->Orphan();
}
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_channel_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->resolver != nullptr) {
GRPC_CLOSURE_SCHED(
- GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
+ GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
@@ -873,7 +871,7 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
grpc_call_element* elem) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
return calld->subchannel_call;
}
@@ -892,7 +890,7 @@ static void waiting_for_pick_batches_add(
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@@ -904,7 +902,7 @@ static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_fail(grpc_call_element* elem,
grpc_error* error) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
@@ -932,7 +930,7 @@ static void waiting_for_pick_batches_fail(grpc_call_element* elem,
// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@@ -943,8 +941,8 @@ static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending %" PRIuPTR
@@ -968,8 +966,8 @@ static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -979,8 +977,8 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != nullptr) {
- calld->method_params = (method_parameters*)grpc_method_config_table_get(
- chand->method_params_table, calld->path);
+ calld->method_params = static_cast<method_parameters*>(
+ grpc_method_config_table_get(chand->method_params_table, calld->path));
if (calld->method_params != nullptr) {
method_parameters_ref(calld->method_params);
// If the deadline from the service config is shorter than the one
@@ -1001,8 +999,8 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
static void create_subchannel_call_locked(grpc_call_element* elem,
grpc_error* error) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent
calld->path, // path
@@ -1029,8 +1027,8 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
// Invoked when a pick is completed, on both success or failure.
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
@@ -1057,8 +1055,8 @@ static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_polling_entity_del_from_pollset_set(calld->pollent,
chand->interested_parties);
pick_done_locked(elem, error);
@@ -1067,9 +1065,9 @@ static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
@@ -1087,22 +1085,23 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
}
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@@ -1134,6 +1133,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
calld->pick.on_complete = &calld->lb_pick_closure;
+ GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
const bool pick_done =
grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
if (pick_done) {
@@ -1142,6 +1142,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
@@ -1164,7 +1165,8 @@ typedef struct {
// holding the call combiner.
static void pick_after_resolver_result_cancel_locked(void* arg,
grpc_error* error) {
- pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
+ pick_after_resolver_result_args* args =
+ static_cast<pick_after_resolver_result_args*>(arg);
if (args->finished) {
gpr_free(args);
return;
@@ -1178,8 +1180,8 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
grpc_call_element* elem = args->elem;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
@@ -1198,7 +1200,8 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
static void pick_after_resolver_result_done_locked(void* arg,
grpc_error* error) {
- pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
+ pick_after_resolver_result_args* args =
+ static_cast<pick_after_resolver_result_args*>(arg);
if (args->finished) {
/* cancelled, do nothing */
if (grpc_client_channel_trace.enabled()) {
@@ -1209,8 +1212,8 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
args->finished = true;
grpc_call_element* elem = args->elem;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1258,15 +1261,15 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
pick_after_resolver_result_args* args =
- (pick_after_resolver_result_args*)gpr_zalloc(sizeof(*args));
+ static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
@@ -1280,9 +1283,9 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
}
static void start_pick_locked(void* arg, grpc_error* ignored) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
@@ -1313,8 +1316,8 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
}
static void on_complete(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->retry_throttle_data != nullptr) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1333,12 +1336,12 @@ static void on_complete(void* arg, grpc_error* error) {
static void cc_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
}
- GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
// If we've previously been cancelled, immediately fail any new batches.
if (calld->error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
@@ -1347,7 +1350,7 @@ static void cc_start_transport_stream_op_batch(
}
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
- goto done;
+ return;
}
if (batch->cancel_stream) {
// Stash a copy of cancel_error in our call data, so that we can use
@@ -1369,7 +1372,7 @@ static void cc_start_transport_stream_op_batch(
waiting_for_pick_batches_add(calld, batch);
waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
}
- goto done;
+ return;
}
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
@@ -1391,7 +1394,7 @@ static void cc_start_transport_stream_op_batch(
calld, calld->subchannel_call);
}
grpc_subchannel_call_process_op(calld->subchannel_call, batch);
- goto done;
+ return;
}
// We do not yet have a subchannel call.
// Add the batch to the waiting-for-pick list.
@@ -1417,15 +1420,13 @@ static void cc_start_transport_stream_op_batch(
GRPC_CALL_COMBINER_STOP(calld->call_combiner,
"batch does not include send_initial_metadata");
}
-done:
- GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
static grpc_error* cc_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@@ -1444,8 +1445,8 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
static void cc_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* then_schedule_closure) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(elem);
}
@@ -1476,7 +1477,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
grpc_polling_entity* pollent) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->pollent = pollent;
}
@@ -1499,7 +1500,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
};
static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
if (chand->lb_policy != nullptr) {
grpc_lb_policy_exit_idle_locked(chand->lb_policy);
} else {
@@ -1513,7 +1514,7 @@ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_channel_element* elem, int try_to_connect) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1584,7 +1585,7 @@ static void external_connectivity_watcher_list_remove(
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1600,7 +1601,8 @@ int grpc_client_channel_num_external_connectivity_watchers(
}
static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
- external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ external_connectivity_watcher* w =
+ static_cast<external_connectivity_watcher*>(arg);
grpc_closure* follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(&w->pollent,
w->chand->interested_parties);
@@ -1613,7 +1615,8 @@ static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
static void watch_connectivity_state_locked(void* arg,
grpc_error* error_ignored) {
- external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ external_connectivity_watcher* w =
+ static_cast<external_connectivity_watcher*>(arg);
external_connectivity_watcher* found = nullptr;
if (w->state != nullptr) {
external_connectivity_watcher_list_append(w->chand, w);
@@ -1642,9 +1645,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_channel_element* elem, grpc_polling_entity pollent,
grpc_connectivity_state* state, grpc_closure* closure,
grpc_closure* watcher_timer_init) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
external_connectivity_watcher* w =
- (external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
+ static_cast<external_connectivity_watcher*>(gpr_zalloc(sizeof(*w)));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.cc b/src/core/ext/filters/client_channel/client_channel_factory.cc
index 60c95d7dc9..3baf5b31ab 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.cc
+++ b/src/core/ext/filters/client_channel/client_channel_factory.cc
@@ -39,12 +39,14 @@ grpc_channel* grpc_client_channel_factory_create_channel(
}
static void* factory_arg_copy(void* factory) {
- grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory);
+ grpc_client_channel_factory_ref(
+ static_cast<grpc_client_channel_factory*>(factory));
return factory;
}
static void factory_arg_destroy(void* factory) {
- grpc_client_channel_factory_unref((grpc_client_channel_factory*)factory);
+ grpc_client_channel_factory_unref(
+ static_cast<grpc_client_channel_factory*>(factory));
}
static int factory_arg_cmp(void* factory1, void* factory2) {
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index ea630d2917..9172fa781c 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -36,7 +36,7 @@
static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
+ builder, static_cast<const grpc_channel_filter*>(arg), nullptr, nullptr);
}
static bool set_default_host_if_unset(grpc_channel_stack_builder* builder,
@@ -49,14 +49,14 @@ static bool set_default_host_if_unset(grpc_channel_stack_builder* builder,
return true;
}
}
- char* default_authority = grpc_get_default_authority(
- grpc_channel_stack_builder_get_target(builder));
- if (default_authority != nullptr) {
+ grpc_core::UniquePtr<char> default_authority =
+ grpc_core::ResolverRegistry::GetDefaultAuthority(
+ grpc_channel_stack_builder_get_target(builder));
+ if (default_authority.get() != nullptr) {
grpc_arg arg = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
+ (char*)GRPC_ARG_DEFAULT_AUTHORITY, default_authority.get());
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(builder, new_args);
- gpr_free(default_authority);
grpc_channel_args_destroy(new_args);
}
return true;
@@ -64,7 +64,7 @@ static bool set_default_host_if_unset(grpc_channel_stack_builder* builder,
void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
- grpc_resolver_registry_init();
+ grpc_core::ResolverRegistry::Builder::InitRegistry();
grpc_retry_throttle_map_init();
grpc_proxy_mapper_registry_init();
grpc_register_http_proxy_mapper();
@@ -82,6 +82,6 @@ void grpc_client_channel_shutdown(void) {
grpc_channel_init_shutdown();
grpc_proxy_mapper_registry_shutdown();
grpc_retry_throttle_map_shutdown();
- grpc_resolver_registry_shutdown();
+ grpc_core::ResolverRegistry::Builder::ShutdownRegistry();
grpc_lb_policy_registry_shutdown();
}
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index 6bfd038887..6bb4cefe73 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -119,7 +119,8 @@ static void handshake_failed_locked(http_connect_handshaker* handshaker,
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(void* arg, grpc_error* error) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
+ http_connect_handshaker* handshaker =
+ static_cast<http_connect_handshaker*>(arg);
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the write failed or we're shutting down, clean up and invoke the
@@ -139,7 +140,8 @@ static void on_write_done(void* arg, grpc_error* error) {
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(void* arg, grpc_error* error) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
+ http_connect_handshaker* handshaker =
+ static_cast<http_connect_handshaker*>(arg);
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the read failed or we're shutting down, clean up and invoke the
@@ -224,13 +226,15 @@ done:
//
static void http_connect_handshaker_destroy(grpc_handshaker* handshaker_in) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ http_connect_handshaker* handshaker =
+ reinterpret_cast<http_connect_handshaker*>(handshaker_in);
http_connect_handshaker_unref(handshaker);
}
static void http_connect_handshaker_shutdown(grpc_handshaker* handshaker_in,
grpc_error* why) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ http_connect_handshaker* handshaker =
+ reinterpret_cast<http_connect_handshaker*>(handshaker_in);
gpr_mu_lock(&handshaker->mu);
if (!handshaker->shutdown) {
handshaker->shutdown = true;
@@ -244,7 +248,8 @@ static void http_connect_handshaker_shutdown(grpc_handshaker* handshaker_in,
static void http_connect_handshaker_do_handshake(
grpc_handshaker* handshaker_in, grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done, grpc_handshaker_args* args) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ http_connect_handshaker* handshaker =
+ reinterpret_cast<http_connect_handshaker*>(handshaker_in);
// Check for HTTP CONNECT channel arg.
// If not found, invoke on_handshake_done without doing anything.
const grpc_arg* arg =
@@ -270,8 +275,8 @@ static void http_connect_handshaker_do_handshake(
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
gpr_string_split(arg->value.string, "\n", &header_strings,
&num_header_strings);
- headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
- num_header_strings);
+ headers = static_cast<grpc_http_header*>(
+ gpr_malloc(sizeof(grpc_http_header) * num_header_strings));
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == nullptr) {
@@ -324,7 +329,7 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
static grpc_handshaker* grpc_http_connect_handshaker_create() {
http_connect_handshaker* handshaker =
- (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
+ static_cast<http_connect_handshaker*>(gpr_malloc(sizeof(*handshaker)));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
gpr_mu_init(&handshaker->mu);
diff --git a/src/core/ext/filters/client_channel/http_proxy.cc b/src/core/ext/filters/client_channel/http_proxy.cc
index 7c5f79fb30..d42376413d 100644
--- a/src/core/ext/filters/client_channel/http_proxy.cc
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -22,7 +22,6 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -31,6 +30,7 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/slice/b64.h"
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index cc4fe7ec62..27fb2ad1f4 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -118,7 +118,8 @@ void grpc_lb_policy_update_locked(grpc_lb_policy* policy,
void grpc_lb_policy_set_reresolve_closure_locked(
grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- policy->vtable->set_reresolve_closure_locked(policy, request_reresolution);
+ GPR_ASSERT(policy->request_reresolution == nullptr);
+ policy->request_reresolution = request_reresolution;
}
void grpc_lb_policy_try_reresolve(grpc_lb_policy* policy,
@@ -133,8 +134,8 @@ void grpc_lb_policy_try_reresolve(grpc_lb_policy* policy,
grpc_lb_trace->name(), policy, grpc_error_string(error));
}
} else {
- if (grpc_lb_trace->enabled() && error == GRPC_ERROR_NONE) {
- gpr_log(GPR_DEBUG, "%s %p: re-resolution already in progress.",
+ if (grpc_lb_trace->enabled()) {
+ gpr_log(GPR_DEBUG, "%s %p: no available re-resolution closure.",
grpc_lb_trace->name(), policy);
}
}
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 30660cb83d..6edd314d5e 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -107,10 +107,6 @@ struct grpc_lb_policy_vtable {
void (*update_locked)(grpc_lb_policy* policy,
const grpc_lb_policy_args* args);
-
- /** \see grpc_lb_policy_set_reresolve_closure */
- void (*set_reresolve_closure_locked)(grpc_lb_policy* policy,
- grpc_closure* request_reresolution);
};
#ifndef NDEBUG
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 4596f90745..1a3a1f029c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -48,7 +48,7 @@ struct call_data {
} // namespace
static void on_complete_for_send(void* arg, grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -56,7 +56,7 @@ static void on_complete_for_send(void* arg, grpc_error* error) {
}
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -66,13 +66,13 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != nullptr);
if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
- calld->client_stats = grpc_grpclb_client_stats_ref(
- (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS]
- .value);
+ calld->client_stats =
+ grpc_grpclb_client_stats_ref(static_cast<grpc_grpclb_client_stats*>(
+ args->context[GRPC_GRPCLB_CLIENT_STATS].value));
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
}
@@ -82,7 +82,7 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->client_stats != nullptr) {
// Record call finished, optionally setting client_failed_to_send and
// received.
@@ -97,8 +97,8 @@ static void destroy_call_elem(grpc_call_element* elem,
static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
- call_data* calld = (call_data*)elem->call_data;
- GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0);
if (calld->client_stats != nullptr) {
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
@@ -120,7 +120,6 @@ static void start_transport_stream_op_batch(
}
// Chain to next filter.
grpc_call_next_op(elem, batch);
- GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
}
const grpc_channel_filter grpc_client_load_reporting_filter = {
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 1709e5622e..1c8809eabc 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -87,7 +87,6 @@
#include <grpc/byte_buffer_reader.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
@@ -106,8 +105,10 @@
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -242,12 +243,13 @@ typedef struct glb_lb_policy {
glb_lb_call_data* lb_calld;
/** response generator to inject address updates into \a lb_channel */
- grpc_fake_resolver_response_generator* response_generator;
+ grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
+ response_generator;
/** the RR policy to use of the backend servers returned by the LB server */
grpc_lb_policy* rr_policy;
- grpc_closure on_rr_connectivity_changed;
+ /** the connectivity state of the embedded RR policy */
grpc_connectivity_state rr_connectivity_state;
bool started_picking;
@@ -290,6 +292,12 @@ typedef struct glb_lb_policy {
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
+ /** called upon changes to the RR's connectivity. */
+ grpc_closure rr_on_connectivity_changed;
+
+ /** called upon reresolution request from the RR policy. */
+ grpc_closure rr_on_reresolution_requested;
+
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
@@ -318,7 +326,8 @@ static void glb_lb_call_data_ref(glb_lb_call_data* lb_calld,
const gpr_atm count = gpr_atm_acq_load(&lb_calld->refs.count);
gpr_log(GPR_DEBUG, "[%s %p] lb_calld %p REF %lu->%lu (%s)",
grpc_lb_glb_trace.name(), lb_calld->glb_policy, lb_calld,
- (unsigned long)(count - 1), (unsigned long)count, reason);
+ static_cast<unsigned long>(count - 1),
+ static_cast<unsigned long>(count), reason);
}
}
@@ -329,7 +338,8 @@ static void glb_lb_call_data_unref(glb_lb_call_data* lb_calld,
const gpr_atm count = gpr_atm_acq_load(&lb_calld->refs.count);
gpr_log(GPR_DEBUG, "[%s %p] lb_calld %p UNREF %lu->%lu (%s)",
grpc_lb_glb_trace.name(), lb_calld->glb_policy, lb_calld,
- (unsigned long)(count + 1), (unsigned long)count, reason);
+ static_cast<unsigned long>(count + 1),
+ static_cast<unsigned long>(count), reason);
}
if (done) {
GPR_ASSERT(lb_calld->lb_call != nullptr);
@@ -370,7 +380,7 @@ static grpc_error* initial_metadata_add_lb_token(
}
static void destroy_client_stats(void* arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
+ grpc_grpclb_client_stats_unref(static_cast<grpc_grpclb_client_stats*>(arg));
}
static void pending_pick_set_metadata_and_context(pending_pick* pp) {
@@ -406,7 +416,7 @@ static void pending_pick_set_metadata_and_context(pending_pick* pp) {
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
static void pending_pick_complete(void* arg, grpc_error* error) {
- pending_pick* pp = (pending_pick*)arg;
+ pending_pick* pp = static_cast<pending_pick*>(arg);
pending_pick_set_metadata_and_context(pp);
GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
@@ -414,7 +424,7 @@ static void pending_pick_complete(void* arg, grpc_error* error) {
static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
grpc_lb_policy_pick_state* pick) {
- pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
+ pending_pick* pp = static_cast<pending_pick*>(gpr_zalloc(sizeof(*pp)));
pp->pick = pick;
pp->glb_policy = glb_policy;
GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
@@ -431,7 +441,7 @@ static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
+ pending_ping* pping = static_cast<pending_ping*>(gpr_zalloc(sizeof(*pping)));
pping->on_initiate = on_initiate;
pping->on_ack = on_ack;
pping->next = *root;
@@ -446,7 +456,7 @@ static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
if (log) {
gpr_log(GPR_ERROR,
"Invalid port '%d' at index %lu of serverlist. Ignoring.",
- server->port, (unsigned long)idx);
+ server->port, static_cast<unsigned long>(idx));
}
return false;
}
@@ -455,7 +465,7 @@ static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
gpr_log(GPR_ERROR,
"Expected IP to be 4 or 16 bytes, got %d at index %lu of "
"serverlist. Ignoring",
- ip->size, (unsigned long)idx);
+ ip->size, static_cast<unsigned long>(idx));
}
return false;
}
@@ -485,19 +495,21 @@ static void parse_server(const grpc_grpclb_server* server,
grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server->drop) return;
- const uint16_t netorder_port = htons((uint16_t)server->port);
+ const uint16_t netorder_port = htons(static_cast<uint16_t>(server->port));
/* the addresses are given in binary format (a in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address* ip = &server->ip_address;
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
+ struct sockaddr_in* addr4 =
+ reinterpret_cast<struct sockaddr_in*>(&addr->addr);
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
+ struct sockaddr_in6* addr6 =
+ reinterpret_cast<struct sockaddr_in6*>(&addr->addr);
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
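
parse_server() above rebuilds a sockaddr from the balancer's wire representation: a port plus a 4- or 16-byte binary address, with the port converted to network byte order. Below is a minimal self-contained sketch of the same conversion using only POSIX socket headers; ServerEntry and ParseServer are illustrative stand-ins, not gRPC types.

    #include <arpa/inet.h>   // htons, ntohs, inet_ntop
    #include <netinet/in.h>  // sockaddr_in, sockaddr_in6
    #include <sys/socket.h>  // sockaddr_storage, AF_INET
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Hypothetical stand-in for the grpclb server entry's address fields.
    struct ServerEntry {
      int port;
      size_t ip_size;        // 4 for IPv4, 16 for IPv6
      unsigned char ip[16];  // binary address bytes
    };

    // Fills a sockaddr_storage from a ServerEntry, mirroring parse_server().
    static bool ParseServer(const ServerEntry& server, sockaddr_storage* out) {
      std::memset(out, 0, sizeof(*out));
      const uint16_t netorder_port = htons(static_cast<uint16_t>(server.port));
      if (server.ip_size == 4) {
        auto* addr4 = reinterpret_cast<sockaddr_in*>(out);
        addr4->sin_family = AF_INET;
        std::memcpy(&addr4->sin_addr, server.ip, 4);
        addr4->sin_port = netorder_port;
        return true;
      } else if (server.ip_size == 16) {
        auto* addr6 = reinterpret_cast<sockaddr_in6*>(out);
        addr6->sin6_family = AF_INET6;
        std::memcpy(&addr6->sin6_addr, server.ip, 16);
        addr6->sin6_port = netorder_port;
        return true;
      }
      return false;  // other sizes are rejected, as in is_server_valid().
    }

    int main() {
      ServerEntry s{8080, 4, {127, 0, 0, 1}};
      sockaddr_storage ss;
      if (ParseServer(s, &ss)) {
        char buf[INET6_ADDRSTRLEN];
        const auto* a4 = reinterpret_cast<const sockaddr_in*>(&ss);
        inet_ntop(AF_INET, &a4->sin_addr, buf, sizeof(buf));
        std::printf("%s:%d\n", buf, ntohs(a4->sin_port));
      }
    }
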
@@ -584,9 +596,8 @@ static grpc_lb_addresses* extract_backend_addresses_locked(
return backend_addresses;
}
-static void update_lb_connectivity_status_locked(
- glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
- grpc_error* rr_state_error) {
+static void update_lb_connectivity_status_locked(glb_lb_policy* glb_policy,
+ grpc_error* rr_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&glb_policy->state_tracker);
/* The new connectivity status is a function of the previous one and the new
@@ -618,7 +629,7 @@ static void update_lb_connectivity_status_locked(
*
* (*) This function mustn't be called during shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
- switch (rr_state) {
+ switch (glb_policy->rr_connectivity_state) {
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN:
GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
@@ -632,11 +643,12 @@ static void update_lb_connectivity_status_locked(
gpr_log(
GPR_INFO,
"[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
- glb_policy, grpc_connectivity_state_name(rr_state),
+ glb_policy,
+ grpc_connectivity_state_name(glb_policy->rr_connectivity_state),
glb_policy->rr_policy);
}
- grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
- rr_state_error,
+ grpc_connectivity_state_set(&glb_policy->state_tracker,
+ glb_policy->rr_connectivity_state, rr_state_error,
"update_lb_connectivity_status_locked");
}
@@ -682,7 +694,7 @@ static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
grpc_grpclb_client_stats_ref(glb_policy->lb_calld->client_stats);
}
GPR_ASSERT(pp->pick->user_data == nullptr);
- pp->pick->user_data = (void**)&pp->lb_token;
+ pp->pick->user_data = reinterpret_cast<void**>(&pp->lb_token);
// Pick via the RR policy.
bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
if (pick_done) {
@@ -714,7 +726,8 @@ static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != nullptr);
- grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args* args =
+ static_cast<grpc_lb_policy_args*>(gpr_zalloc(sizeof(*args)));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
@@ -733,11 +746,36 @@ static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
gpr_free(args);
}
-static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
+static void rr_on_reresolution_requested_locked(void* arg, grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ if (glb_policy->shutting_down || error != GRPC_ERROR_NONE) {
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "rr_on_reresolution_requested_locked");
+ return;
+ }
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(
+ GPR_DEBUG,
+ "[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
+ glb_policy, glb_policy->rr_policy);
+ }
+  // If we are talking to a balancer, we expect to get updated addresses from
+ // the balancer, so we can ignore the re-resolution request from the RR
+ // policy. Otherwise, handle the re-resolution request using glb's original
+ // re-resolution closure.
+ if (glb_policy->lb_calld == nullptr ||
+ !glb_policy->lb_calld->seen_initial_response) {
+ grpc_lb_policy_try_reresolve(&glb_policy->base, &grpc_lb_glb_trace,
+ GRPC_ERROR_NONE);
+ }
+ // Give back the wrapper closure to the RR policy.
+ grpc_lb_policy_set_reresolve_closure_locked(
+ glb_policy->rr_policy, &glb_policy->rr_on_reresolution_requested);
+}
+
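
The closure added above forwards the RR policy's re-resolution request to the resolver only when grpclb is not being fed addresses by a balancer (no lb_calld, or no initial response seen yet). A small standalone sketch of just that gating decision, with invented stand-in types:

    #include <cstdio>

    // Illustrative stand-ins for the bits of glb_lb_policy state consulted above.
    struct BalancerCall { bool seen_initial_response; };
    struct GlbState {
      bool shutting_down;
      BalancerCall* lb_calld;  // null when no balancer call is in flight
    };

    // Returns true when a re-resolution request from the wrapped RR policy
    // should be passed up to the resolver, mirroring the condition in
    // rr_on_reresolution_requested_locked().
    bool ShouldForwardReresolution(const GlbState& glb) {
      if (glb.shutting_down) return false;
      return glb.lb_calld == nullptr || !glb.lb_calld->seen_initial_response;
    }

    int main() {
      BalancerCall call{true};
      GlbState with_balancer{false, &call};
      GlbState without_balancer{false, nullptr};
      std::printf("with balancer: %d, without: %d\n",
                  static_cast<int>(ShouldForwardReresolution(with_balancer)),
                  static_cast<int>(ShouldForwardReresolution(without_balancer)));
    }
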
static void create_rr_locked(glb_lb_policy* glb_policy,
grpc_lb_policy_args* args) {
GPR_ASSERT(glb_policy->rr_policy == nullptr);
-
grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
if (new_rr_policy == nullptr) {
gpr_log(GPR_ERROR,
@@ -750,29 +788,25 @@ static void create_rr_locked(glb_lb_policy* glb_policy,
glb_policy->rr_policy);
return;
}
+ GRPC_LB_POLICY_REF(&glb_policy->base, "rr_on_reresolution_requested_locked");
grpc_lb_policy_set_reresolve_closure_locked(
- new_rr_policy, glb_policy->base.request_reresolution);
- glb_policy->base.request_reresolution = nullptr;
+ new_rr_policy, &glb_policy->rr_on_reresolution_requested);
glb_policy->rr_policy = new_rr_policy;
grpc_error* rr_state_error = nullptr;
glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
glb_policy->rr_policy, &rr_state_error);
/* Connectivity state is a function of the RR policy updated/created */
- update_lb_connectivity_status_locked(
- glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
+ update_lb_connectivity_status_locked(glb_policy, rr_state_error);
/* Add the gRPC LB's interested_parties pollset_set to that of the newly
* created RR policy. This will make the RR policy progress upon activity on
* gRPC LB, which in turn is tied to the application's call */
grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
- GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
- on_rr_connectivity_changed_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
/* Subscribe to changes to the connectivity of the new RR */
- GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "rr_on_connectivity_changed_locked");
grpc_lb_policy_notify_on_state_change_locked(
glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
- &glb_policy->on_rr_connectivity_changed);
+ &glb_policy->rr_on_connectivity_changed);
grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
// Send pending picks to RR policy.
pending_pick* pp;
@@ -820,28 +854,18 @@ static void rr_handover_locked(glb_lb_policy* glb_policy) {
lb_policy_args_destroy(args);
}
-static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
+static void rr_on_connectivity_changed_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (glb_policy->shutting_down) {
- GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
- return;
- }
- if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- /* An RR policy that has transitioned into the SHUTDOWN connectivity state
- * should not be considered for picks or updates: the SHUTDOWN state is a
- * sink, policies can't transition back from it. .*/
- GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
- glb_policy->rr_policy = nullptr;
- GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "rr_on_connectivity_changed_locked");
return;
}
- /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
- update_lb_connectivity_status_locked(
- glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
- /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
+ update_lb_connectivity_status_locked(glb_policy, GRPC_ERROR_REF(error));
+ // Resubscribe. Reuse the "rr_on_connectivity_changed_locked" ref.
grpc_lb_policy_notify_on_state_change_locked(
glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
- &glb_policy->on_rr_connectivity_changed);
+ &glb_policy->rr_on_connectivity_changed);
}
static void destroy_balancer_name(void* balancer_name) {
@@ -857,8 +881,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
}
static int balancer_name_cmp_fn(void* a, void* b) {
- const char* a_str = (const char*)a;
- const char* b_str = (const char*)b;
+ const char* a_str = static_cast<const char*>(a);
+ const char* b_str = static_cast<const char*>(b);
return strcmp(a_str, b_str);
}
@@ -872,7 +896,7 @@ static int balancer_name_cmp_fn(void* a, void* b) {
* - \a args: other args inherited from the grpclb policy. */
static grpc_channel_args* build_lb_channel_args(
const grpc_lb_addresses* addresses,
- grpc_fake_resolver_response_generator* response_generator,
+ grpc_core::FakeResolverResponseGenerator* response_generator,
const grpc_channel_args* args) {
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -885,8 +909,8 @@ static grpc_channel_args* build_lb_channel_args(
grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
grpc_slice_hash_table_entry* targets_info_entries =
- (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
- num_grpclb_addrs);
+ static_cast<grpc_slice_hash_table_entry*>(
+ gpr_zalloc(sizeof(*targets_info_entries) * num_grpclb_addrs));
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -929,7 +953,7 @@ static grpc_channel_args* build_lb_channel_args(
}
static void glb_destroy(grpc_lb_policy* pol) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
GPR_ASSERT(glb_policy->pending_picks == nullptr);
GPR_ASSERT(glb_policy->pending_pings == nullptr);
gpr_free((void*)glb_policy->server_name);
@@ -941,14 +965,15 @@ static void glb_destroy(grpc_lb_policy* pol) {
if (glb_policy->fallback_backend_addresses != nullptr) {
grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
}
- grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
+ // TODO(roth): Remove this once the LB policy becomes a C++ object.
+ glb_policy->response_generator.reset();
grpc_subchannel_index_unref();
gpr_free(glb_policy);
}
static void glb_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
glb_policy->shutting_down = true;
if (glb_policy->lb_calld != nullptr) {
@@ -963,8 +988,6 @@ static void glb_shutdown_locked(grpc_lb_policy* pol,
if (glb_policy->rr_policy != nullptr) {
grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
- } else {
- grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
}
// We destroy the LB channel here because
// glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
@@ -976,6 +999,7 @@ static void glb_shutdown_locked(grpc_lb_policy* pol,
}
grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "glb_shutdown");
+ grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
// Clear pending picks.
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
@@ -1024,7 +1048,7 @@ static void glb_shutdown_locked(grpc_lb_policy* pol,
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
@@ -1061,7 +1085,7 @@ static void glb_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
@@ -1108,7 +1132,7 @@ static void start_picking_locked(glb_lb_policy* glb_policy) {
}
static void glb_exit_idle_locked(grpc_lb_policy* pol) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}
@@ -1116,7 +1140,7 @@ static void glb_exit_idle_locked(grpc_lb_policy* pol) {
static int glb_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
pending_pick* pp = pending_pick_create(glb_policy, pick);
bool pick_done = false;
if (glb_policy->rr_policy != nullptr) {
@@ -1162,14 +1186,14 @@ static int glb_pick_locked(grpc_lb_policy* pol,
static grpc_connectivity_state glb_check_connectivity_locked(
grpc_lb_policy* pol, grpc_error** connectivity_error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
return grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
}
static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
} else {
@@ -1183,13 +1207,13 @@ static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
current, notify);
}
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
glb_policy->retry_timer_callback_pending = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE &&
glb_policy->lb_calld == nullptr) {
@@ -1241,7 +1265,7 @@ static void schedule_next_client_load_report(glb_lb_call_data* lb_calld) {
}
static void client_load_report_done_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
grpc_byte_buffer_destroy(lb_calld->send_message_payload);
lb_calld->send_message_payload = nullptr;
@@ -1254,8 +1278,8 @@ static void client_load_report_done_locked(void* arg, grpc_error* error) {
static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
grpc_grpclb_dropped_call_counts* drop_entries =
- (grpc_grpclb_dropped_call_counts*)
- request->client_stats.calls_finished_with_drop.arg;
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ request->client_stats.calls_finished_with_drop.arg);
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
request->client_stats.num_calls_finished_with_client_failed_to_send ==
@@ -1304,7 +1328,7 @@ static void send_client_load_report_locked(glb_lb_call_data* lb_calld) {
}
static void maybe_send_client_load_report_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
lb_calld->client_load_report_timer_callback_pending = false;
if (error != GRPC_ERROR_NONE || lb_calld != glb_policy->lb_calld) {
@@ -1336,7 +1360,8 @@ static glb_lb_call_data* lb_call_data_create_locked(glb_lb_policy* glb_policy) {
glb_policy->lb_call_timeout_ms == 0
? GRPC_MILLIS_INF_FUTURE
: grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)gpr_zalloc(sizeof(*lb_calld));
+ glb_lb_call_data* lb_calld =
+ static_cast<glb_lb_call_data*>(gpr_zalloc(sizeof(*lb_calld)));
lb_calld->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@@ -1410,7 +1435,7 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
glb_lb_call_data_ref(glb_policy->lb_calld,
"lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, (size_t)(op - ops),
+ glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&glb_policy->lb_calld->lb_on_sent_initial_request);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv initial metadata.
@@ -1430,7 +1455,7 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op++;
glb_lb_call_data_ref(glb_policy->lb_calld, "lb_on_response_received_locked");
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, (size_t)(op - ops),
+ glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&glb_policy->lb_calld->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv server status.
@@ -1448,13 +1473,13 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
// ref instead of a new ref. When it's invoked, it's the initial ref that is
// unreffed.
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, (size_t)(op - ops),
+ glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&glb_policy->lb_calld->lb_on_server_status_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
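
query_for_backends_locked() fills a fixed-size grpc_op array through a cursor pointer and passes static_cast<size_t>(op - ops) as the batch size. A stripped-down sketch of that pointer-difference idiom (Op is an invented placeholder for grpc_op):

    #include <cstddef>
    #include <cstdio>

    // Invented placeholder for grpc_op.
    struct Op { const char* name; };

    int main() {
      Op ops[4];
      Op* op = ops;  // cursor into the fixed-size array
      op->name = "SEND_INITIAL_METADATA";
      op++;
      op->name = "RECV_INITIAL_METADATA";
      op++;
      // The batch size is simply the pointer difference, the same idiom as
      // static_cast<size_t>(op - ops) above.
      const size_t nops = static_cast<size_t>(op - ops);
      std::printf("starting batch with %zu ops\n", nops);
      for (size_t i = 0; i < nops; ++i) std::printf("  %s\n", ops[i].name);
    }
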
static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
grpc_byte_buffer_destroy(lb_calld->send_message_payload);
lb_calld->send_message_payload = nullptr;
// If we attempted to send a client load report before the initial request was
@@ -1468,7 +1493,7 @@ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
}
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
// Empty payload means the LB call was cancelled.
if (lb_calld != glb_policy->lb_calld ||
@@ -1591,7 +1616,7 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
// Reuse the "lb_on_response_received_locked" ref taken in
// query_for_backends_locked().
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lb_calld->lb_call, ops, (size_t)(op - ops),
+ lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&lb_calld->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
@@ -1601,7 +1626,7 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
}
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
GPR_ASSERT(lb_calld->lb_call != nullptr);
if (grpc_lb_glb_trace.enabled()) {
@@ -1614,6 +1639,8 @@ static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
lb_calld, lb_calld->lb_call, grpc_error_string(error));
gpr_free(status_details);
}
+ grpc_lb_policy_try_reresolve(&glb_policy->base, &grpc_lb_glb_trace,
+ GRPC_ERROR_NONE);
// If this lb_calld is still in use, this call ended because of a failure so
// we want to retry connecting. Otherwise, we have deliberately ended this
// call and no further action is required.
@@ -1638,20 +1665,19 @@ static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
}
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
glb_policy->fallback_timer_callback_pending = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
- if (glb_policy->serverlist == nullptr) {
- if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] Falling back to use backends from resolver",
- glb_policy);
- }
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
- rr_handover_locked(glb_policy);
+ if (glb_policy->serverlist == nullptr && !glb_policy->shutting_down &&
+ error == GRPC_ERROR_NONE) {
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Falling back to use backends from resolver",
+ glb_policy);
}
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
+ rr_handover_locked(glb_policy);
}
GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
}
@@ -1670,7 +1696,7 @@ static void fallback_update_locked(glb_lb_policy* glb_policy,
static void glb_update_locked(grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(policy);
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -1691,7 +1717,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
return;
}
const grpc_lb_addresses* addresses =
- (const grpc_lb_addresses*)arg->value.pointer.p;
+ static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
if (glb_policy->serverlist == nullptr) {
@@ -1701,9 +1727,8 @@ static void glb_update_locked(grpc_lb_policy* policy,
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
grpc_channel_args* lb_channel_args = build_lb_channel_args(
- addresses, glb_policy->response_generator, args->args);
- grpc_fake_resolver_response_generator_set_response(
- glb_policy->response_generator, lb_channel_args);
+ addresses, glb_policy->response_generator.get(), args->args);
+ glb_policy->response_generator->SetResponse(lb_channel_args);
grpc_channel_args_destroy(lb_channel_args);
// Start watching the LB channel connectivity for connection, if not
// already doing so.
@@ -1729,7 +1754,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
// stayed READY throughout the update (for example if the update is identical).
static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
@@ -1773,19 +1798,6 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
}
}
-static void glb_set_reresolve_closure_locked(
- grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
- GPR_ASSERT(!glb_policy->shutting_down);
- GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
- if (glb_policy->rr_policy != nullptr) {
- grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
- request_reresolution);
- } else {
- glb_policy->base.request_reresolution = request_reresolution;
- }
-}
-
/* Code wiring the policy with the rest of the core */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_destroy,
@@ -1797,8 +1809,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_exit_idle_locked,
glb_check_connectivity_locked,
glb_notify_on_state_change_locked,
- glb_update_locked,
- glb_set_reresolve_closure_locked};
+ glb_update_locked};
static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
@@ -1808,14 +1819,16 @@ static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
return nullptr;
}
- grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return nullptr;
- glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
+ glb_lb_policy* glb_policy =
+ static_cast<glb_lb_policy*>(gpr_zalloc(sizeof(*glb_policy)));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
@@ -1858,17 +1871,16 @@ static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
- grpc_fake_resolver_response_generator_create();
+ grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
grpc_channel_args* lb_channel_args = build_lb_channel_args(
- addresses, glb_policy->response_generator, args->args);
+ addresses, glb_policy->response_generator.get(), args->args);
char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
uri_str, args->client_channel_factory, lb_channel_args);
/* Propagate initial resolution */
- grpc_fake_resolver_response_generator_set_response(
- glb_policy->response_generator, lb_channel_args);
+ glb_policy->response_generator->SetResponse(lb_channel_args);
grpc_channel_args_destroy(lb_channel_args);
gpr_free(uri_str);
if (glb_policy->lb_channel == nullptr) {
@@ -1878,6 +1890,12 @@ static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
return nullptr;
}
grpc_subchannel_index_ref();
+ GRPC_CLOSURE_INIT(&glb_policy->rr_on_connectivity_changed,
+ rr_on_connectivity_changed_locked, glb_policy,
+ grpc_combiner_scheduler(args->combiner));
+ GRPC_CLOSURE_INIT(&glb_policy->rr_on_reresolution_requested,
+ rr_on_reresolution_requested_locked, glb_policy,
+ grpc_combiner_scheduler(args->combiner));
GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
glb_lb_channel_on_connectivity_changed_cb, glb_policy,
grpc_combiner_scheduler(args->combiner));
@@ -1920,7 +1938,8 @@ static bool maybe_add_client_load_reporting_filter(
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
+ builder, static_cast<const grpc_channel_filter*>(arg), nullptr,
+ nullptr);
}
return true;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index 1e7f34bdc7..013fb12aea 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
@@ -37,10 +37,11 @@ grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_slice_hash_table* targets_info,
- grpc_fake_resolver_response_generator* response_generator,
+ grpc_core::FakeResolverResponseGenerator* response_generator,
const grpc_channel_args* args) {
const grpc_arg to_add[] = {
- grpc_fake_resolver_response_generator_arg(response_generator)};
+ grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
+ response_generator)};
/* We remove:
*
* - The channel arg for the LB policy name, since we want to use the default
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index 56104b2ec0..2e34e3cab5 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -37,7 +37,7 @@ grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_slice_hash_table* targets_info,
- grpc_fake_resolver_response_generator* response_generator,
+ grpc_core::FakeResolverResponseGenerator* response_generator,
const grpc_channel_args* args);
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 15233d371c..5e615addbf 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -63,11 +63,12 @@ grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_slice_hash_table* targets_info,
- grpc_fake_resolver_response_generator* response_generator,
+ grpc_core::FakeResolverResponseGenerator* response_generator,
const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_lb_targets_info_create_channel_arg(targets_info),
- grpc_fake_resolver_response_generator_arg(response_generator)};
+ grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
+ response_generator)};
/* We remove:
*
* - The channel arg for the LB policy name, since we want to use the default
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
index e19a6a71aa..0b5a798be3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
@@ -24,7 +24,6 @@
#include <grpc/support/atm.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
#include "src/core/lib/channel/channel_args.h"
@@ -43,7 +42,7 @@ struct grpc_grpclb_client_stats {
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
grpc_grpclb_client_stats* client_stats =
- (grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats));
+ static_cast<grpc_grpclb_client_stats*>(gpr_zalloc(sizeof(*client_stats)));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
@@ -89,8 +88,8 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
// Record the drop.
if (client_stats->drop_token_counts == nullptr) {
client_stats->drop_token_counts =
- (grpc_grpclb_dropped_call_counts*)gpr_zalloc(
- sizeof(grpc_grpclb_dropped_call_counts));
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts)));
}
grpc_grpclb_dropped_call_counts* drop_token_counts =
client_stats->drop_token_counts;
@@ -105,9 +104,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
while (new_num_entries < drop_token_counts->num_entries + 1) {
new_num_entries *= 2;
}
- drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc(
- drop_token_counts->token_counts,
- new_num_entries * sizeof(grpc_grpclb_drop_token_count));
+ drop_token_counts->token_counts = static_cast<grpc_grpclb_drop_token_count*>(
+ gpr_realloc(drop_token_counts->token_counts,
+ new_num_entries * sizeof(grpc_grpclb_drop_token_count)));
grpc_grpclb_drop_token_count* new_entry =
&drop_token_counts->token_counts[drop_token_counts->num_entries++];
new_entry->token = gpr_strdup(token);
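
The hunk above grows the drop-token array geometrically (doubling) before appending a new entry. A self-contained sketch of the same pattern with plain realloc; the structures and the existing-token lookup are illustrative assumptions, and error handling is omitted:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Minimal stand-ins for the grpclb drop-token structures; the token is
    // kept as a borrowed pointer here, whereas the real code gpr_strdup()s it.
    struct DropTokenCount { const char* token; int count; };
    struct DropTokenCounts {
      DropTokenCount* token_counts = nullptr;
      size_t num_entries = 0;
      size_t capacity = 0;
    };

    // Records a drop for `token`, doubling the backing array whenever it is
    // full -- the same geometric growth as in the hunk above.
    void AddDrop(DropTokenCounts* counts, const char* token) {
      for (size_t i = 0; i < counts->num_entries; ++i) {
        if (std::strcmp(counts->token_counts[i].token, token) == 0) {
          ++counts->token_counts[i].count;
          return;
        }
      }
      if (counts->num_entries == counts->capacity) {
        const size_t new_capacity =
            counts->capacity == 0 ? 2 : counts->capacity * 2;
        counts->token_counts = static_cast<DropTokenCount*>(std::realloc(
            counts->token_counts, new_capacity * sizeof(DropTokenCount)));
        counts->capacity = new_capacity;
      }
      DropTokenCount* entry = &counts->token_counts[counts->num_entries++];
      entry->token = token;
      entry->count = 1;
    }

    int main() {
      DropTokenCounts counts;
      AddDrop(&counts, "rate_limiting");
      AddDrop(&counts, "load_balancing");
      AddDrop(&counts, "rate_limiting");
      for (size_t i = 0; i < counts.num_entries; ++i)
        std::printf("%s: %d\n", counts.token_counts[i].token,
                    counts.token_counts[i].count);
      std::free(counts.token_counts);
    }
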
@@ -115,7 +114,7 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
}
static void atomic_get_and_reset_counter(int64_t* value, gpr_atm* counter) {
- *value = (int64_t)gpr_atm_acq_load(counter);
+ *value = static_cast<int64_t>(gpr_atm_acq_load(counter));
gpr_atm_full_fetch_add(counter, (gpr_atm)(-*value));
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index fc781da330..c388b6ba77 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -25,7 +25,7 @@
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field,
void** arg) {
- grpc_grpclb_serverlist* sl = (grpc_grpclb_serverlist*)*arg;
+ grpc_grpclb_serverlist* sl = static_cast<grpc_grpclb_serverlist*>(*arg);
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -46,10 +46,10 @@ typedef struct decode_serverlist_arg {
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
void** arg) {
- decode_serverlist_arg* dec_arg = (decode_serverlist_arg*)*arg;
+ decode_serverlist_arg* dec_arg = static_cast<decode_serverlist_arg*>(*arg);
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
grpc_grpclb_server* server =
- (grpc_grpclb_server*)gpr_zalloc(sizeof(grpc_grpclb_server));
+ static_cast<grpc_grpclb_server*>(gpr_zalloc(sizeof(grpc_grpclb_server)));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -60,8 +60,8 @@ static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
}
grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name) {
- grpc_grpclb_request* req =
- (grpc_grpclb_request*)gpr_malloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request* req = static_cast<grpc_grpclb_request*>(
+ gpr_malloc(sizeof(grpc_grpclb_request)));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -80,15 +80,15 @@ static void populate_timestamp(gpr_timespec timestamp,
static bool encode_string(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
- char* str = (char*)*arg;
+ char* str = static_cast<char*>(*arg);
if (!pb_encode_tag_for_field(stream, field)) return false;
- return pb_encode_string(stream, (uint8_t*)str, strlen(str));
+ return pb_encode_string(stream, reinterpret_cast<uint8_t*>(str), strlen(str));
}
static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
grpc_grpclb_dropped_call_counts* drop_entries =
- (grpc_grpclb_dropped_call_counts*)*arg;
+ static_cast<grpc_grpclb_dropped_call_counts*>(*arg);
if (drop_entries == nullptr) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -107,8 +107,8 @@ static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats* client_stats) {
- grpc_grpclb_request* req =
- (grpc_grpclb_request*)gpr_zalloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request* req = static_cast<grpc_grpclb_request*>(
+ gpr_zalloc(sizeof(grpc_grpclb_request)));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -123,8 +123,8 @@ grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
&req->client_stats.num_calls_finished,
&req->client_stats.num_calls_finished_with_client_failed_to_send,
&req->client_stats.num_calls_finished_known_received,
- (grpc_grpclb_dropped_call_counts**)&req->client_stats
- .calls_finished_with_drop.arg);
+ reinterpret_cast<grpc_grpclb_dropped_call_counts**>(
+ &req->client_stats.calls_finished_with_drop.arg));
return req;
}
@@ -148,8 +148,8 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request) {
void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
if (request->has_client_stats) {
grpc_grpclb_dropped_call_counts* drop_entries =
- (grpc_grpclb_dropped_call_counts*)
- request->client_stats.calls_finished_with_drop.arg;
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ request->client_stats.calls_finished_with_drop.arg);
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
gpr_free(request);
@@ -171,8 +171,8 @@ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return nullptr;
grpc_grpclb_initial_response* initial_res =
- (grpc_grpclb_initial_response*)gpr_malloc(
- sizeof(grpc_grpclb_initial_response));
+ static_cast<grpc_grpclb_initial_response*>(
+ gpr_malloc(sizeof(grpc_grpclb_initial_response)));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@@ -185,8 +185,8 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist* sl =
- (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist* sl = static_cast<grpc_grpclb_serverlist*>(
+ gpr_zalloc(sizeof(grpc_grpclb_serverlist)));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -200,8 +200,8 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
- sl->num_servers);
+ sl->servers = static_cast<grpc_grpclb_server**>(
+ gpr_zalloc(sizeof(grpc_grpclb_server*) * sl->num_servers));
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
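
The serverlist decoder runs over the encoded response twice: count_serverlist() first counts entries so the array can be sized exactly, then decode_serverlist() fills it in. The sketch below shows the same two-pass shape over an invented length-prefixed byte format (no nanopb):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Invented wire format for the sketch: each record is a 1-byte length
    // followed by that many payload bytes.
    struct Record { std::vector<uint8_t> payload; };

    // First pass: count records without materializing them.
    static size_t CountRecords(const std::vector<uint8_t>& buf) {
      size_t count = 0;
      for (size_t pos = 0; pos < buf.size(); pos += 1 + buf[pos]) ++count;
      return count;
    }

    // Second pass: allocate exactly `count` slots, then decode into them,
    // the same shape as decode_serverlist() filling servers[decoding_idx++].
    static std::vector<Record> DecodeRecords(const std::vector<uint8_t>& buf) {
      std::vector<Record> records(CountRecords(buf));
      size_t idx = 0;
      for (size_t pos = 0; pos < buf.size();) {
        const uint8_t len = buf[pos++];
        records[idx].payload.assign(buf.begin() + pos, buf.begin() + pos + len);
        pos += len;
        ++idx;
      }
      return records;
    }

    int main() {
      const std::vector<uint8_t> wire = {2, 'h', 'i', 3, 'l', 'b', '!'};
      for (const Record& r : DecodeRecords(wire)) {
        std::printf("record of %zu bytes\n", r.payload.size());
      }
    }
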
@@ -231,14 +231,14 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) {
grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist* sl) {
- grpc_grpclb_serverlist* copy =
- (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist* copy = static_cast<grpc_grpclb_serverlist*>(
+ gpr_zalloc(sizeof(grpc_grpclb_serverlist)));
copy->num_servers = sl->num_servers;
- copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
- sl->num_servers);
+ copy->servers = static_cast<grpc_grpclb_server**>(
+ gpr_malloc(sizeof(grpc_grpclb_server*) * sl->num_servers));
for (size_t i = 0; i < sl->num_servers; i++) {
- copy->servers[i] =
- (grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));
+ copy->servers[i] = static_cast<grpc_grpclb_server*>(
+ gpr_malloc(sizeof(grpc_grpclb_server)));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
@@ -291,7 +291,7 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
}
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb) {
- return (grpc_millis)(
+ return static_cast<grpc_millis>(
(duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
(duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
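
grpc_grpclb_duration_to_millis() above folds the optional seconds and nanos fields into milliseconds, treating absent fields as zero. A tiny standalone version of the same arithmetic (assuming GPR_MS_PER_SEC == 1000 and GPR_NS_PER_MS == 1000000, spelled out here as literals):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for grpc_grpclb_duration: both fields are optional in the proto.
    struct Duration {
      bool has_seconds; int64_t seconds;
      bool has_nanos;   int32_t nanos;
    };

    // millis = seconds * 1000 + nanos / 1000000, with missing fields as zero.
    int64_t DurationToMillis(const Duration& d) {
      return (d.has_seconds ? d.seconds : 0) * 1000 +
             (d.has_nanos ? d.nanos : 0) / 1000000;
    }

    int main() {
      const Duration d{true, 2, true, 500000000};  // 2.5 seconds
      std::printf("%lld ms\n", static_cast<long long>(DurationToMillis(d)));
    }
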
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 725b78d478..296bdcb247 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -51,7 +51,7 @@ typedef struct {
} pick_first_lb_policy;
static void pf_destroy(grpc_lb_policy* pol) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
GPR_ASSERT(p->subchannel_list == nullptr);
GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
GPR_ASSERT(p->pending_picks == nullptr);
@@ -65,7 +65,7 @@ static void pf_destroy(grpc_lb_policy* pol) {
static void pf_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
@@ -105,7 +105,7 @@ static void pf_shutdown_locked(grpc_lb_policy* pol,
static void pf_cancel_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
@@ -128,7 +128,7 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pick != nullptr) {
@@ -165,7 +165,7 @@ static void start_picking_locked(pick_first_lb_policy* p) {
}
static void pf_exit_idle_locked(grpc_lb_policy* pol) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
if (!p->started_picking) {
start_picking_locked(p);
}
@@ -173,7 +173,7 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
static int pf_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
// If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) {
pick->connected_subchannel = p->selected->connected_subchannel;
@@ -200,21 +200,21 @@ static void destroy_unselected_subchannels_locked(pick_first_lb_policy* p) {
static grpc_connectivity_state pf_check_connectivity_locked(
grpc_lb_policy* pol, grpc_error** error) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void pf_notify_on_state_change_locked(grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
notify);
}
static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
if (p->selected) {
p->selected->connected_subchannel->Ping(on_initiate, on_ack);
} else {
@@ -229,7 +229,7 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error);
static void pf_update_locked(grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(policy);
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -249,10 +249,10 @@ static void pf_update_locked(grpc_lb_policy* policy,
return;
}
const grpc_lb_addresses* addresses =
- (const grpc_lb_addresses*)arg->value.pointer.p;
+ static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void*)p, (unsigned long)addresses->num_addresses);
+ (void*)p, static_cast<unsigned long>(addresses->num_addresses));
}
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
&p->base, &grpc_lb_pick_first_trace, addresses, args,
@@ -347,8 +347,9 @@ static void pf_update_locked(grpc_lb_policy* policy,
}
static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
- grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
- pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
+ grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
+ pick_first_lb_policy* p =
+ reinterpret_cast<pick_first_lb_policy*>(sd->subchannel_list->policy);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
@@ -519,14 +520,6 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
}
}
-static void pf_set_reresolve_closure_locked(
- grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
- GPR_ASSERT(!p->shutdown);
- GPR_ASSERT(policy->request_reresolution == nullptr);
- policy->request_reresolution = request_reresolution;
-}
-
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_destroy,
pf_shutdown_locked,
@@ -537,8 +530,7 @@ static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_exit_idle_locked,
pf_check_connectivity_locked,
pf_notify_on_state_change_locked,
- pf_update_locked,
- pf_set_reresolve_closure_locked};
+ pf_update_locked};
static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
@@ -547,7 +539,8 @@ static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* create_pick_first(grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != nullptr);
- pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
+ pick_first_lb_policy* p =
+ static_cast<pick_first_lb_policy*>(gpr_zalloc(sizeof(*p)));
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 24c381a46d..b5b4c44ef1 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -82,8 +82,9 @@ static size_t get_next_ready_subchannel_index_locked(
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
"last_ready_subchannel_index=%lu",
- (void*)p, (unsigned long)p->subchannel_list->num_subchannels,
- (unsigned long)p->last_ready_subchannel_index);
+ (void*)p,
+ static_cast<unsigned long>(p->subchannel_list->num_subchannels),
+ static_cast<unsigned long>(p->last_ready_subchannel_index));
}
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
const size_t index = (i + p->last_ready_subchannel_index + 1) %
@@ -94,7 +95,7 @@ static size_t get_next_ready_subchannel_index_locked(
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%s",
(void*)p, (void*)p->subchannel_list->subchannels[index].subchannel,
- (void*)p->subchannel_list, (unsigned long)index,
+ (void*)p->subchannel_list, static_cast<unsigned long>(index),
grpc_connectivity_state_name(
p->subchannel_list->subchannels[index].curr_connectivity_state));
}
@@ -106,7 +107,7 @@ static size_t get_next_ready_subchannel_index_locked(
"subchannel_list %p",
(void*)p,
(void*)p->subchannel_list->subchannels[index].subchannel,
- (unsigned long)index, (void*)p->subchannel_list);
+ static_cast<unsigned long>(index), (void*)p->subchannel_list);
}
return index;
}
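
get_next_ready_subchannel_index_locked() scans at most once around the subchannel list, starting just after the last index that served a pick, and returns the first READY entry. A self-contained sketch of that modular scan (State and NextReadyIndex are illustrative names):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class State { IDLE, CONNECTING, READY, TRANSIENT_FAILURE };

    // Returns the index of the first READY entry, scanning at most once
    // around the list starting just after last_ready; returns states.size()
    // when nothing is READY.
    size_t NextReadyIndex(const std::vector<State>& states, size_t last_ready) {
      if (states.empty()) return 0;
      for (size_t i = 0; i < states.size(); ++i) {
        const size_t index = (i + last_ready + 1) % states.size();
        if (states[index] == State::READY) return index;
      }
      return states.size();
    }

    int main() {
      const std::vector<State> states = {State::READY, State::TRANSIENT_FAILURE,
                                         State::READY};
      size_t last = 0;
      for (int pick = 0; pick < 4; ++pick) {
        last = NextReadyIndex(states, last);  // rotates 2, 0, 2, 0, skipping TF
        std::printf("pick %d -> subchannel %zu\n", pick, last);
      }
    }
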
@@ -125,7 +126,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
- (void*)p, (unsigned long)last_ready_index,
+ (void*)p, static_cast<unsigned long>(last_ready_index),
(void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void*)p->subchannel_list->subchannels[last_ready_index]
.connected_subchannel.get());
@@ -133,7 +134,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
}
static void rr_destroy(grpc_lb_policy* pol) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void*)pol, (void*)pol);
@@ -147,7 +148,7 @@ static void rr_destroy(grpc_lb_policy* pol) {
static void rr_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
@@ -187,7 +188,7 @@ static void rr_shutdown_locked(grpc_lb_policy* pol,
static void rr_cancel_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
@@ -210,7 +211,7 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pick != nullptr) {
@@ -243,7 +244,7 @@ static void start_picking_locked(round_robin_lb_policy* p) {
}
static void rr_exit_idle_locked(grpc_lb_policy* pol) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
if (!p->started_picking) {
start_picking_locked(p);
}
@@ -251,7 +252,7 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
static int rr_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown);
@@ -328,20 +329,14 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
* 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING.
* CHECK: sd->curr_connectivity_state == CONNECTING.
*
- * 3) RULE: ALL subchannels are SHUTDOWN => policy is IDLE (and requests
- * re-resolution).
- * CHECK: subchannel_list->num_shutdown ==
- * subchannel_list->num_subchannels.
- *
- * 4) RULE: ALL subchannels are SHUTDOWN or TRANSIENT_FAILURE => policy is
- * TRANSIENT_FAILURE.
- * CHECK: subchannel_list->num_shutdown +
- * subchannel_list->num_transient_failures ==
+ * 3) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
+ * TRANSIENT_FAILURE.
+ * CHECK: subchannel_list->num_transient_failures ==
* subchannel_list->num_subchannels.
*/
- // TODO(juanlishen): For rule 4, we may want to re-resolve instead.
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
- round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
+ round_robin_lb_policy* p =
+ reinterpret_cast<round_robin_lb_policy*>(subchannel_list->policy);
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_IDLE);
if (subchannel_list->num_ready > 0) {
/* 1) READY */
@@ -351,19 +346,9 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
/* 2) CONNECTING */
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_NONE, "rr_connecting");
- } else if (subchannel_list->num_shutdown ==
+ } else if (subchannel_list->num_transient_failures ==
subchannel_list->num_subchannels) {
- /* 3) IDLE and re-resolve */
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
- GRPC_ERROR_NONE,
- "rr_exhausted_subchannels+reresolve");
- p->started_picking = false;
- grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
- GRPC_ERROR_NONE);
- } else if (subchannel_list->num_shutdown +
- subchannel_list->num_transient_failures ==
- subchannel_list->num_subchannels) {
- /* 4) TRANSIENT_FAILURE */
+ /* 3) TRANSIENT_FAILURE */
grpc_connectivity_state_set(&p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "rr_transient_failure");
@@ -372,9 +357,9 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
}
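
The rules spelled out in the comment above reduce to a priority check over the subchannel list's state counters: any READY wins, then any CONNECTING, then all-TRANSIENT_FAILURE. A standalone sketch of that aggregation, with invented counter names:

    #include <cstddef>
    #include <cstdio>

    enum class State { READY, CONNECTING, TRANSIENT_FAILURE, IDLE };

    // Illustrative counters kept per subchannel list.
    struct Counters {
      size_t num_subchannels;
      size_t num_ready;
      size_t num_connecting;
      size_t num_transient_failures;
    };

    // 1) any READY -> READY; 2) any CONNECTING -> CONNECTING;
    // 3) all TRANSIENT_FAILURE -> TRANSIENT_FAILURE; otherwise leave the
    // state alone (represented here by IDLE for simplicity).
    State AggregateState(const Counters& c) {
      if (c.num_ready > 0) return State::READY;
      if (c.num_connecting > 0) return State::CONNECTING;
      if (c.num_transient_failures == c.num_subchannels) {
        return State::TRANSIENT_FAILURE;
      }
      return State::IDLE;
    }

    int main() {
      const Counters c{3, 0, 1, 2};
      std::printf("aggregated state = %d\n",
                  static_cast<int>(AggregateState(c)));
    }
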
static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
- grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
+ grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
round_robin_lb_policy* p =
- (round_robin_lb_policy*)sd->subchannel_list->policy;
+ reinterpret_cast<round_robin_lb_policy*>(sd->subchannel_list->policy);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
@@ -387,6 +372,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
+ GPR_ASSERT(sd->subchannel != nullptr);
// If the policy is shutting down, unref and return.
if (p->shutdown) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
@@ -412,14 +398,19 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
- // Update state counters and new overall state.
- update_state_counters_locked(sd);
- update_lb_connectivity_status_locked(sd, GRPC_ERROR_REF(error));
// If the sd's new state is TRANSIENT_FAILURE, unref the *connected*
// subchannel, if any.
switch (sd->curr_connectivity_state) {
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
sd->connected_subchannel.reset();
+ if (grpc_lb_round_robin_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
+ "Requesting re-resolution",
+ p, sd->subchannel);
+ }
+ grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
+ GRPC_ERROR_NONE);
break;
}
case GRPC_CHANNEL_READY: {
@@ -437,13 +428,14 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
if (grpc_lb_round_robin_trace.enabled()) {
const unsigned long num_subchannels =
p->subchannel_list != nullptr
- ? (unsigned long)p->subchannel_list->num_subchannels
+ ? static_cast<unsigned long>(
+ p->subchannel_list->num_subchannels)
: 0;
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
"of %p (size %lu)",
- (void*)p, (void*)p->subchannel_list, num_subchannels,
- (void*)sd->subchannel_list, num_subchannels);
+ p, p->subchannel_list, num_subchannels, sd->subchannel_list,
+ num_subchannels);
}
if (p->subchannel_list != nullptr) {
// dispose of the current subchannel_list
@@ -455,7 +447,8 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
}
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
- * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
+ * p->pending_picks. This preemptively replicates rr_pick()'s actions.
+ */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
grpc_lb_subchannel_data* selected =
@@ -477,7 +470,8 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %lu)",
(void*)p, (void*)selected->subchannel,
- (void*)p->subchannel_list, (unsigned long)next_ready_index);
+ (void*)p->subchannel_list,
+ static_cast<unsigned long>(next_ready_index));
}
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
@@ -488,27 +482,33 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:; // fallthrough
}
+ // Update state counters and new overall state.
+ update_state_counters_locked(sd);
+ // Only update connectivity based on the selected subchannel list.
+ if (sd->subchannel_list == p->subchannel_list) {
+ update_lb_connectivity_status_locked(sd, GRPC_ERROR_REF(error));
+ }
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
}
static grpc_connectivity_state rr_check_connectivity_locked(
grpc_lb_policy* pol, grpc_error** error) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void rr_notify_on_state_change_locked(grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
notify);
}
static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
grpc_lb_subchannel_data* selected =
@@ -526,7 +526,7 @@ static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
static void rr_update_locked(grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(policy);
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -541,7 +541,8 @@ static void rr_update_locked(grpc_lb_policy* policy,
}
return;
}
- grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
@@ -562,6 +563,30 @@ static void rr_update_locked(grpc_lb_policy* policy,
return;
}
if (p->started_picking) {
+ for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
+ const grpc_connectivity_state subchannel_state =
+ grpc_subchannel_check_connectivity(
+ subchannel_list->subchannels[i].subchannel, nullptr);
+ // Override the default setting of IDLE for connectivity notification
+ // purposes if the subchannel is already in transient failure. Otherwise
+ // we'd be immediately notified of the IDLE-TRANSIENT_FAILURE
+ // discrepancy, attempt to re-resolve and end up here again.
+ // TODO(roth): As part of C++-ifying the subchannel_list API, design a
+ // better API for notifying the LB policy of subchannel states, which can
+ // be used both for the subchannel's initial state and for subsequent
+ // state changes. This will allow us to handle this more generally instead
+ // of special-casing TRANSIENT_FAILURE (e.g., we can also distribute any
+ // pending picks across all READY subchannels rather than sending them all
+ // to the first one).
+ if (subchannel_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ subchannel_list->subchannels[i].pending_connectivity_state_unsafe =
+ subchannel_list->subchannels[i].curr_connectivity_state =
+ subchannel_list->subchannels[i].prev_connectivity_state =
+ subchannel_state;
+ --subchannel_list->num_idle;
+ ++subchannel_list->num_transient_failures;
+ }
+ }
if (p->latest_pending_subchannel_list != nullptr) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
@@ -595,14 +620,6 @@ static void rr_update_locked(grpc_lb_policy* policy,
}
}
-static void rr_set_reresolve_closure_locked(
- grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
- GPR_ASSERT(!p->shutdown);
- GPR_ASSERT(policy->request_reresolution == nullptr);
- policy->request_reresolution = request_reresolution;
-}
-
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_destroy,
rr_shutdown_locked,
@@ -613,8 +630,7 @@ static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_exit_idle_locked,
rr_check_connectivity_locked,
rr_notify_on_state_change_locked,
- rr_update_locked,
- rr_set_reresolve_closure_locked};
+ rr_update_locked};
static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
@@ -623,7 +639,8 @@ static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* round_robin_create(grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != nullptr);
- round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy* p =
+ static_cast<round_robin_lb_policy*>(gpr_zalloc(sizeof(*p)));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
@@ -631,7 +648,7 @@ static grpc_lb_policy* round_robin_create(grpc_lb_policy_factory* factory,
rr_update_locked(&p->base, args);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
- (unsigned long)p->subchannel_list->num_subchannels);
+ static_cast<unsigned long>(p->subchannel_list->num_subchannels));
}
return &p->base;
}
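
The counter seeding added to rr_update_locked above exists so that a subchannel already known to be in TRANSIENT_FAILURE does not report a spurious IDLE->TRANSIENT_FAILURE transition on the new list (which would immediately request re-resolution again). A minimal standalone sketch of that idea, using hypothetical simplified types rather than the actual gRPC structs:

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for the subchannel-list types.
enum class ConnState { kIdle, kConnecting, kReady, kTransientFailure };

struct Entry {
  ConnState prev = ConnState::kIdle;
  ConnState curr = ConnState::kIdle;
  ConnState pending = ConnState::kIdle;
};

struct SubchannelList {
  std::vector<Entry> entries;
  size_t num_idle = 0;
  size_t num_ready = 0;
  size_t num_transient_failures = 0;
};

// Seed entries that are already failing so the policy is not woken up by an
// IDLE -> TRANSIENT_FAILURE notification right after the update.
void SeedKnownFailures(SubchannelList* list,
                       const std::vector<ConnState>& known_states) {
  list->num_idle = list->entries.size();
  for (size_t i = 0; i < list->entries.size(); ++i) {
    if (known_states[i] == ConnState::kTransientFailure) {
      Entry& e = list->entries[i];
      e.prev = e.curr = e.pending = ConnState::kTransientFailure;
      --list->num_idle;
      ++list->num_transient_failures;
    }
  }
}
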
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index fa2ffcc796..e35c5e8db3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -37,7 +37,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(sd->subchannel, reason);
@@ -54,13 +54,16 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_lb_subchannel_data* sd) {
if (sd->subchannel_list->tracer->enabled()) {
- gpr_log(GPR_DEBUG,
- "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
- " (subchannel %p): requesting connectivity change notification",
- sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
- sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel);
+ gpr_log(
+ GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): requesting connectivity change "
+ "notification (from %s)",
+ sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
+ sd->subchannel_list,
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel,
+ grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe));
}
sd->connectivity_notification_pending = true;
grpc_subchannel_notify_on_state_change(
@@ -77,7 +80,7 @@ void grpc_lb_subchannel_data_stop_connectivity_watch(
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
@@ -89,7 +92,8 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list* subchannel_list =
- (grpc_lb_subchannel_list*)gpr_zalloc(sizeof(*subchannel_list));
+ static_cast<grpc_lb_subchannel_list*>(
+ gpr_zalloc(sizeof(*subchannel_list)));
if (tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
@@ -98,8 +102,8 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
- subchannel_list->subchannels = (grpc_lb_subchannel_data*)gpr_zalloc(
- sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
+ subchannel_list->subchannels = static_cast<grpc_lb_subchannel_data*>(
+ gpr_zalloc(sizeof(grpc_lb_subchannel_data) * addresses->num_addresses));
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
@@ -188,8 +192,8 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
- subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
- reason);
+ subchannel_list, static_cast<unsigned long>(count - 1),
+ static_cast<unsigned long>(count), reason);
}
}
@@ -200,8 +204,8 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
- subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
- reason);
+ subchannel_list, static_cast<unsigned long>(count + 1),
+ static_cast<unsigned long>(count), reason);
}
if (done) {
subchannel_list_destroy(subchannel_list);
@@ -228,7 +232,7 @@ static void subchannel_data_cancel_connectivity_watch(
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(sd->subchannel, nullptr, nullptr,
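
The trace messages in this file all derive the subchannel's index with sd - sd->subchannel_list->subchannels, i.e. pointer subtraction against the start of the contiguous array. A tiny self-contained sketch of that pattern with hypothetical types:

#include <cstddef>
#include <cstdio>

struct List;
struct Element { List* owner; };
struct List { Element* elements; size_t num_elements; };

// Recover an element's index from its address within the owning array.
size_t IndexOf(const Element* e) {
  return static_cast<size_t>(e - e->owner->elements);
}

int main() {
  Element storage[3];
  List list{storage, 3};
  for (auto& e : storage) e.owner = &list;
  std::printf("index of middle element: %zu\n", IndexOf(&storage[1]));  // 1
}
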
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 3377605263..91537f3afe 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -101,8 +101,6 @@ struct grpc_lb_subchannel_list {
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
- /** how many subchannels are in state SHUTDOWN */
- size_t num_shutdown;
/** how many subchannels are in state IDLE */
size_t num_idle;
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.cc b/src/core/ext/filters/client_channel/lb_policy_factory.cc
index dbf69fdcba..f2a800b221 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.cc
@@ -29,11 +29,12 @@
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
grpc_lb_addresses* addresses =
- (grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses));
+ static_cast<grpc_lb_addresses*>(gpr_zalloc(sizeof(grpc_lb_addresses)));
addresses->num_addresses = num_addresses;
addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
- addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size);
+ addresses->addresses =
+ static_cast<grpc_lb_address*>(gpr_zalloc(addresses_size));
return addresses;
}
@@ -124,14 +125,14 @@ void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses) {
}
static void* lb_addresses_copy(void* addresses) {
- return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses);
+ return grpc_lb_addresses_copy(static_cast<grpc_lb_addresses*>(addresses));
}
static void lb_addresses_destroy(void* addresses) {
- grpc_lb_addresses_destroy((grpc_lb_addresses*)addresses);
+ grpc_lb_addresses_destroy(static_cast<grpc_lb_addresses*>(addresses));
}
static int lb_addresses_cmp(void* addresses1, void* addresses2) {
- return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1,
- (grpc_lb_addresses*)addresses2);
+ return grpc_lb_addresses_cmp(static_cast<grpc_lb_addresses*>(addresses1),
+ static_cast<grpc_lb_addresses*>(addresses2));
}
static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
@@ -148,7 +149,7 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES);
if (lb_addresses_arg == nullptr || lb_addresses_arg->type != GRPC_ARG_POINTER)
return nullptr;
- return (grpc_lb_addresses*)lb_addresses_arg->value.pointer.p;
+ return static_cast<grpc_lb_addresses*>(lb_addresses_arg->value.pointer.p);
}
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
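
The lb_addresses_arg_vtable above is an instance of the pointer-valued channel-arg pattern: the owner supplies copy/destroy/cmp callbacks so the channel-args machinery can duplicate, release, and compare the payload without knowing its concrete type. A simplified standalone sketch of that shape (hypothetical types, not the real grpc_arg_pointer_vtable):

#include <cstdlib>

struct PointerVtable {
  void* (*copy)(void* p);
  void (*destroy)(void* p);
  int (*cmp)(void* a, void* b);
};

struct Addresses { int count; };

void* addresses_copy(void* p) {
  auto* src = static_cast<Addresses*>(p);
  auto* dst = static_cast<Addresses*>(std::malloc(sizeof(Addresses)));
  *dst = *src;  // shallow copy is enough for this toy payload
  return dst;
}
void addresses_destroy(void* p) { std::free(p); }
int addresses_cmp(void* a, void* b) {
  return static_cast<Addresses*>(a)->count - static_cast<Addresses*>(b)->count;
}

const PointerVtable kAddressesVtable = {addresses_copy, addresses_destroy,
                                        addresses_cmp};
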
diff --git a/src/core/ext/filters/client_channel/parse_address.cc b/src/core/ext/filters/client_channel/parse_address.cc
index c3309e36a3..473c7542df 100644
--- a/src/core/ext/filters/client_channel/parse_address.cc
+++ b/src/core/ext/filters/client_channel/parse_address.cc
@@ -26,9 +26,10 @@
#endif
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#ifdef GRPC_HAVE_UNIX_SOCKET
@@ -39,7 +40,8 @@ bool grpc_parse_unix(const grpc_uri* uri,
gpr_log(GPR_ERROR, "Expected 'unix' scheme, got '%s'", uri->scheme);
return false;
}
- struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
+ struct sockaddr_un* un =
+ reinterpret_cast<struct sockaddr_un*>(resolved_addr->addr);
const size_t maxlen = sizeof(un->sun_path);
const size_t path_len = strnlen(uri->path, maxlen);
if (path_len == maxlen) return false;
@@ -68,7 +70,7 @@ bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in* in = (struct sockaddr_in*)addr->addr;
+ struct sockaddr_in* in = reinterpret_cast<struct sockaddr_in*>(addr->addr);
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -84,7 +86,7 @@ bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
goto done;
}
- in->sin_port = htons((uint16_t)port_num);
+ in->sin_port = htons(static_cast<uint16_t>(port_num));
success = true;
done:
gpr_free(host);
@@ -114,14 +116,14 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr->addr;
+ struct sockaddr_in6* in6 = reinterpret_cast<struct sockaddr_in6*>(addr->addr);
in6->sin6_family = AF_INET6;
// Handle the RFC6874 syntax for IPv6 zone identifiers.
- char* host_end = (char*)gpr_memrchr(host, '%', strlen(host));
+ char* host_end = static_cast<char*>(gpr_memrchr(host, '%', strlen(host)));
if (host_end != nullptr) {
GPR_ASSERT(host_end >= host);
char host_without_scope[INET6_ADDRSTRLEN];
- size_t host_without_scope_len = (size_t)(host_end - host);
+ size_t host_without_scope_len = static_cast<size_t>(host_end - host);
uint32_t sin6_scope_id = 0;
strncpy(host_without_scope, host, host_without_scope_len);
host_without_scope[host_without_scope_len] = '\0';
@@ -153,7 +155,7 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
goto done;
}
- in6->sin6_port = htons((uint16_t)port_num);
+ in6->sin6_port = htons(static_cast<uint16_t>(port_num));
success = true;
done:
gpr_free(host);
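
The hunk above handles the RFC 6874 syntax where an IPv6 literal carries a zone identifier after '%': the address part is split off so it can be fed to inet_pton(), and the zone is used to resolve sin6_scope_id. A small self-contained sketch of that split, with a hypothetical helper name:

#include <cstdio>
#include <cstring>
#include <string>

// Split "addr%zone" into its two parts; returns true if a zone was present.
bool SplitZone(const char* host, std::string* address, std::string* zone) {
  const char* pct = std::strrchr(host, '%');
  if (pct == nullptr) {
    *address = host;
    zone->clear();
    return false;  // no zone identifier present
  }
  address->assign(host, static_cast<size_t>(pct - host));
  zone->assign(pct + 1);
  return true;
}

int main() {
  std::string addr, zone;
  SplitZone("fe80::1%eth0", &addr, &zone);
  std::printf("address=%s zone=%s\n", addr.c_str(), zone.c_str());
}
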
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
index 51778a20cc..b42597e363 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
@@ -34,8 +34,8 @@ typedef struct {
static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
bool at_start,
grpc_proxy_mapper* mapper) {
- list->list = (grpc_proxy_mapper**)gpr_realloc(
- list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*));
+ list->list = static_cast<grpc_proxy_mapper**>(gpr_realloc(
+ list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*)));
if (at_start) {
memmove(list->list + 1, list->list,
sizeof(grpc_proxy_mapper*) * list->num_mappers);
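
The registration function above grows the mapper array by one slot and, when at_start is set, shifts the existing entries up with memmove before writing the new entry at index 0. A simplified standalone sketch of that insertion pattern (hypothetical types):

#include <cstdlib>
#include <cstring>

struct Mapper;  // opaque

struct MapperList {
  Mapper** list = nullptr;
  size_t num = 0;
};

void Register(MapperList* l, Mapper* m, bool at_start) {
  l->list = static_cast<Mapper**>(
      std::realloc(l->list, (l->num + 1) * sizeof(Mapper*)));
  if (at_start) {
    std::memmove(l->list + 1, l->list, l->num * sizeof(Mapper*));
    l->list[0] = m;
  } else {
    l->list[l->num] = m;
  }
  ++l->num;
}
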
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index ff54e7179d..860c2eea1e 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -22,58 +22,12 @@
grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount(false,
"resolver_refcount");
-void grpc_resolver_init(grpc_resolver* resolver,
- const grpc_resolver_vtable* vtable,
- grpc_combiner* combiner) {
- resolver->vtable = vtable;
- resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
- gpr_ref_init(&resolver->refs, 1);
-}
+namespace grpc_core {
-#ifndef NDEBUG
-void grpc_resolver_ref(grpc_resolver* resolver, const char* file, int line,
- const char* reason) {
- if (grpc_trace_resolver_refcount.enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "RESOLVER:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
- old_refs, old_refs + 1, reason);
- }
-#else
-void grpc_resolver_ref(grpc_resolver* resolver) {
-#endif
- gpr_ref(&resolver->refs);
-}
+Resolver::Resolver(grpc_combiner* combiner)
+ : InternallyRefCountedWithTracing(&grpc_trace_resolver_refcount),
+ combiner_(GRPC_COMBINER_REF(combiner, "resolver")) {}
-#ifndef NDEBUG
-void grpc_resolver_unref(grpc_resolver* resolver, const char* file, int line,
- const char* reason) {
- if (grpc_trace_resolver_refcount.enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "RESOLVER:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
- old_refs, old_refs - 1, reason);
- }
-#else
-void grpc_resolver_unref(grpc_resolver* resolver) {
-#endif
- if (gpr_unref(&resolver->refs)) {
- grpc_combiner* combiner = resolver->combiner;
- resolver->vtable->destroy(resolver);
- GRPC_COMBINER_UNREF(combiner, "resolver");
- }
-}
+Resolver::~Resolver() { GRPC_COMBINER_UNREF(combiner_, "resolver"); }
-void grpc_resolver_shutdown_locked(grpc_resolver* resolver) {
- resolver->vtable->shutdown_locked(resolver);
-}
-
-void grpc_resolver_channel_saw_error_locked(grpc_resolver* resolver) {
- resolver->vtable->channel_saw_error_locked(resolver);
-}
-
-void grpc_resolver_next_locked(grpc_resolver* resolver,
- grpc_channel_args** result,
- grpc_closure* on_complete) {
- resolver->vtable->next_locked(resolver, result, on_complete);
-}
+} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index f6a4af01d6..62fcb49a41 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -19,67 +19,110 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H
-#include "src/core/ext/filters/client_channel/subchannel.h"
-#include "src/core/lib/iomgr/iomgr.h"
+#include <grpc/impl/codegen/grpc_types.h>
-typedef struct grpc_resolver grpc_resolver;
-typedef struct grpc_resolver_vtable grpc_resolver_vtable;
+#include "src/core/lib/gprpp/abstract.h"
+#include "src/core/lib/gprpp/orphanable.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/iomgr.h"
extern grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount;
-/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
-struct grpc_resolver {
- const grpc_resolver_vtable* vtable;
- gpr_refcount refs;
- grpc_combiner* combiner;
-};
+namespace grpc_core {
+
+/// Interface for name resolution.
+///
+/// This interface is designed to support both push-based and pull-based
+/// mechanisms. A push-based mechanism is one where the resolver will
+/// subscribe to updates for a given name, and the name service will
+/// proactively send new data to the resolver whenever the data associated
+/// with the name changes. A pull-based mechanism is one where the resolver
+/// needs to query the name service again to get updated information (e.g.,
+/// DNS).
+///
+/// Note: All methods with a "Locked" suffix must be called from the
+/// combiner passed to the constructor.
+class Resolver : public InternallyRefCountedWithTracing<Resolver> {
+ public:
+ // Not copyable nor movable.
+ Resolver(const Resolver&) = delete;
+ Resolver& operator=(const Resolver&) = delete;
+
+ /// Requests a callback when a new result becomes available.
+ /// When the new result is available, sets \a *result to the new result
+ /// and schedules \a on_complete for execution.
+ /// If resolution is fatally broken, sets \a *result to nullptr and
+ /// schedules \a on_complete with an error.
+ ///
+ /// Note that the client channel will almost always have a request
+ /// to \a NextLocked() pending. When it gets the callback, it will
+ /// process the new result and then immediately make another call to
+ /// \a NextLocked(). This allows push-based resolvers to provide new
+ /// data as soon as it becomes available.
+ virtual void NextLocked(grpc_channel_args** result,
+ grpc_closure* on_complete) GRPC_ABSTRACT;
+
+ /// Asks the resolver to obtain an updated resolver result, if
+ /// applicable.
+ ///
+ /// This is useful for pull-based implementations to decide when to
+ /// re-resolve. However, the implementation is not required to
+ /// re-resolve immediately upon receiving this call; it may instead
+ /// elect to delay based on some configured minimum time between
+ /// queries, to avoid hammering the name service with queries.
+ ///
+ /// For push-based implementations, this may be a no-op.
+ ///
+ /// If this causes new data to become available, then the currently
+ /// pending call to \a NextLocked() will return the new result.
+ ///
+ /// Note: Currently, all resolvers are required to return a new result
+ /// shortly after this method is called. For pull-based mechanisms, if
+ /// the implementation decides to delay querying the name service, it
+ /// should immediately return a new copy of the previously returned
+ /// result (and it can then return the updated data later, when it
+ /// actually does query the name service). For push-based mechanisms,
+ /// the implementation should immediately return a new copy of the
+ /// last-seen result.
+ /// TODO(roth): Remove this requirement once we fix pick_first to not
+ /// throw away unselected subchannels.
+ virtual void RequestReresolutionLocked() GRPC_ABSTRACT;
+
+ void Orphan() override {
+ // Invoke ShutdownAndUnrefLocked() inside of the combiner.
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_CREATE(&Resolver::ShutdownAndUnrefLocked, this,
+ grpc_combiner_scheduler(combiner_)),
+ GRPC_ERROR_NONE);
+ }
+
+ GRPC_ABSTRACT_BASE_CLASS
+
+ protected:
+ /// Does NOT take ownership of the reference to \a combiner.
+ // TODO(roth): Once we have a C++-like interface for combiners, this
+ // API should change to take a RefCountedPtr<>, so that we always take
+ // ownership of a new ref.
+ explicit Resolver(grpc_combiner* combiner);
+
+ virtual ~Resolver();
+
+ /// Shuts down the resolver. If there is a pending call to
+ /// NextLocked(), the callback will be scheduled with an error.
+ virtual void ShutdownLocked() GRPC_ABSTRACT;
+
+ grpc_combiner* combiner() const { return combiner_; }
+
+ private:
+ static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) {
+ Resolver* resolver = static_cast<Resolver*>(arg);
+ resolver->ShutdownLocked();
+ resolver->Unref();
+ }
-struct grpc_resolver_vtable {
- void (*destroy)(grpc_resolver* resolver);
- void (*shutdown_locked)(grpc_resolver* resolver);
- void (*channel_saw_error_locked)(grpc_resolver* resolver);
- void (*next_locked)(grpc_resolver* resolver, grpc_channel_args** result,
- grpc_closure* on_complete);
+ grpc_combiner* combiner_;
};
-#ifndef NDEBUG
-#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_RESOLVER_UNREF(p, r) \
- grpc_resolver_unref((p), __FILE__, __LINE__, (r))
-void grpc_resolver_ref(grpc_resolver* policy, const char* file, int line,
- const char* reason);
-void grpc_resolver_unref(grpc_resolver* policy, const char* file, int line,
- const char* reason);
-#else
-#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
-#define GRPC_RESOLVER_UNREF(p, r) grpc_resolver_unref((p))
-void grpc_resolver_ref(grpc_resolver* policy);
-void grpc_resolver_unref(grpc_resolver* policy);
-#endif
-
-void grpc_resolver_init(grpc_resolver* resolver,
- const grpc_resolver_vtable* vtable,
- grpc_combiner* combiner);
-
-void grpc_resolver_shutdown_locked(grpc_resolver* resolver);
-
-/** Notification that the channel has seen an error on some address.
- Can be used as a hint that re-resolution is desirable soon.
-
- Must be called from the combiner passed as a resolver_arg at construction
- time.*/
-void grpc_resolver_channel_saw_error_locked(grpc_resolver* resolver);
-
-/** Get the next result from the resolver. Expected to set \a *result with
- new channel args and then schedule \a on_complete for execution.
-
- If resolution is fatally broken, set \a *result to NULL and
- schedule \a on_complete.
-
- Must be called from the combiner passed as a resolver_arg at construction
- time.*/
-void grpc_resolver_next_locked(grpc_resolver* resolver,
- grpc_channel_args** result,
- grpc_closure* on_complete);
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H */
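
The NextLocked() contract documented above means the consumer keeps one "next" request pending essentially all the time, and the resolver completes it whenever it has a result newer than the last one it published. A minimal standalone model of that handshake (toy types and std::function callbacks, not the actual grpc_core::Resolver API):

#include <functional>
#include <string>
#include <utility>
#include <vector>

class ToyResolver {
 public:
  using Callback = std::function<void(std::vector<std::string> /*result*/)>;

  // Consumer asks for the next result (at most one request outstanding).
  void Next(Callback on_complete) {
    pending_ = std::move(on_complete);
    MaybeFinishNext();
  }

  // A push-based name service would call this whenever the data changes.
  void OnNewAddresses(std::vector<std::string> addresses) {
    result_ = std::move(addresses);
    ++resolved_version_;
    MaybeFinishNext();
  }

 private:
  // Complete the pending request only if there is unpublished data.
  void MaybeFinishNext() {
    if (pending_ && resolved_version_ != published_version_) {
      published_version_ = resolved_version_;
      Callback cb = std::move(pending_);
      pending_ = nullptr;
      cb(result_);
    }
  }

  Callback pending_;
  std::vector<std::string> result_;
  int resolved_version_ = 0;
  int published_version_ = 0;
};
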
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 6ba5f932f0..0442b1e496 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -25,7 +25,6 @@
#include <unistd.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
@@ -35,6 +34,7 @@
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -49,105 +49,168 @@
#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_DNS_RECONNECT_JITTER 0.2
-typedef struct {
- /** base class: must be first */
- grpc_resolver base;
- /** DNS server to use (if not system default) */
- char* dns_server;
- /** name to resolve (usually the same as target_name) */
- char* name_to_resolve;
- /** default port to use */
- char* default_port;
- /** channel args. */
- grpc_channel_args* channel_args;
- /** whether to request the service config */
- bool request_service_config;
- /** pollset_set to drive the name resolution process */
- grpc_pollset_set* interested_parties;
-
- /** Closures used by the combiner */
- grpc_closure dns_ares_on_retry_timer_locked;
- grpc_closure dns_ares_on_resolved_locked;
-
- /** Combiner guarding the rest of the state */
- grpc_combiner* combiner;
- /** are we currently resolving? */
- bool resolving;
- /** the pending resolving request */
- grpc_ares_request* pending_request;
- /** which version of the result have we published? */
- int published_version;
- /** which version of the result is current? */
- int resolved_version;
- /** pending next completion, or NULL */
- grpc_closure* next_completion;
- /** target result address for next completion */
- grpc_channel_args** target_result;
- /** current (fully resolved) result */
- grpc_channel_args* resolved_result;
- /** retry timer */
- bool have_retry_timer;
- grpc_timer retry_timer;
- /** retry backoff state */
- grpc_core::ManualConstructor<grpc_core::BackOff> backoff;
-
- /** currently resolving addresses */
- grpc_lb_addresses* lb_addresses;
- /** currently resolving service config */
- char* service_config_json;
-} ares_dns_resolver;
-
-static void dns_ares_destroy(grpc_resolver* r);
-
-static void dns_ares_start_resolving_locked(ares_dns_resolver* r);
-static void dns_ares_maybe_finish_next_locked(ares_dns_resolver* r);
-
-static void dns_ares_shutdown_locked(grpc_resolver* r);
-static void dns_ares_channel_saw_error_locked(grpc_resolver* r);
-static void dns_ares_next_locked(grpc_resolver* r,
- grpc_channel_args** target_result,
- grpc_closure* on_complete);
-
-static const grpc_resolver_vtable dns_ares_resolver_vtable = {
- dns_ares_destroy, dns_ares_shutdown_locked,
- dns_ares_channel_saw_error_locked, dns_ares_next_locked};
-
-static void dns_ares_shutdown_locked(grpc_resolver* resolver) {
- ares_dns_resolver* r = (ares_dns_resolver*)resolver;
- if (r->have_retry_timer) {
- grpc_timer_cancel(&r->retry_timer);
+namespace grpc_core {
+
+namespace {
+
+const char kDefaultPort[] = "https";
+
+class AresDnsResolver : public Resolver {
+ public:
+ explicit AresDnsResolver(const ResolverArgs& args);
+
+ void NextLocked(grpc_channel_args** result,
+ grpc_closure* on_complete) override;
+
+ void RequestReresolutionLocked() override;
+
+ void ShutdownLocked() override;
+
+ private:
+ virtual ~AresDnsResolver();
+
+ void MaybeStartResolvingLocked();
+ void StartResolvingLocked();
+ void MaybeFinishNextLocked();
+
+ static void OnNextResolutionLocked(void* arg, grpc_error* error);
+ static void OnResolvedLocked(void* arg, grpc_error* error);
+
+ /// DNS server to use (if not system default)
+ char* dns_server_;
+ /// name to resolve (usually the same as target_name)
+ char* name_to_resolve_;
+ /// channel args
+ grpc_channel_args* channel_args_;
+ /// whether to request the service config
+ bool request_service_config_;
+ /// pollset_set to drive the name resolution process
+ grpc_pollset_set* interested_parties_;
+ /// closures used by the combiner
+ grpc_closure on_next_resolution_;
+ grpc_closure on_resolved_;
+ /// are we currently resolving?
+ bool resolving_ = false;
+ /// the pending resolving request
+ grpc_ares_request* pending_request_ = nullptr;
+ /// which version of the result have we published?
+ int published_version_ = 0;
+ /// which version of the result is current?
+ int resolved_version_ = 0;
+ /// pending next completion, or NULL
+ grpc_closure* next_completion_ = nullptr;
+ /// target result address for next completion
+ grpc_channel_args** target_result_ = nullptr;
+ /// current (fully resolved) result
+ grpc_channel_args* resolved_result_ = nullptr;
+ /// next resolution timer
+ bool have_next_resolution_timer_ = false;
+ grpc_timer next_resolution_timer_;
+ /// min interval between DNS requests
+ grpc_millis min_time_between_resolutions_;
+ /// timestamp of last DNS request
+ grpc_millis last_resolution_timestamp_ = -1;
+ /// retry backoff state
+ BackOff backoff_;
+ /// currently resolving addresses
+ grpc_lb_addresses* lb_addresses_ = nullptr;
+ /// currently resolving service config
+ char* service_config_json_ = nullptr;
+};
+
+AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
+ : Resolver(args.combiner),
+ backoff_(
+ BackOff::Options()
+ .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
+ 1000)
+ .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+ .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+ .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+ // Get name to resolve from URI path.
+ const char* path = args.uri->path;
+ if (path[0] == '/') ++path;
+ name_to_resolve_ = gpr_strdup(path);
+ // Get DNS server from URI authority.
+ if (0 != strcmp(args.uri->authority, "")) {
+ dns_server_ = gpr_strdup(args.uri->authority);
}
- if (r->pending_request != nullptr) {
- grpc_cancel_ares_request(r->pending_request);
+ channel_args_ = grpc_channel_args_copy(args.args);
+ const grpc_arg* arg = grpc_channel_args_find(
+ channel_args_, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION);
+ request_service_config_ = !grpc_channel_arg_get_integer(
+ arg, (grpc_integer_options){false, false, true});
+ arg = grpc_channel_args_find(channel_args_,
+ GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS);
+ min_time_between_resolutions_ =
+ grpc_channel_arg_get_integer(arg, {1000, 0, INT_MAX});
+ interested_parties_ = grpc_pollset_set_create();
+ if (args.pollset_set != nullptr) {
+ grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set);
+ }
+ GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolutionLocked, this,
+ grpc_combiner_scheduler(combiner()));
+ GRPC_CLOSURE_INIT(&on_resolved_, OnResolvedLocked, this,
+ grpc_combiner_scheduler(combiner()));
+}
+
+AresDnsResolver::~AresDnsResolver() {
+ gpr_log(GPR_DEBUG, "destroying AresDnsResolver");
+ if (resolved_result_ != nullptr) {
+ grpc_channel_args_destroy(resolved_result_);
}
- if (r->next_completion != nullptr) {
- *r->target_result = nullptr;
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Resolver Shutdown"));
- r->next_completion = nullptr;
+ grpc_pollset_set_destroy(interested_parties_);
+ gpr_free(dns_server_);
+ gpr_free(name_to_resolve_);
+ grpc_channel_args_destroy(channel_args_);
+}
+
+void AresDnsResolver::NextLocked(grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ gpr_log(GPR_DEBUG, "AresDnsResolver::NextLocked() is called.");
+ GPR_ASSERT(next_completion_ == nullptr);
+ next_completion_ = on_complete;
+ target_result_ = target_result;
+ if (resolved_version_ == 0 && !resolving_) {
+ MaybeStartResolvingLocked();
+ } else {
+ MaybeFinishNextLocked();
}
}
-static void dns_ares_channel_saw_error_locked(grpc_resolver* resolver) {
- ares_dns_resolver* r = (ares_dns_resolver*)resolver;
- if (!r->resolving) {
- r->backoff->Reset();
- dns_ares_start_resolving_locked(r);
+void AresDnsResolver::RequestReresolutionLocked() {
+ if (!resolving_) {
+ MaybeStartResolvingLocked();
}
}
-static void dns_ares_on_retry_timer_locked(void* arg, grpc_error* error) {
- ares_dns_resolver* r = (ares_dns_resolver*)arg;
- r->have_retry_timer = false;
+void AresDnsResolver::ShutdownLocked() {
+ if (have_next_resolution_timer_) {
+ grpc_timer_cancel(&next_resolution_timer_);
+ }
+ if (pending_request_ != nullptr) {
+ grpc_cancel_ares_request(pending_request_);
+ }
+ if (next_completion_ != nullptr) {
+ *target_result_ = nullptr;
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Resolver Shutdown"));
+ next_completion_ = nullptr;
+ }
+}
+
+void AresDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
+ AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
+ r->have_next_resolution_timer_ = false;
if (error == GRPC_ERROR_NONE) {
- if (!r->resolving) {
- dns_ares_start_resolving_locked(r);
+ if (!r->resolving_) {
+ r->StartResolvingLocked();
}
}
- GRPC_RESOLVER_UNREF(&r->base, "retry-timer");
+ r->Unref(DEBUG_LOCATION, "next_resolution_timer");
}
-static bool value_in_json_array(grpc_json* array, const char* value) {
+bool ValueInJsonArray(grpc_json* array, const char* value) {
for (grpc_json* entry = array->child; entry != nullptr; entry = entry->next) {
if (entry->type == GRPC_JSON_STRING && strcmp(entry->value, value) == 0) {
return true;
@@ -156,7 +219,7 @@ static bool value_in_json_array(grpc_json* array, const char* value) {
return false;
}
-static char* choose_service_config(char* service_config_choice_json) {
+char* ChooseServiceConfig(char* service_config_choice_json) {
grpc_json* choices_json = grpc_json_parse_string(service_config_choice_json);
if (choices_json == nullptr || choices_json->type != GRPC_JSON_ARRAY) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
@@ -174,8 +237,7 @@ static char* choose_service_config(char* service_config_choice_json) {
field = field->next) {
// Check client language, if specified.
if (strcmp(field->key, "clientLanguage") == 0) {
- if (field->type != GRPC_JSON_ARRAY ||
- !value_in_json_array(field, "c++")) {
+ if (field->type != GRPC_JSON_ARRAY || !ValueInJsonArray(field, "c++")) {
service_config_json = nullptr;
break;
}
@@ -184,7 +246,7 @@ static char* choose_service_config(char* service_config_choice_json) {
if (strcmp(field->key, "clientHostname") == 0) {
char* hostname = grpc_gethostname();
if (hostname == nullptr || field->type != GRPC_JSON_ARRAY ||
- !value_in_json_array(field, hostname)) {
+ !ValueInJsonArray(field, hostname)) {
service_config_json = nullptr;
break;
}
@@ -219,24 +281,24 @@ static char* choose_service_config(char* service_config_choice_json) {
return service_config;
}
-static void dns_ares_on_resolved_locked(void* arg, grpc_error* error) {
- ares_dns_resolver* r = (ares_dns_resolver*)arg;
+void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
+ AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
grpc_channel_args* result = nullptr;
- GPR_ASSERT(r->resolving);
- r->resolving = false;
- r->pending_request = nullptr;
- if (r->lb_addresses != nullptr) {
+ GPR_ASSERT(r->resolving_);
+ r->resolving_ = false;
+ r->pending_request_ = nullptr;
+ if (r->lb_addresses_ != nullptr) {
static const char* args_to_remove[2];
size_t num_args_to_remove = 0;
grpc_arg new_args[3];
size_t num_args_to_add = 0;
new_args[num_args_to_add++] =
- grpc_lb_addresses_create_channel_arg(r->lb_addresses);
+ grpc_lb_addresses_create_channel_arg(r->lb_addresses_);
grpc_service_config* service_config = nullptr;
char* service_config_string = nullptr;
- if (r->service_config_json != nullptr) {
- service_config_string = choose_service_config(r->service_config_json);
- gpr_free(r->service_config_json);
+ if (r->service_config_json_ != nullptr) {
+ service_config_string = ChooseServiceConfig(r->service_config_json_);
+ gpr_free(r->service_config_json_);
if (service_config_string != nullptr) {
gpr_log(GPR_INFO, "selected service config choice: %s",
service_config_string);
@@ -250,190 +312,157 @@ static void dns_ares_on_resolved_locked(void* arg, grpc_error* error) {
if (lb_policy_name != nullptr) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_LB_POLICY_NAME, (char*)lb_policy_name);
+ (char*)GRPC_ARG_LB_POLICY_NAME,
+ const_cast<char*>(lb_policy_name));
}
}
}
}
result = grpc_channel_args_copy_and_add_and_remove(
- r->channel_args, args_to_remove, num_args_to_remove, new_args,
+ r->channel_args_, args_to_remove, num_args_to_remove, new_args,
num_args_to_add);
if (service_config != nullptr) grpc_service_config_destroy(service_config);
gpr_free(service_config_string);
- grpc_lb_addresses_destroy(r->lb_addresses);
+ grpc_lb_addresses_destroy(r->lb_addresses_);
+ // Reset backoff state so that we start from the beginning when the
+ // next request gets triggered.
+ r->backoff_.Reset();
} else {
const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
- grpc_millis next_try = r->backoff->NextAttemptTime();
- grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis next_try = r->backoff_.NextAttemptTime();
+ grpc_millis timeout = next_try - ExecCtx::Get()->Now();
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
- GPR_ASSERT(!r->have_retry_timer);
- r->have_retry_timer = true;
- GRPC_RESOLVER_REF(&r->base, "retry-timer");
+ GPR_ASSERT(!r->have_next_resolution_timer_);
+ r->have_next_resolution_timer_ = true;
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self = r->Ref(DEBUG_LOCATION, "retry-timer");
+ self.release();
if (timeout > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
- grpc_timer_init(&r->retry_timer, next_try,
- &r->dns_ares_on_retry_timer_locked);
+ grpc_timer_init(&r->next_resolution_timer_, next_try,
+ &r->on_next_resolution_);
}
- if (r->resolved_result != nullptr) {
- grpc_channel_args_destroy(r->resolved_result);
+ if (r->resolved_result_ != nullptr) {
+ grpc_channel_args_destroy(r->resolved_result_);
}
- r->resolved_result = result;
- r->resolved_version++;
- dns_ares_maybe_finish_next_locked(r);
- GRPC_RESOLVER_UNREF(&r->base, "dns-resolving");
+ r->resolved_result_ = result;
+ ++r->resolved_version_;
+ r->MaybeFinishNextLocked();
+ r->Unref(DEBUG_LOCATION, "dns-resolving");
}
-static void dns_ares_next_locked(grpc_resolver* resolver,
- grpc_channel_args** target_result,
- grpc_closure* on_complete) {
- gpr_log(GPR_DEBUG, "dns_ares_next is called.");
- ares_dns_resolver* r = (ares_dns_resolver*)resolver;
- GPR_ASSERT(!r->next_completion);
- r->next_completion = on_complete;
- r->target_result = target_result;
- if (r->resolved_version == 0 && !r->resolving) {
- r->backoff->Reset();
- dns_ares_start_resolving_locked(r);
- } else {
- dns_ares_maybe_finish_next_locked(r);
+void AresDnsResolver::MaybeStartResolvingLocked() {
+ if (last_resolution_timestamp_ >= 0) {
+ const grpc_millis earliest_next_resolution =
+ last_resolution_timestamp_ + min_time_between_resolutions_;
+ const grpc_millis ms_until_next_resolution =
+ earliest_next_resolution - grpc_core::ExecCtx::Get()->Now();
+ if (ms_until_next_resolution > 0) {
+ const grpc_millis last_resolution_ago =
+ grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_;
+ gpr_log(GPR_DEBUG,
+ "In cooldown from last resolution (from %" PRIdPTR
+ " ms ago). Will resolve again in %" PRIdPTR " ms",
+ last_resolution_ago, ms_until_next_resolution);
+ if (!have_next_resolution_timer_) {
+ have_next_resolution_timer_ = true;
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self =
+ Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown");
+ self.release();
+ grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
+ &on_next_resolution_);
+ }
+ // TODO(dgq): remove the following two lines once Pick First stops
+ // discarding subchannels after selecting.
+ ++resolved_version_;
+ MaybeFinishNextLocked();
+ return;
+ }
}
+ StartResolvingLocked();
}
-static void dns_ares_start_resolving_locked(ares_dns_resolver* r) {
- GRPC_RESOLVER_REF(&r->base, "dns-resolving");
- GPR_ASSERT(!r->resolving);
- r->resolving = true;
- r->lb_addresses = nullptr;
- r->service_config_json = nullptr;
- r->pending_request = grpc_dns_lookup_ares(
- r->dns_server, r->name_to_resolve, r->default_port, r->interested_parties,
- &r->dns_ares_on_resolved_locked, &r->lb_addresses,
- true /* check_grpclb */,
- r->request_service_config ? &r->service_config_json : nullptr);
+void AresDnsResolver::StartResolvingLocked() {
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self = Ref(DEBUG_LOCATION, "dns-resolving");
+ self.release();
+ GPR_ASSERT(!resolving_);
+ resolving_ = true;
+ lb_addresses_ = nullptr;
+ service_config_json_ = nullptr;
+ pending_request_ = grpc_dns_lookup_ares(
+ dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
+ &on_resolved_, &lb_addresses_, true /* check_grpclb */,
+ request_service_config_ ? &service_config_json_ : nullptr);
+ last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
}
-static void dns_ares_maybe_finish_next_locked(ares_dns_resolver* r) {
- if (r->next_completion != nullptr &&
- r->resolved_version != r->published_version) {
- *r->target_result = r->resolved_result == nullptr
- ? nullptr
- : grpc_channel_args_copy(r->resolved_result);
- gpr_log(GPR_DEBUG, "dns_ares_maybe_finish_next_locked");
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
- r->next_completion = nullptr;
- r->published_version = r->resolved_version;
+void AresDnsResolver::MaybeFinishNextLocked() {
+ if (next_completion_ != nullptr && resolved_version_ != published_version_) {
+ *target_result_ = resolved_result_ == nullptr
+ ? nullptr
+ : grpc_channel_args_copy(resolved_result_);
+ gpr_log(GPR_DEBUG, "AresDnsResolver::MaybeFinishNextLocked()");
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE);
+ next_completion_ = nullptr;
+ published_version_ = resolved_version_;
}
}
-static void dns_ares_destroy(grpc_resolver* gr) {
- gpr_log(GPR_DEBUG, "dns_ares_destroy");
- ares_dns_resolver* r = (ares_dns_resolver*)gr;
- if (r->resolved_result != nullptr) {
- grpc_channel_args_destroy(r->resolved_result);
- }
- grpc_pollset_set_destroy(r->interested_parties);
- gpr_free(r->dns_server);
- gpr_free(r->name_to_resolve);
- gpr_free(r->default_port);
- grpc_channel_args_destroy(r->channel_args);
- gpr_free(r);
-}
+//
+// Factory
+//
-static grpc_resolver* dns_ares_create(grpc_resolver_args* args,
- const char* default_port) {
- /* Get name from args. */
- const char* path = args->uri->path;
- if (path[0] == '/') ++path;
- /* Create resolver. */
- ares_dns_resolver* r =
- (ares_dns_resolver*)gpr_zalloc(sizeof(ares_dns_resolver));
- grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
- if (0 != strcmp(args->uri->authority, "")) {
- r->dns_server = gpr_strdup(args->uri->authority);
+class AresDnsResolverFactory : public ResolverFactory {
+ public:
+ OrphanablePtr<Resolver> CreateResolver(
+ const ResolverArgs& args) const override {
+ return OrphanablePtr<Resolver>(New<AresDnsResolver>(args));
}
- r->name_to_resolve = gpr_strdup(path);
- r->default_port = gpr_strdup(default_port);
- r->channel_args = grpc_channel_args_copy(args->args);
- const grpc_arg* arg = grpc_channel_args_find(
- r->channel_args, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION);
- r->request_service_config = !grpc_channel_arg_get_integer(
- arg, (grpc_integer_options){false, false, true});
- r->interested_parties = grpc_pollset_set_create();
- if (args->pollset_set != nullptr) {
- grpc_pollset_set_add_pollset_set(r->interested_parties, args->pollset_set);
- }
- grpc_core::BackOff::Options backoff_options;
- backoff_options
- .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
- .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
- .set_jitter(GRPC_DNS_RECONNECT_JITTER)
- .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- r->backoff.Init(grpc_core::BackOff(backoff_options));
- GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
- dns_ares_on_retry_timer_locked, r,
- grpc_combiner_scheduler(r->base.combiner));
- GRPC_CLOSURE_INIT(&r->dns_ares_on_resolved_locked,
- dns_ares_on_resolved_locked, r,
- grpc_combiner_scheduler(r->base.combiner));
- return &r->base;
-}
-
-/*
- * FACTORY
- */
-static void dns_ares_factory_ref(grpc_resolver_factory* factory) {}
+ const char* scheme() const override { return "dns"; }
+};
-static void dns_ares_factory_unref(grpc_resolver_factory* factory) {}
+} // namespace
-static grpc_resolver* dns_factory_create_resolver(
- grpc_resolver_factory* factory, grpc_resolver_args* args) {
- return dns_ares_create(args, "https");
-}
-
-static char* dns_ares_factory_get_default_host_name(
- grpc_resolver_factory* factory, grpc_uri* uri) {
- const char* path = uri->path;
- if (path[0] == '/') ++path;
- return gpr_strdup(path);
-}
-
-static const grpc_resolver_factory_vtable dns_ares_factory_vtable = {
- dns_ares_factory_ref, dns_ares_factory_unref, dns_factory_create_resolver,
- dns_ares_factory_get_default_host_name, "dns"};
-static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable};
-
-static grpc_resolver_factory* dns_ares_resolver_factory_create() {
- return &dns_resolver_factory;
-}
+} // namespace grpc_core
-void grpc_resolver_dns_ares_init(void) {
- char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+void grpc_resolver_dns_ares_init() {
+ char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
sorter and the CNAME support are added. */
- if (resolver != nullptr && gpr_stricmp(resolver, "ares") == 0) {
+ if (resolver_env != nullptr && gpr_stricmp(resolver_env, "ares") == 0) {
grpc_error* error = grpc_ares_init();
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
return;
}
grpc_resolve_address = grpc_resolve_address_ares;
- grpc_register_resolver_type(dns_ares_resolver_factory_create());
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::AresDnsResolverFactory>()));
}
- gpr_free(resolver);
+ gpr_free(resolver_env);
}
-void grpc_resolver_dns_ares_shutdown(void) {
- char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
- if (resolver != nullptr && gpr_stricmp(resolver, "ares") == 0) {
+void grpc_resolver_dns_ares_shutdown() {
+ char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER");
+ if (resolver_env != nullptr && gpr_stricmp(resolver_env, "ares") == 0) {
grpc_ares_cleanup();
}
- gpr_free(resolver);
+ gpr_free(resolver_env);
}
#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */
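
MaybeStartResolvingLocked() above rate-limits DNS queries: if the last query happened less than min_time_between_resolutions ago, the next one is deferred by the remaining cooldown instead of being issued immediately. A simplified standalone sketch of that computation (hypothetical types, not the grpc_millis/timer machinery):

#include <cstdint>
#include <cstdio>

using Millis = int64_t;

struct Cooldown {
  Millis last_resolution_timestamp = -1;  // -1: never resolved yet
  Millis min_time_between_resolutions = 1000;
};

// Returns 0 if a query may start now, otherwise the delay to wait first.
Millis DelayBeforeNextResolution(const Cooldown& c, Millis now) {
  if (c.last_resolution_timestamp < 0) return 0;
  const Millis earliest =
      c.last_resolution_timestamp + c.min_time_between_resolutions;
  return earliest > now ? earliest - now : 0;
}

int main() {
  Cooldown c;
  c.last_resolution_timestamp = 10000;
  // Still inside the 1000 ms cooldown, so this reports a 600 ms delay.
  std::printf("delay: %lld ms\n",
              static_cast<long long>(DelayBeforeNextResolution(c, 10400)));
}
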
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index 2eb2a9b59d..10bc8f6074 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -28,7 +28,6 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
-#include <grpc/support/useful.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/ev_posix.h"
@@ -125,7 +124,8 @@ static void fd_node_shutdown(fd_node* fdn) {
grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set) {
- *ev_driver = (grpc_ares_ev_driver*)gpr_malloc(sizeof(grpc_ares_ev_driver));
+ *ev_driver = static_cast<grpc_ares_ev_driver*>(
+ gpr_malloc(sizeof(grpc_ares_ev_driver)));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
@@ -196,7 +196,7 @@ static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver,
}
static void on_readable_cb(void* arg, grpc_error* error) {
- fd_node* fdn = (fd_node*)arg;
+ fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
@@ -230,7 +230,7 @@ static void on_readable_cb(void* arg, grpc_error* error) {
}
static void on_writable_cb(void* arg, grpc_error* error) {
- fd_node* fdn = (fd_node*)arg;
+ fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
@@ -281,7 +281,7 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
if (fdn == nullptr) {
char* fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = (fd_node*)gpr_malloc(sizeof(fd_node));
+ fdn = static_cast<fd_node*>(gpr_malloc(sizeof(fd_node)));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 2b35bdb605..82b5545601 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -28,14 +28,13 @@
#include <ares.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
-#include <grpc/support/useful.h>
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/executor.h"
@@ -89,7 +88,7 @@ static uint16_t strhtons(const char* port) {
} else if (strcmp(port, "https") == 0) {
return htons(443);
}
- return htons((unsigned short)atoi(port));
+ return htons(static_cast<unsigned short>(atoi(port)));
}
static void grpc_ares_request_ref(grpc_ares_request* r) {
@@ -111,8 +110,8 @@ static void grpc_ares_request_unref(grpc_ares_request* r) {
static grpc_ares_hostbyname_request* create_hostbyname_request(
grpc_ares_request* parent_request, char* host, uint16_t port,
bool is_balancer) {
- grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)gpr_zalloc(
- sizeof(grpc_ares_hostbyname_request));
+ grpc_ares_hostbyname_request* hr = static_cast<grpc_ares_hostbyname_request*>(
+ gpr_zalloc(sizeof(grpc_ares_hostbyname_request)));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
hr->port = port;
@@ -129,7 +128,8 @@ static void destroy_hostbyname_request(grpc_ares_hostbyname_request* hr) {
static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
struct hostent* hostent) {
- grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)arg;
+ grpc_ares_hostbyname_request* hr =
+ static_cast<grpc_ares_hostbyname_request*>(arg);
grpc_ares_request* r = hr->parent_request;
gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
@@ -145,9 +145,9 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != nullptr; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses = (grpc_lb_address*)gpr_realloc(
- (*lb_addresses)->addresses,
- sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
+ (*lb_addresses)->addresses = static_cast<grpc_lb_address*>(
+ gpr_realloc((*lb_addresses)->addresses,
+ sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses));
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
switch (hostent->h_addrtype) {
case AF_INET6: {
@@ -156,7 +156,7 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
memset(&addr, 0, addr_len);
memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr],
sizeof(struct in6_addr));
- addr.sin6_family = (sa_family_t)hostent->h_addrtype;
+ addr.sin6_family = static_cast<sa_family_t>(hostent->h_addrtype);
addr.sin6_port = hr->port;
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
@@ -177,7 +177,7 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
memset(&addr, 0, addr_len);
memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr],
sizeof(struct in_addr));
- addr.sin_family = (sa_family_t)hostent->h_addrtype;
+ addr.sin_family = static_cast<sa_family_t>(hostent->h_addrtype);
addr.sin_port = hr->port;
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
@@ -212,7 +212,7 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
static void on_srv_query_done_cb(void* arg, int status, int timeouts,
unsigned char* abuf, int alen) {
- grpc_ares_request* r = (grpc_ares_request*)arg;
+ grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
grpc_core::ExecCtx exec_ctx;
gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
if (status == ARES_SUCCESS) {
@@ -260,7 +260,7 @@ static void on_txt_done_cb(void* arg, int status, int timeouts,
unsigned char* buf, int len) {
gpr_log(GPR_DEBUG, "on_txt_done_cb");
char* error_msg;
- grpc_ares_request* r = (grpc_ares_request*)arg;
+ grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
@@ -280,13 +280,15 @@ static void on_txt_done_cb(void* arg, int status, int timeouts,
// Found a service config record.
if (result != nullptr) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = (char*)gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out =
+ static_cast<char*>(gpr_malloc(service_config_len + 1));
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != nullptr && !result->record_start;
result = result->next) {
- *r->service_config_json_out = (char*)gpr_realloc(
- *r->service_config_json_out, service_config_len + result->length + 1);
+ *r->service_config_json_out = static_cast<char*>(
+ gpr_realloc(*r->service_config_json_out,
+ service_config_len + result->length + 1));
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
service_config_len += result->length;
@@ -349,7 +351,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- r = (grpc_ares_request*)gpr_zalloc(sizeof(grpc_ares_request));
+ r = static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -365,7 +367,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
grpc_resolved_address addr;
if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) {
r->dns_server_addr.family = AF_INET;
- struct sockaddr_in* in = (struct sockaddr_in*)addr.addr;
+ struct sockaddr_in* in = reinterpret_cast<struct sockaddr_in*>(addr.addr);
memcpy(&r->dns_server_addr.addr.addr4, &in->sin_addr,
sizeof(struct in_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -373,7 +375,8 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
} else if (grpc_parse_ipv6_hostport(dns_server, &addr,
false /* log_errors */)) {
r->dns_server_addr.family = AF_INET6;
- struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr.addr;
+ struct sockaddr_in6* in6 =
+ reinterpret_cast<struct sockaddr_in6*>(addr.addr);
memcpy(&r->dns_server_addr.addr.addr6, &in6->sin6_addr,
sizeof(struct in6_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -488,16 +491,17 @@ typedef struct grpc_resolve_address_ares_request {
static void on_dns_lookup_done_cb(void* arg, grpc_error* error) {
grpc_resolve_address_ares_request* r =
- (grpc_resolve_address_ares_request*)arg;
+ static_cast<grpc_resolve_address_ares_request*>(arg);
grpc_resolved_addresses** resolved_addresses = r->addrs_out;
if (r->lb_addrs == nullptr || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = nullptr;
} else {
- *resolved_addresses =
- (grpc_resolved_addresses*)gpr_zalloc(sizeof(grpc_resolved_addresses));
+ *resolved_addresses = static_cast<grpc_resolved_addresses*>(
+ gpr_zalloc(sizeof(grpc_resolved_addresses)));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = (grpc_resolved_address*)gpr_zalloc(
- sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
+ (*resolved_addresses)->addrs =
+ static_cast<grpc_resolved_address*>(gpr_zalloc(
+ sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs));
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
memcpy(&(*resolved_addresses)->addrs[i],
@@ -505,7 +509,7 @@ static void on_dns_lookup_done_cb(void* arg, grpc_error* error) {
}
}
GRPC_CLOSURE_SCHED(r->on_resolve_address_done, GRPC_ERROR_REF(error));
- grpc_lb_addresses_destroy(r->lb_addrs);
+ if (r->lb_addrs != nullptr) grpc_lb_addresses_destroy(r->lb_addrs);
gpr_free(r);
}
@@ -515,8 +519,8 @@ static void grpc_resolve_address_ares_impl(const char* name,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
grpc_resolve_address_ares_request* r =
- (grpc_resolve_address_ares_request*)gpr_zalloc(
- sizeof(grpc_resolve_address_ares_request));
+ static_cast<grpc_resolve_address_ares_request*>(
+ gpr_zalloc(sizeof(grpc_resolve_address_ares_request)));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
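
A minimal sketch of the cast discipline these hunks apply, assuming only a void*-returning allocator and raw sockaddr storage (the struct and function names below are illustrative, not gRPC source): static_cast for void*-to-object conversions, reinterpret_cast only where the pointer types are unrelated.

#include <cstdlib>
#include <netinet/in.h>

struct hostbyname_request { int port; };

hostbyname_request* make_request() {
  // Before: (hostbyname_request*)calloc(1, sizeof(hostbyname_request));
  // A conversion from void* to an object pointer is a static_cast in C++.
  return static_cast<hostbyname_request*>(
      calloc(1, sizeof(hostbyname_request)));
}

sockaddr_in6* as_ipv6(void* raw_addr_bytes) {
  // Raw address bytes have no implicit relationship to a concrete sockaddr
  // type, so reinterpret_cast is the appropriate (and required) cast here.
  return reinterpret_cast<sockaddr_in6*>(raw_addr_bytes);
}
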
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 62f03d52c0..fbab136421 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -19,17 +19,19 @@
#include <grpc/support/port_platform.h>
#include <inttypes.h>
-#include <string.h>
+#include <climits>
+#include <cstring>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -41,263 +43,298 @@
#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_DNS_RECONNECT_JITTER 0.2
-typedef struct {
- /** base class: must be first */
- grpc_resolver base;
- /** name to resolve */
- char* name_to_resolve;
- /** default port to use */
- char* default_port;
- /** channel args. */
- grpc_channel_args* channel_args;
- /** pollset_set to drive the name resolution process */
- grpc_pollset_set* interested_parties;
-
- /** are we currently resolving? */
- bool resolving;
- /** which version of the result have we published? */
- int published_version;
- /** which version of the result is current? */
- int resolved_version;
- /** pending next completion, or NULL */
- grpc_closure* next_completion;
- /** target result address for next completion */
- grpc_channel_args** target_result;
- /** current (fully resolved) result */
- grpc_channel_args* resolved_result;
- /** retry timer */
- bool have_retry_timer;
- grpc_timer retry_timer;
- grpc_closure on_retry;
- /** retry backoff state */
- grpc_core::ManualConstructor<grpc_core::BackOff> backoff;
-
- /** currently resolving addresses */
- grpc_resolved_addresses* addresses;
-} dns_resolver;
-
-static void dns_destroy(grpc_resolver* r);
-
-static void dns_start_resolving_locked(dns_resolver* r);
-static void dns_maybe_finish_next_locked(dns_resolver* r);
-
-static void dns_shutdown_locked(grpc_resolver* r);
-static void dns_channel_saw_error_locked(grpc_resolver* r);
-static void dns_next_locked(grpc_resolver* r, grpc_channel_args** target_result,
- grpc_closure* on_complete);
-
-static const grpc_resolver_vtable dns_resolver_vtable = {
- dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
- dns_next_locked};
-
-static void dns_shutdown_locked(grpc_resolver* resolver) {
- dns_resolver* r = (dns_resolver*)resolver;
- if (r->have_retry_timer) {
- grpc_timer_cancel(&r->retry_timer);
- }
- if (r->next_completion != nullptr) {
- *r->target_result = nullptr;
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Resolver Shutdown"));
- r->next_completion = nullptr;
+namespace grpc_core {
+
+namespace {
+
+const char kDefaultPort[] = "https";
+
+class NativeDnsResolver : public Resolver {
+ public:
+ explicit NativeDnsResolver(const ResolverArgs& args);
+
+ void NextLocked(grpc_channel_args** result,
+ grpc_closure* on_complete) override;
+
+ void RequestReresolutionLocked() override;
+
+ void ShutdownLocked() override;
+
+ private:
+ virtual ~NativeDnsResolver();
+
+ void MaybeStartResolvingLocked();
+ void StartResolvingLocked();
+ void MaybeFinishNextLocked();
+
+ static void OnNextResolutionLocked(void* arg, grpc_error* error);
+ static void OnResolvedLocked(void* arg, grpc_error* error);
+
+ /// name to resolve
+ char* name_to_resolve_ = nullptr;
+ /// channel args
+ grpc_channel_args* channel_args_ = nullptr;
+ /// pollset_set to drive the name resolution process
+ grpc_pollset_set* interested_parties_ = nullptr;
+ /// are we currently resolving?
+ bool resolving_ = false;
+ grpc_closure on_resolved_;
+ /// which version of the result have we published?
+ int published_version_ = 0;
+ /// which version of the result is current?
+ int resolved_version_ = 0;
+ /// pending next completion, or nullptr
+ grpc_closure* next_completion_ = nullptr;
+ /// target result address for next completion
+ grpc_channel_args** target_result_ = nullptr;
+ /// current (fully resolved) result
+ grpc_channel_args* resolved_result_ = nullptr;
+ /// next resolution timer
+ bool have_next_resolution_timer_ = false;
+ grpc_timer next_resolution_timer_;
+ grpc_closure on_next_resolution_;
+ /// min time between DNS requests
+ grpc_millis min_time_between_resolutions_;
+ /// timestamp of last DNS request
+ grpc_millis last_resolution_timestamp_ = -1;
+ /// retry backoff state
+ BackOff backoff_;
+ /// currently resolving addresses
+ grpc_resolved_addresses* addresses_ = nullptr;
+};
+
+NativeDnsResolver::NativeDnsResolver(const ResolverArgs& args)
+ : Resolver(args.combiner),
+ backoff_(
+ BackOff::Options()
+ .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS *
+ 1000)
+ .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+ .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+ .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+ char* path = args.uri->path;
+ if (path[0] == '/') ++path;
+ name_to_resolve_ = gpr_strdup(path);
+ channel_args_ = grpc_channel_args_copy(args.args);
+ const grpc_arg* arg = grpc_channel_args_find(
+ args.args, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS);
+ min_time_between_resolutions_ =
+ grpc_channel_arg_get_integer(arg, {1000, 0, INT_MAX});
+ interested_parties_ = grpc_pollset_set_create();
+ if (args.pollset_set != nullptr) {
+ grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set);
}
+ GRPC_CLOSURE_INIT(&on_next_resolution_,
+ NativeDnsResolver::OnNextResolutionLocked, this,
+ grpc_combiner_scheduler(args.combiner));
+ GRPC_CLOSURE_INIT(&on_resolved_, NativeDnsResolver::OnResolvedLocked, this,
+ grpc_combiner_scheduler(args.combiner));
}
-static void dns_channel_saw_error_locked(grpc_resolver* resolver) {
- dns_resolver* r = (dns_resolver*)resolver;
- if (!r->resolving) {
- r->backoff->Reset();
- dns_start_resolving_locked(r);
+NativeDnsResolver::~NativeDnsResolver() {
+ if (resolved_result_ != nullptr) {
+ grpc_channel_args_destroy(resolved_result_);
}
+ grpc_pollset_set_destroy(interested_parties_);
+ gpr_free(name_to_resolve_);
+ grpc_channel_args_destroy(channel_args_);
}
-static void dns_next_locked(grpc_resolver* resolver,
- grpc_channel_args** target_result,
- grpc_closure* on_complete) {
- dns_resolver* r = (dns_resolver*)resolver;
- GPR_ASSERT(!r->next_completion);
- r->next_completion = on_complete;
- r->target_result = target_result;
- if (r->resolved_version == 0 && !r->resolving) {
- r->backoff->Reset();
- dns_start_resolving_locked(r);
+void NativeDnsResolver::NextLocked(grpc_channel_args** result,
+ grpc_closure* on_complete) {
+ GPR_ASSERT(next_completion_ == nullptr);
+ next_completion_ = on_complete;
+ target_result_ = result;
+ if (resolved_version_ == 0 && !resolving_) {
+ MaybeStartResolvingLocked();
} else {
- dns_maybe_finish_next_locked(r);
+ MaybeFinishNextLocked();
}
}
-static void dns_on_retry_timer_locked(void* arg, grpc_error* error) {
- dns_resolver* r = (dns_resolver*)arg;
+void NativeDnsResolver::RequestReresolutionLocked() {
+ if (!resolving_) {
+ MaybeStartResolvingLocked();
+ }
+}
- r->have_retry_timer = false;
- if (error == GRPC_ERROR_NONE) {
- if (!r->resolving) {
- dns_start_resolving_locked(r);
- }
+void NativeDnsResolver::ShutdownLocked() {
+ if (have_next_resolution_timer_) {
+ grpc_timer_cancel(&next_resolution_timer_);
+ }
+ if (next_completion_ != nullptr) {
+ *target_result_ = nullptr;
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Resolver Shutdown"));
+ next_completion_ = nullptr;
}
+}
- GRPC_RESOLVER_UNREF(&r->base, "retry-timer");
+void NativeDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
+ NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
+ r->have_next_resolution_timer_ = false;
+ if (error == GRPC_ERROR_NONE && !r->resolving_) {
+ r->StartResolvingLocked();
+ }
+ r->Unref(DEBUG_LOCATION, "retry-timer");
}
-static void dns_on_resolved_locked(void* arg, grpc_error* error) {
- dns_resolver* r = (dns_resolver*)arg;
+void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
+ NativeDnsResolver* r = static_cast<NativeDnsResolver*>(arg);
grpc_channel_args* result = nullptr;
- GPR_ASSERT(r->resolving);
- r->resolving = false;
+ GPR_ASSERT(r->resolving_);
+ r->resolving_ = false;
GRPC_ERROR_REF(error);
- error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
- grpc_slice_from_copied_string(r->name_to_resolve));
- if (r->addresses != nullptr) {
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(r->name_to_resolve_));
+ if (r->addresses_ != nullptr) {
grpc_lb_addresses* addresses = grpc_lb_addresses_create(
- r->addresses->naddrs, nullptr /* user_data_vtable */);
- for (size_t i = 0; i < r->addresses->naddrs; ++i) {
+ r->addresses_->naddrs, nullptr /* user_data_vtable */);
+ for (size_t i = 0; i < r->addresses_->naddrs; ++i) {
grpc_lb_addresses_set_address(
- addresses, i, &r->addresses->addrs[i].addr,
- r->addresses->addrs[i].len, false /* is_balancer */,
+ addresses, i, &r->addresses_->addrs[i].addr,
+ r->addresses_->addrs[i].len, false /* is_balancer */,
nullptr /* balancer_name */, nullptr /* user_data */);
}
grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
- result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
- grpc_resolved_addresses_destroy(r->addresses);
+ result = grpc_channel_args_copy_and_add(r->channel_args_, &new_arg, 1);
+ grpc_resolved_addresses_destroy(r->addresses_);
grpc_lb_addresses_destroy(addresses);
+ // Reset backoff state so that we start from the beginning when the
+ // next request gets triggered.
+ r->backoff_.Reset();
} else {
- grpc_millis next_try = r->backoff->NextAttemptTime();
- grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis next_try = r->backoff_.NextAttemptTime();
+ grpc_millis timeout = next_try - ExecCtx::Get()->Now();
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
- GPR_ASSERT(!r->have_retry_timer);
- r->have_retry_timer = true;
- GRPC_RESOLVER_REF(&r->base, "retry-timer");
+ GPR_ASSERT(!r->have_next_resolution_timer_);
+ r->have_next_resolution_timer_ = true;
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self =
+ r->Ref(DEBUG_LOCATION, "next_resolution_timer");
+ self.release();
if (timeout > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
- GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
- grpc_combiner_scheduler(r->base.combiner));
- grpc_timer_init(&r->retry_timer, next_try, &r->on_retry);
+ grpc_timer_init(&r->next_resolution_timer_, next_try,
+ &r->on_next_resolution_);
}
- if (r->resolved_result != nullptr) {
- grpc_channel_args_destroy(r->resolved_result);
+ if (r->resolved_result_ != nullptr) {
+ grpc_channel_args_destroy(r->resolved_result_);
}
- r->resolved_result = result;
- r->resolved_version++;
- dns_maybe_finish_next_locked(r);
+ r->resolved_result_ = result;
+ ++r->resolved_version_;
+ r->MaybeFinishNextLocked();
GRPC_ERROR_UNREF(error);
-
- GRPC_RESOLVER_UNREF(&r->base, "dns-resolving");
-}
-
-static void dns_start_resolving_locked(dns_resolver* r) {
- GRPC_RESOLVER_REF(&r->base, "dns-resolving");
- GPR_ASSERT(!r->resolving);
- r->resolving = true;
- r->addresses = nullptr;
- grpc_resolve_address(
- r->name_to_resolve, r->default_port, r->interested_parties,
- GRPC_CLOSURE_CREATE(dns_on_resolved_locked, r,
- grpc_combiner_scheduler(r->base.combiner)),
- &r->addresses);
+ r->Unref(DEBUG_LOCATION, "dns-resolving");
}
-static void dns_maybe_finish_next_locked(dns_resolver* r) {
- if (r->next_completion != nullptr &&
- r->resolved_version != r->published_version) {
- *r->target_result = r->resolved_result == nullptr
- ? nullptr
- : grpc_channel_args_copy(r->resolved_result);
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
- r->next_completion = nullptr;
- r->published_version = r->resolved_version;
+void NativeDnsResolver::MaybeStartResolvingLocked() {
+ if (last_resolution_timestamp_ >= 0) {
+ const grpc_millis earliest_next_resolution =
+ last_resolution_timestamp_ + min_time_between_resolutions_;
+ const grpc_millis ms_until_next_resolution =
+ earliest_next_resolution - grpc_core::ExecCtx::Get()->Now();
+ if (ms_until_next_resolution > 0) {
+ const grpc_millis last_resolution_ago =
+ grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_;
+ gpr_log(GPR_DEBUG,
+ "In cooldown from last resolution (from %" PRIdPTR
+ " ms ago). Will resolve again in %" PRIdPTR " ms",
+ last_resolution_ago, ms_until_next_resolution);
+ if (!have_next_resolution_timer_) {
+ have_next_resolution_timer_ = true;
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self =
+ Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown");
+ self.release();
+ grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
+ &on_next_resolution_);
+ }
+ // TODO(dgq): remove the following two lines once Pick First stops
+ // discarding subchannels after selecting.
+ ++resolved_version_;
+ MaybeFinishNextLocked();
+ return;
+ }
}
+ StartResolvingLocked();
}
-static void dns_destroy(grpc_resolver* gr) {
- dns_resolver* r = (dns_resolver*)gr;
- if (r->resolved_result != nullptr) {
- grpc_channel_args_destroy(r->resolved_result);
- }
- grpc_pollset_set_destroy(r->interested_parties);
- gpr_free(r->name_to_resolve);
- gpr_free(r->default_port);
- grpc_channel_args_destroy(r->channel_args);
- gpr_free(r);
+void NativeDnsResolver::StartResolvingLocked() {
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self = Ref(DEBUG_LOCATION, "dns-resolving");
+ self.release();
+ GPR_ASSERT(!resolving_);
+ resolving_ = true;
+ addresses_ = nullptr;
+ grpc_resolve_address(name_to_resolve_, kDefaultPort, interested_parties_,
+ &on_resolved_, &addresses_);
+ last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
}
-static grpc_resolver* dns_create(grpc_resolver_args* args,
- const char* default_port) {
- if (0 != strcmp(args->uri->authority, "")) {
- gpr_log(GPR_ERROR, "authority based dns uri's not supported");
- return nullptr;
+void NativeDnsResolver::MaybeFinishNextLocked() {
+ if (next_completion_ != nullptr && resolved_version_ != published_version_) {
+ *target_result_ = resolved_result_ == nullptr
+ ? nullptr
+ : grpc_channel_args_copy(resolved_result_);
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE);
+ next_completion_ = nullptr;
+ published_version_ = resolved_version_;
}
- // Get name from args.
- char* path = args->uri->path;
- if (path[0] == '/') ++path;
- // Create resolver.
- dns_resolver* r = (dns_resolver*)gpr_zalloc(sizeof(dns_resolver));
- grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
- r->name_to_resolve = gpr_strdup(path);
- r->default_port = gpr_strdup(default_port);
- r->channel_args = grpc_channel_args_copy(args->args);
- r->interested_parties = grpc_pollset_set_create();
- if (args->pollset_set != nullptr) {
- grpc_pollset_set_add_pollset_set(r->interested_parties, args->pollset_set);
- }
- grpc_core::BackOff::Options backoff_options;
- backoff_options
- .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
- .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
- .set_jitter(GRPC_DNS_RECONNECT_JITTER)
- .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- r->backoff.Init(grpc_core::BackOff(backoff_options));
- return &r->base;
}
-/*
- * FACTORY
- */
-
-static void dns_factory_ref(grpc_resolver_factory* factory) {}
-
-static void dns_factory_unref(grpc_resolver_factory* factory) {}
-
-static grpc_resolver* dns_factory_create_resolver(
- grpc_resolver_factory* factory, grpc_resolver_args* args) {
- return dns_create(args, "https");
-}
+//
+// Factory
+//
+
+class NativeDnsResolverFactory : public ResolverFactory {
+ public:
+ OrphanablePtr<Resolver> CreateResolver(
+ const ResolverArgs& args) const override {
+ if (0 != strcmp(args.uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based dns uri's not supported");
+ return OrphanablePtr<Resolver>(nullptr);
+ }
+ return OrphanablePtr<Resolver>(New<NativeDnsResolver>(args));
+ }
-static char* dns_factory_get_default_host_name(grpc_resolver_factory* factory,
- grpc_uri* uri) {
- const char* path = uri->path;
- if (path[0] == '/') ++path;
- return gpr_strdup(path);
-}
+ const char* scheme() const override { return "dns"; }
+};
-static const grpc_resolver_factory_vtable dns_factory_vtable = {
- dns_factory_ref, dns_factory_unref, dns_factory_create_resolver,
- dns_factory_get_default_host_name, "dns"};
-static grpc_resolver_factory dns_resolver_factory = {&dns_factory_vtable};
+} // namespace
-static grpc_resolver_factory* dns_resolver_factory_create() {
- return &dns_resolver_factory;
-}
+} // namespace grpc_core
-void grpc_resolver_dns_native_init(void) {
- char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
- if (resolver != nullptr && gpr_stricmp(resolver, "native") == 0) {
+void grpc_resolver_dns_native_init() {
+ char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER");
+ if (resolver_env != nullptr && gpr_stricmp(resolver_env, "native") == 0) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
- grpc_register_resolver_type(dns_resolver_factory_create());
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::NativeDnsResolverFactory>()));
} else {
- grpc_resolver_factory* existing_factory =
- grpc_resolver_factory_lookup("dns");
+ grpc_core::ResolverRegistry::Builder::InitRegistry();
+ grpc_core::ResolverFactory* existing_factory =
+ grpc_core::ResolverRegistry::LookupResolverFactory("dns");
if (existing_factory == nullptr) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
- grpc_register_resolver_type(dns_resolver_factory_create());
- } else {
- grpc_resolver_factory_unref(existing_factory);
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::NativeDnsResolverFactory>()));
}
}
- gpr_free(resolver);
+ gpr_free(resolver_env);
}
-void grpc_resolver_dns_native_shutdown(void) {}
+void grpc_resolver_dns_native_shutdown() {}
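
The rewritten resolver above adds a resolution cooldown driven by GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS. A minimal sketch of that arithmetic with plain integers in place of grpc_millis and ExecCtx (illustrative only, not the gRPC implementation):

#include <cstdint>
#include <cstdio>

// Returns how many ms to delay the next resolution; 0 means resolve now.
int64_t MsUntilNextResolution(int64_t now_ms, int64_t last_resolution_ms,
                              int64_t min_time_between_ms) {
  if (last_resolution_ms < 0) return 0;  // no resolution has happened yet
  const int64_t earliest_next = last_resolution_ms + min_time_between_ms;
  return earliest_next > now_ms ? earliest_next - now_ms : 0;
}

int main() {
  // Last request 400 ms ago with the default 1000 ms cooldown: wait 600 ms,
  // arm the next-resolution timer, and serve the cached result meanwhile.
  std::printf("%lld\n",
              static_cast<long long>(MsUntilNextResolution(10400, 10000, 1000)));
}
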
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index eaa5e6ac49..b01e608c3f 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -24,7 +24,6 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
@@ -32,6 +31,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -42,155 +42,177 @@
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
-//
-// fake_resolver
-//
+namespace grpc_core {
-typedef struct {
- // base class -- must be first
- grpc_resolver base;
+// This cannot be in an anonymous namespace, because it is a friend of
+// FakeResolverResponseGenerator.
+class FakeResolver : public Resolver {
+ public:
+ explicit FakeResolver(const ResolverArgs& args);
- // passed-in parameters
- grpc_channel_args* channel_args;
+ void NextLocked(grpc_channel_args** result,
+ grpc_closure* on_complete) override;
- // If not NULL, the next set of resolution results to be returned to
- // grpc_resolver_next_locked()'s closure.
- grpc_channel_args* next_results;
+ void RequestReresolutionLocked() override;
- // Results to use for the pretended re-resolution in
- // fake_resolver_channel_saw_error_locked().
- grpc_channel_args* results_upon_error;
+ private:
+ friend class FakeResolverResponseGenerator;
+
+ virtual ~FakeResolver();
+
+ void MaybeFinishNextLocked();
+
+ void ShutdownLocked() override;
+ // passed-in parameters
+ grpc_channel_args* channel_args_ = nullptr;
+ // If not NULL, the next set of resolution results to be returned to
+ // NextLocked()'s closure.
+ grpc_channel_args* next_results_ = nullptr;
+ // Results to use for the pretended re-resolution in
+ // RequestReresolutionLocked().
+ grpc_channel_args* reresolution_results_ = nullptr;
+ // TODO(juanlishen): This can go away once pick_first is changed to not throw
+ // away its subchannels, since that will eliminate its dependence on
+ // channel_saw_error_locked() causing an immediate resolver return.
+ // A copy of the most-recently used resolution results.
+ grpc_channel_args* last_used_results_ = nullptr;
// pending next completion, or NULL
- grpc_closure* next_completion;
+ grpc_closure* next_completion_ = nullptr;
// target result address for next completion
- grpc_channel_args** target_result;
-} fake_resolver;
-
-static void fake_resolver_destroy(grpc_resolver* gr) {
- fake_resolver* r = (fake_resolver*)gr;
- grpc_channel_args_destroy(r->next_results);
- grpc_channel_args_destroy(r->results_upon_error);
- grpc_channel_args_destroy(r->channel_args);
- gpr_free(r);
+ grpc_channel_args** target_result_ = nullptr;
+};
+
+FakeResolver::FakeResolver(const ResolverArgs& args) : Resolver(args.combiner) {
+ channel_args_ = grpc_channel_args_copy(args.args);
+ FakeResolverResponseGenerator* response_generator =
+ FakeResolverResponseGenerator::GetFromArgs(args.args);
+ if (response_generator != nullptr) response_generator->resolver_ = this;
}
-static void fake_resolver_shutdown_locked(grpc_resolver* resolver) {
- fake_resolver* r = (fake_resolver*)resolver;
- if (r->next_completion != nullptr) {
- *r->target_result = nullptr;
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Resolver Shutdown"));
- r->next_completion = nullptr;
- }
+FakeResolver::~FakeResolver() {
+ grpc_channel_args_destroy(next_results_);
+ grpc_channel_args_destroy(reresolution_results_);
+ grpc_channel_args_destroy(last_used_results_);
+ grpc_channel_args_destroy(channel_args_);
+}
+
+void FakeResolver::NextLocked(grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ GPR_ASSERT(next_completion_ == nullptr);
+ next_completion_ = on_complete;
+ target_result_ = target_result;
+ MaybeFinishNextLocked();
}
-static void fake_resolver_maybe_finish_next_locked(fake_resolver* r) {
- if (r->next_completion != nullptr && r->next_results != nullptr) {
- *r->target_result =
- grpc_channel_args_union(r->next_results, r->channel_args);
- grpc_channel_args_destroy(r->next_results);
- r->next_results = nullptr;
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
- r->next_completion = nullptr;
+void FakeResolver::RequestReresolutionLocked() {
+ // A resolution must have been returned before an error is seen.
+ GPR_ASSERT(last_used_results_ != nullptr);
+ grpc_channel_args_destroy(next_results_);
+ if (reresolution_results_ != nullptr) {
+ next_results_ = grpc_channel_args_copy(reresolution_results_);
+ } else {
+ // If reresolution_results is unavailable, re-resolve with the most-recently
+ // used results to avoid a no-op re-resolution.
+ next_results_ = grpc_channel_args_copy(last_used_results_);
}
+ MaybeFinishNextLocked();
}
-static void fake_resolver_channel_saw_error_locked(grpc_resolver* resolver) {
- fake_resolver* r = (fake_resolver*)resolver;
- if (r->next_results == nullptr && r->results_upon_error != nullptr) {
- // Pretend we re-resolved.
- r->next_results = grpc_channel_args_copy(r->results_upon_error);
+void FakeResolver::MaybeFinishNextLocked() {
+ if (next_completion_ != nullptr && next_results_ != nullptr) {
+ *target_result_ = grpc_channel_args_union(next_results_, channel_args_);
+ grpc_channel_args_destroy(next_results_);
+ next_results_ = nullptr;
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE);
+ next_completion_ = nullptr;
}
- fake_resolver_maybe_finish_next_locked(r);
}
-static void fake_resolver_next_locked(grpc_resolver* resolver,
- grpc_channel_args** target_result,
- grpc_closure* on_complete) {
- fake_resolver* r = (fake_resolver*)resolver;
- GPR_ASSERT(!r->next_completion);
- r->next_completion = on_complete;
- r->target_result = target_result;
- fake_resolver_maybe_finish_next_locked(r);
+void FakeResolver::ShutdownLocked() {
+ if (next_completion_ != nullptr) {
+ *target_result_ = nullptr;
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Resolver Shutdown"));
+ next_completion_ = nullptr;
+ }
}
-static const grpc_resolver_vtable fake_resolver_vtable = {
- fake_resolver_destroy, fake_resolver_shutdown_locked,
- fake_resolver_channel_saw_error_locked, fake_resolver_next_locked};
+//
+// FakeResolverResponseGenerator
+//
-struct grpc_fake_resolver_response_generator {
- fake_resolver* resolver; // Set by the fake_resolver constructor to itself.
- gpr_refcount refcount;
+struct SetResponseClosureArg {
+ grpc_closure set_response_closure;
+ FakeResolverResponseGenerator* generator;
+ grpc_channel_args* response;
};
-grpc_fake_resolver_response_generator*
-grpc_fake_resolver_response_generator_create() {
- grpc_fake_resolver_response_generator* generator =
- (grpc_fake_resolver_response_generator*)gpr_zalloc(sizeof(*generator));
- gpr_ref_init(&generator->refcount, 1);
- return generator;
+void FakeResolverResponseGenerator::SetResponseLocked(void* arg,
+ grpc_error* error) {
+ SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
+ FakeResolver* resolver = closure_arg->generator->resolver_;
+ grpc_channel_args_destroy(resolver->next_results_);
+ resolver->next_results_ = closure_arg->response;
+ grpc_channel_args_destroy(resolver->last_used_results_);
+ resolver->last_used_results_ = grpc_channel_args_copy(closure_arg->response);
+ resolver->MaybeFinishNextLocked();
+ Delete(closure_arg);
}
-grpc_fake_resolver_response_generator*
-grpc_fake_resolver_response_generator_ref(
- grpc_fake_resolver_response_generator* generator) {
- gpr_ref(&generator->refcount);
- return generator;
+void FakeResolverResponseGenerator::SetResponse(grpc_channel_args* response) {
+ GPR_ASSERT(response != nullptr);
+ GPR_ASSERT(resolver_ != nullptr);
+ SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
+ closure_arg->generator = this;
+ closure_arg->response = grpc_channel_args_copy(response);
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
+ closure_arg,
+ grpc_combiner_scheduler(resolver_->combiner())),
+ GRPC_ERROR_NONE);
}
-void grpc_fake_resolver_response_generator_unref(
- grpc_fake_resolver_response_generator* generator) {
- if (gpr_unref(&generator->refcount)) {
- gpr_free(generator);
- }
+void FakeResolverResponseGenerator::SetReresolutionResponseLocked(
+ void* arg, grpc_error* error) {
+ SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
+ FakeResolver* resolver = closure_arg->generator->resolver_;
+ grpc_channel_args_destroy(resolver->reresolution_results_);
+ resolver->reresolution_results_ = closure_arg->response;
+ Delete(closure_arg);
}
-typedef struct set_response_closure_arg {
- grpc_closure set_response_closure;
- grpc_fake_resolver_response_generator* generator;
- grpc_channel_args* next_response;
-} set_response_closure_arg;
-
-static void set_response_closure_fn(void* arg, grpc_error* error) {
- set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
- grpc_fake_resolver_response_generator* generator = closure_arg->generator;
- fake_resolver* r = generator->resolver;
- if (r->next_results != nullptr) {
- grpc_channel_args_destroy(r->next_results);
- }
- r->next_results = closure_arg->next_response;
- if (r->results_upon_error != nullptr) {
- grpc_channel_args_destroy(r->results_upon_error);
- }
- r->results_upon_error = grpc_channel_args_copy(closure_arg->next_response);
- gpr_free(closure_arg);
- fake_resolver_maybe_finish_next_locked(r);
+void FakeResolverResponseGenerator::SetReresolutionResponse(
+ grpc_channel_args* response) {
+ GPR_ASSERT(resolver_ != nullptr);
+ SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
+ closure_arg->generator = this;
+ closure_arg->response =
+ response != nullptr ? grpc_channel_args_copy(response) : nullptr;
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
+ SetReresolutionResponseLocked, closure_arg,
+ grpc_combiner_scheduler(resolver_->combiner())),
+ GRPC_ERROR_NONE);
}
-void grpc_fake_resolver_response_generator_set_response(
- grpc_fake_resolver_response_generator* generator,
- grpc_channel_args* next_response) {
- GPR_ASSERT(generator->resolver != nullptr);
- set_response_closure_arg* closure_arg =
- (set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
- closure_arg->generator = generator;
- closure_arg->next_response = grpc_channel_args_copy(next_response);
- GRPC_CLOSURE_SCHED(GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
- set_response_closure_fn, closure_arg,
- grpc_combiner_scheduler(
- generator->resolver->base.combiner)),
- GRPC_ERROR_NONE);
-}
+namespace {
static void* response_generator_arg_copy(void* p) {
- return grpc_fake_resolver_response_generator_ref(
- (grpc_fake_resolver_response_generator*)p);
+ FakeResolverResponseGenerator* generator =
+ static_cast<FakeResolverResponseGenerator*>(p);
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new channel args code is converted to C++, find a way to track this ref
+ // in a cleaner way.
+ RefCountedPtr<FakeResolverResponseGenerator> copy = generator->Ref();
+ copy.release();
+ return p;
}
static void response_generator_arg_destroy(void* p) {
- grpc_fake_resolver_response_generator_unref(
- (grpc_fake_resolver_response_generator*)p);
+ FakeResolverResponseGenerator* generator =
+ static_cast<FakeResolverResponseGenerator*>(p);
+ generator->Unref();
}
static int response_generator_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
@@ -199,8 +221,10 @@ static const grpc_arg_pointer_vtable response_generator_arg_vtable = {
response_generator_arg_copy, response_generator_arg_destroy,
response_generator_cmp};
-grpc_arg grpc_fake_resolver_response_generator_arg(
- grpc_fake_resolver_response_generator* generator) {
+} // namespace
+
+grpc_arg FakeResolverResponseGenerator::MakeChannelArg(
+ FakeResolverResponseGenerator* generator) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
@@ -209,49 +233,38 @@ grpc_arg grpc_fake_resolver_response_generator_arg(
return arg;
}
-grpc_fake_resolver_response_generator*
-grpc_fake_resolver_get_response_generator(const grpc_channel_args* args) {
+FakeResolverResponseGenerator* FakeResolverResponseGenerator::GetFromArgs(
+ const grpc_channel_args* args) {
const grpc_arg* arg =
grpc_channel_args_find(args, GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return nullptr;
- return (grpc_fake_resolver_response_generator*)arg->value.pointer.p;
+ return static_cast<FakeResolverResponseGenerator*>(arg->value.pointer.p);
}
//
-// fake_resolver_factory
+// Factory
//
-static void fake_resolver_factory_ref(grpc_resolver_factory* factory) {}
+namespace {
-static void fake_resolver_factory_unref(grpc_resolver_factory* factory) {}
-
-static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory,
- grpc_resolver_args* args) {
- fake_resolver* r = (fake_resolver*)gpr_zalloc(sizeof(*r));
- r->channel_args = grpc_channel_args_copy(args->args);
- grpc_resolver_init(&r->base, &fake_resolver_vtable, args->combiner);
- grpc_fake_resolver_response_generator* response_generator =
- grpc_fake_resolver_get_response_generator(args->args);
- if (response_generator != nullptr) response_generator->resolver = r;
- return &r->base;
-}
+class FakeResolverFactory : public ResolverFactory {
+ public:
+ OrphanablePtr<Resolver> CreateResolver(
+ const ResolverArgs& args) const override {
+ return OrphanablePtr<Resolver>(New<FakeResolver>(args));
+ }
-static char* fake_resolver_get_default_authority(grpc_resolver_factory* factory,
- grpc_uri* uri) {
- const char* path = uri->path;
- if (path[0] == '/') ++path;
- return gpr_strdup(path);
-}
+ const char* scheme() const override { return "fake"; }
+};
-static const grpc_resolver_factory_vtable fake_resolver_factory_vtable = {
- fake_resolver_factory_ref, fake_resolver_factory_unref,
- fake_resolver_create, fake_resolver_get_default_authority, "fake"};
+} // namespace
-static grpc_resolver_factory fake_resolver_factory = {
- &fake_resolver_factory_vtable};
+} // namespace grpc_core
-void grpc_resolver_fake_init(void) {
- grpc_register_resolver_type(&fake_resolver_factory);
+void grpc_resolver_fake_init() {
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::FakeResolverFactory>()));
}
-void grpc_resolver_fake_shutdown(void) {}
+void grpc_resolver_fake_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index a8977e5980..d42811d913 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -20,41 +20,57 @@
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gprpp/ref_counted.h"
#define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \
"grpc.fake_resolver.response_generator"
-void grpc_resolver_fake_init();
-
-// Instances of \a grpc_fake_resolver_response_generator are passed to the
-// fake resolver in a channel argument (see \a
-// grpc_fake_resolver_response_generator_arg) in order to inject and trigger
-// custom resolutions. See also \a
-// grpc_fake_resolver_response_generator_set_response.
-typedef struct grpc_fake_resolver_response_generator
- grpc_fake_resolver_response_generator;
-grpc_fake_resolver_response_generator*
-grpc_fake_resolver_response_generator_create();
-
-// Instruct the fake resolver associated with the \a response_generator instance
-// to trigger a new resolution for \a uri and \a args.
-void grpc_fake_resolver_response_generator_set_response(
- grpc_fake_resolver_response_generator* generator,
- grpc_channel_args* next_response);
-
-// Return a \a grpc_arg for a \a grpc_fake_resolver_response_generator instance.
-grpc_arg grpc_fake_resolver_response_generator_arg(
- grpc_fake_resolver_response_generator* generator);
-// Return the \a grpc_fake_resolver_response_generator instance in \a args or
-// NULL.
-grpc_fake_resolver_response_generator*
-grpc_fake_resolver_get_response_generator(const grpc_channel_args* args);
-
-grpc_fake_resolver_response_generator*
-grpc_fake_resolver_response_generator_ref(
- grpc_fake_resolver_response_generator* generator);
-void grpc_fake_resolver_response_generator_unref(
- grpc_fake_resolver_response_generator* generator);
+namespace grpc_core {
+
+class FakeResolver;
+
+/// A mechanism for generating responses for the fake resolver.
+/// An instance of this class is passed to the fake resolver via a channel
+/// argument (see \a MakeChannelArg()) and used to inject and trigger custom
+/// resolutions.
+// TODO(roth): I would ideally like this to be InternallyRefCounted
+// instead of RefCounted, but external refs are currently needed to
+// encode this in channel args. Once channel_args are converted to C++,
+// see if we can find a way to fix this.
+class FakeResolverResponseGenerator
+ : public RefCounted<FakeResolverResponseGenerator> {
+ public:
+ FakeResolverResponseGenerator() {}
+
+ // Instructs the fake resolver associated with the response generator
+ // instance to trigger a new resolution with the specified response.
+ void SetResponse(grpc_channel_args* next_response);
+
+ // Sets the re-resolution response, which is returned by the fake resolver
+ // when re-resolution is requested (via \a RequestReresolutionLocked()).
+ // The new re-resolution response replaces any previous re-resolution
+ // response that may have been set by a previous call.
+ // If the re-resolution response is set to NULL, then the fake
+ // resolver will return the last value set via \a SetResponse().
+ void SetReresolutionResponse(grpc_channel_args* response);
+
+ // Returns a channel arg containing \a generator.
+ static grpc_arg MakeChannelArg(FakeResolverResponseGenerator* generator);
+
+ // Returns the response generator in \a args, or null if not found.
+ static FakeResolverResponseGenerator* GetFromArgs(
+ const grpc_channel_args* args);
+
+ private:
+ friend class FakeResolver;
+
+ static void SetResponseLocked(void* arg, grpc_error* error);
+ static void SetReresolutionResponseLocked(void* arg, grpc_error* error);
+
+ FakeResolver* resolver_ = nullptr; // Do not own.
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \
*/
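
A hypothetical test-side use of the generator API declared above, assuming a channel has already been created with MakeChannelArg(generator) so the fake resolver has attached itself; the helper name and single-address setup are assumptions, and only functions already visible in this diff (grpc_lb_addresses_*, grpc_channel_args_*) are used. SetResponse() copies the args, so the caller keeps ownership of everything it builds:

#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/channel/channel_args.h"

// Pushes a one-address resolution result into the fake resolver.
void InjectOneAddress(grpc_core::FakeResolverResponseGenerator* generator,
                      const grpc_resolved_address* addr) {
  grpc_lb_addresses* addresses =
      grpc_lb_addresses_create(1, nullptr /* user_data_vtable */);
  grpc_lb_addresses_set_address(addresses, 0, addr->addr, addr->len,
                                false /* is_balancer */,
                                nullptr /* balancer_name */,
                                nullptr /* user_data */);
  grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
  grpc_channel_args* response =
      grpc_channel_args_copy_and_add(nullptr /* base args */, &arg, 1);
  generator->SetResponse(response);  // triggers a new resolution
  grpc_channel_args_destroy(response);
  grpc_lb_addresses_destroy(addresses);
}
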
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index 99ad78e23c..966b9fd3f2 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -22,7 +22,6 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/host_port.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
@@ -30,6 +29,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
@@ -37,115 +37,99 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-typedef struct {
- /** base class: must be first */
- grpc_resolver base;
- /** the addresses that we've 'resolved' */
- grpc_lb_addresses* addresses;
- /** channel args */
- grpc_channel_args* channel_args;
- /** have we published? */
- bool published;
- /** pending next completion, or NULL */
- grpc_closure* next_completion;
- /** target result address for next completion */
- grpc_channel_args** target_result;
-} sockaddr_resolver;
-
-static void sockaddr_destroy(grpc_resolver* r);
-
-static void sockaddr_maybe_finish_next_locked(sockaddr_resolver* r);
-
-static void sockaddr_shutdown_locked(grpc_resolver* r);
-static void sockaddr_channel_saw_error_locked(grpc_resolver* r);
-static void sockaddr_next_locked(grpc_resolver* r,
- grpc_channel_args** target_result,
- grpc_closure* on_complete);
-
-static const grpc_resolver_vtable sockaddr_resolver_vtable = {
- sockaddr_destroy, sockaddr_shutdown_locked,
- sockaddr_channel_saw_error_locked, sockaddr_next_locked};
-
-static void sockaddr_shutdown_locked(grpc_resolver* resolver) {
- sockaddr_resolver* r = (sockaddr_resolver*)resolver;
- if (r->next_completion != nullptr) {
- *r->target_result = nullptr;
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Resolver Shutdown"));
- r->next_completion = nullptr;
- }
-}
+namespace grpc_core {
-static void sockaddr_channel_saw_error_locked(grpc_resolver* resolver) {
- sockaddr_resolver* r = (sockaddr_resolver*)resolver;
- r->published = false;
- sockaddr_maybe_finish_next_locked(r);
-}
+namespace {
-static void sockaddr_next_locked(grpc_resolver* resolver,
- grpc_channel_args** target_result,
- grpc_closure* on_complete) {
- sockaddr_resolver* r = (sockaddr_resolver*)resolver;
- GPR_ASSERT(!r->next_completion);
- r->next_completion = on_complete;
- r->target_result = target_result;
- sockaddr_maybe_finish_next_locked(r);
-}
+class SockaddrResolver : public Resolver {
+ public:
+ /// Takes ownership of \a addresses.
+ SockaddrResolver(const ResolverArgs& args, grpc_lb_addresses* addresses);
-static void sockaddr_maybe_finish_next_locked(sockaddr_resolver* r) {
- if (r->next_completion != nullptr && !r->published) {
- r->published = true;
- grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
- *r->target_result =
- grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
- GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
- r->next_completion = nullptr;
- }
-}
+ void NextLocked(grpc_channel_args** result,
+ grpc_closure* on_complete) override;
+
+ void RequestReresolutionLocked() override;
-static void sockaddr_destroy(grpc_resolver* gr) {
- sockaddr_resolver* r = (sockaddr_resolver*)gr;
- grpc_lb_addresses_destroy(r->addresses);
- grpc_channel_args_destroy(r->channel_args);
- gpr_free(r);
+ void ShutdownLocked() override;
+
+ private:
+ virtual ~SockaddrResolver();
+
+ void MaybeFinishNextLocked();
+
+ /// the addresses that we've "resolved"
+ grpc_lb_addresses* addresses_ = nullptr;
+ /// channel args
+ grpc_channel_args* channel_args_ = nullptr;
+ /// have we published?
+ bool published_ = false;
+ /// pending next completion, or NULL
+ grpc_closure* next_completion_ = nullptr;
+ /// target result address for next completion
+ grpc_channel_args** target_result_ = nullptr;
+};
+
+SockaddrResolver::SockaddrResolver(const ResolverArgs& args,
+ grpc_lb_addresses* addresses)
+ : Resolver(args.combiner),
+ addresses_(addresses),
+ channel_args_(grpc_channel_args_copy(args.args)) {}
+
+SockaddrResolver::~SockaddrResolver() {
+ grpc_lb_addresses_destroy(addresses_);
+ grpc_channel_args_destroy(channel_args_);
}
-static char* ip_get_default_authority(grpc_uri* uri) {
- const char* path = uri->path;
- if (path[0] == '/') ++path;
- return gpr_strdup(path);
+void SockaddrResolver::NextLocked(grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ GPR_ASSERT(!next_completion_);
+ next_completion_ = on_complete;
+ target_result_ = target_result;
+ MaybeFinishNextLocked();
}
-static char* ipv4_get_default_authority(grpc_resolver_factory* factory,
- grpc_uri* uri) {
- return ip_get_default_authority(uri);
+void SockaddrResolver::RequestReresolutionLocked() {
+ published_ = false;
+ MaybeFinishNextLocked();
}
-static char* ipv6_get_default_authority(grpc_resolver_factory* factory,
- grpc_uri* uri) {
- return ip_get_default_authority(uri);
+void SockaddrResolver::ShutdownLocked() {
+ if (next_completion_ != nullptr) {
+ *target_result_ = nullptr;
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Resolver Shutdown"));
+ next_completion_ = nullptr;
+ }
}
-#ifdef GRPC_HAVE_UNIX_SOCKET
-char* unix_get_default_authority(grpc_resolver_factory* factory,
- grpc_uri* uri) {
- return gpr_strdup("localhost");
+void SockaddrResolver::MaybeFinishNextLocked() {
+ if (next_completion_ != nullptr && !published_) {
+ published_ = true;
+ grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses_);
+ *target_result_ = grpc_channel_args_copy_and_add(channel_args_, &arg, 1);
+ GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE);
+ next_completion_ = nullptr;
+ }
}
-#endif
-static void do_nothing(void* ignored) {}
+//
+// Factory
+//
-static grpc_resolver* sockaddr_create(grpc_resolver_args* args,
- bool parse(const grpc_uri* uri,
- grpc_resolved_address* dst)) {
- if (0 != strcmp(args->uri->authority, "")) {
- gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
- args->uri->scheme);
- return nullptr;
+void DoNothing(void* ignored) {}
+
+OrphanablePtr<Resolver> CreateSockaddrResolver(
+ const ResolverArgs& args,
+ bool parse(const grpc_uri* uri, grpc_resolved_address* dst)) {
+ if (0 != strcmp(args.uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority-based URIs not supported by the %s scheme",
+ args.uri->scheme);
+ return OrphanablePtr<Resolver>(nullptr);
}
- /* Construct addresses. */
+ // Construct addresses.
grpc_slice path_slice =
- grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
+ grpc_slice_new(args.uri->path, strlen(args.uri->path), DoNothing);
grpc_slice_buffer path_parts;
grpc_slice_buffer_init(&path_parts);
grpc_slice_split(path_slice, ",", &path_parts);
@@ -153,7 +137,7 @@ static grpc_resolver* sockaddr_create(grpc_resolver_args* args,
path_parts.count, nullptr /* user_data_vtable */);
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
- grpc_uri ith_uri = *args->uri;
+ grpc_uri ith_uri = *args.uri;
char* part_str = grpc_slice_to_c_string(path_parts.slices[i]);
ith_uri.path = part_str;
if (!parse(&ith_uri, &addresses->addresses[i].address)) {
@@ -166,48 +150,64 @@ static grpc_resolver* sockaddr_create(grpc_resolver_args* args,
grpc_slice_unref_internal(path_slice);
if (errors_found) {
grpc_lb_addresses_destroy(addresses);
- return nullptr;
+ return OrphanablePtr<Resolver>(nullptr);
}
- /* Instantiate resolver. */
- sockaddr_resolver* r =
- (sockaddr_resolver*)gpr_zalloc(sizeof(sockaddr_resolver));
- r->addresses = addresses;
- r->channel_args = grpc_channel_args_copy(args->args);
- grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
- return &r->base;
+ // Instantiate resolver.
+ return OrphanablePtr<Resolver>(New<SockaddrResolver>(args, addresses));
}
-/*
- * FACTORY
- */
+class IPv4ResolverFactory : public ResolverFactory {
+ public:
+ OrphanablePtr<Resolver> CreateResolver(
+ const ResolverArgs& args) const override {
+ return CreateSockaddrResolver(args, grpc_parse_ipv4);
+ }
-static void sockaddr_factory_ref(grpc_resolver_factory* factory) {}
+ const char* scheme() const override { return "ipv4"; }
+};
-static void sockaddr_factory_unref(grpc_resolver_factory* factory) {}
+class IPv6ResolverFactory : public ResolverFactory {
+ public:
+ OrphanablePtr<Resolver> CreateResolver(
+ const ResolverArgs& args) const override {
+ return CreateSockaddrResolver(args, grpc_parse_ipv6);
+ }
-#define DECL_FACTORY(name) \
- static grpc_resolver* name##_factory_create_resolver( \
- grpc_resolver_factory* factory, grpc_resolver_args* args) { \
- return sockaddr_create(args, grpc_parse_##name); \
- } \
- static const grpc_resolver_factory_vtable name##_factory_vtable = { \
- sockaddr_factory_ref, sockaddr_factory_unref, \
- name##_factory_create_resolver, name##_get_default_authority, #name}; \
- static grpc_resolver_factory name##_resolver_factory = { \
- &name##_factory_vtable}
+ const char* scheme() const override { return "ipv6"; }
+};
#ifdef GRPC_HAVE_UNIX_SOCKET
-DECL_FACTORY(unix);
-#endif
-DECL_FACTORY(ipv4);
-DECL_FACTORY(ipv6);
+class UnixResolverFactory : public ResolverFactory {
+ public:
+ OrphanablePtr<Resolver> CreateResolver(
+ const ResolverArgs& args) const override {
+ return CreateSockaddrResolver(args, grpc_parse_unix);
+ }
+
+ UniquePtr<char> GetDefaultAuthority(grpc_uri* uri) const override {
+ return UniquePtr<char>(gpr_strdup("localhost"));
+ }
+
+ const char* scheme() const override { return "unix"; }
+};
+#endif // GRPC_HAVE_UNIX_SOCKET
+
+} // namespace
+
+} // namespace grpc_core
-void grpc_resolver_sockaddr_init(void) {
- grpc_register_resolver_type(&ipv4_resolver_factory);
- grpc_register_resolver_type(&ipv6_resolver_factory);
+void grpc_resolver_sockaddr_init() {
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::IPv4ResolverFactory>()));
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::IPv6ResolverFactory>()));
#ifdef GRPC_HAVE_UNIX_SOCKET
- grpc_register_resolver_type(&unix_resolver_factory);
+ grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
+ grpc_core::UniquePtr<grpc_core::ResolverFactory>(
+ grpc_core::New<grpc_core::UnixResolverFactory>()));
#endif
}
-void grpc_resolver_sockaddr_shutdown(void) {}
+void grpc_resolver_sockaddr_shutdown() {}
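
CreateSockaddrResolver() above splits the URI path on commas, so a single target such as ipv4:10.0.0.1:1234,10.0.0.2:1234 yields several resolved addresses. A rough equivalent of that split using std::string instead of grpc_slice_buffer (illustrative only; the example addresses are made up):

#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> SplitPathOnCommas(const std::string& path) {
  std::vector<std::string> parts;
  size_t start = 0;
  while (true) {
    const size_t comma = path.find(',', start);
    parts.push_back(path.substr(start, comma - start));
    if (comma == std::string::npos) break;
    start = comma + 1;
  }
  return parts;
}

int main() {
  // Each part is then parsed as its own URI path (grpc_parse_ipv4/ipv6/unix).
  for (const auto& part : SplitPathOnCommas("10.0.0.1:1234,10.0.0.2:1234")) {
    std::cout << part << "\n";
  }
}
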
diff --git a/src/core/ext/filters/client_channel/resolver_factory.cc b/src/core/ext/filters/client_channel/resolver_factory.cc
deleted file mode 100644
index 9b3ec2f1c4..0000000000
--- a/src/core/ext/filters/client_channel/resolver_factory.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/filters/client_channel/resolver_factory.h"
-
-void grpc_resolver_factory_ref(grpc_resolver_factory* factory) {
- factory->vtable->ref(factory);
-}
-
-void grpc_resolver_factory_unref(grpc_resolver_factory* factory) {
- factory->vtable->unref(factory);
-}
-
-/** Create a resolver instance for a name */
-grpc_resolver* grpc_resolver_factory_create_resolver(
- grpc_resolver_factory* factory, grpc_resolver_args* args) {
- if (factory == nullptr) return nullptr;
- return factory->vtable->create_resolver(factory, args);
-}
-
-char* grpc_resolver_factory_get_default_authority(
- grpc_resolver_factory* factory, grpc_uri* uri) {
- if (factory == nullptr) return nullptr;
- return factory->vtable->get_default_authority(factory, uri);
-}
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index 170ecc0b48..f9b9501236 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -19,50 +19,51 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H
-#include "src/core/ext/filters/client_channel/client_channel_factory.h"
+#include <grpc/support/string_util.h>
+
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/gprpp/abstract.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/pollset_set.h"
-typedef struct grpc_resolver_factory grpc_resolver_factory;
-typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
+namespace grpc_core {
-struct grpc_resolver_factory {
- const grpc_resolver_factory_vtable* vtable;
+struct ResolverArgs {
+ /// The parsed URI to resolve.
+ grpc_uri* uri = nullptr;
+ /// Channel args to be included in resolver results.
+ const grpc_channel_args* args = nullptr;
+ /// Used to drive I/O in the name resolution process.
+ grpc_pollset_set* pollset_set = nullptr;
+ /// The combiner under which all resolver calls will be run.
+ grpc_combiner* combiner = nullptr;
};
-typedef struct grpc_resolver_args {
- grpc_uri* uri;
- const grpc_channel_args* args;
- grpc_pollset_set* pollset_set;
- grpc_combiner* combiner;
-} grpc_resolver_args;
+class ResolverFactory {
+ public:
+ /// Returns a new resolver instance.
+ virtual OrphanablePtr<Resolver> CreateResolver(const ResolverArgs& args) const
+ GRPC_ABSTRACT;
-struct grpc_resolver_factory_vtable {
- void (*ref)(grpc_resolver_factory* factory);
- void (*unref)(grpc_resolver_factory* factory);
+ /// Returns a string representing the default authority to use for this
+ /// scheme.
+ virtual UniquePtr<char> GetDefaultAuthority(grpc_uri* uri) const {
+ const char* path = uri->path;
+ if (path[0] == '/') ++path;
+ return UniquePtr<char>(gpr_strdup(path));
+ }
- /** Implementation of grpc_resolver_factory_create_resolver */
- grpc_resolver* (*create_resolver)(grpc_resolver_factory* factory,
- grpc_resolver_args* args);
+ /// Returns the URI scheme that this factory implements.
+ /// Caller does NOT take ownership of result.
+ virtual const char* scheme() const GRPC_ABSTRACT;
- /** Implementation of grpc_resolver_factory_get_default_authority */
- char* (*get_default_authority)(grpc_resolver_factory* factory, grpc_uri* uri);
+ virtual ~ResolverFactory() {}
- /** URI scheme that this factory implements */
- const char* scheme;
+ GRPC_ABSTRACT_BASE_CLASS
};
-void grpc_resolver_factory_ref(grpc_resolver_factory* resolver);
-void grpc_resolver_factory_unref(grpc_resolver_factory* resolver);
-
-/** Create a resolver instance for a name */
-grpc_resolver* grpc_resolver_factory_create_resolver(
- grpc_resolver_factory* factory, grpc_resolver_args* args);
-
-/** Return a (freshly allocated with gpr_malloc) string representing
- the default authority to use for this scheme. */
-char* grpc_resolver_factory_get_default_authority(
- grpc_resolver_factory* factory, grpc_uri* uri);
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
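For orientation, a minimal factory written against the ResolverFactory interface above might look like the sketch below. The "example" scheme and the ExampleResolver type are hypothetical stand-ins for a concrete Resolver subclass; they are not taken from this change.

namespace grpc_core {

class ExampleResolverFactory : public ResolverFactory {
 public:
  // Hands back a new resolver owned by the caller. ExampleResolver is assumed
  // to be a Resolver subclass defined elsewhere.
  OrphanablePtr<Resolver> CreateResolver(
      const ResolverArgs& args) const override {
    return OrphanablePtr<Resolver>(New<ExampleResolver>(args));
  }

  // GetDefaultAuthority() is inherited from the base class: it returns a copy
  // of the URI path with any leading '/' stripped.

  const char* scheme() const override { return "example"; }
};

}  // namespace grpc_core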
diff --git a/src/core/ext/filters/client_channel/resolver_registry.cc b/src/core/ext/filters/client_channel/resolver_registry.cc
index 3f8451de6b..036e81d0ae 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.cc
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
@@ -24,133 +24,153 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#define MAX_RESOLVERS 10
-#define DEFAULT_RESOLVER_PREFIX_MAX_LENGTH 32
+namespace grpc_core {
-static grpc_resolver_factory* g_all_of_the_resolvers[MAX_RESOLVERS];
-static int g_number_of_resolvers = 0;
+namespace {
-static char g_default_resolver_prefix[DEFAULT_RESOLVER_PREFIX_MAX_LENGTH] =
- "dns:///";
+class RegistryState {
+ public:
+ RegistryState() : default_prefix_(gpr_strdup("dns:///")) {}
-void grpc_resolver_registry_init() {}
-
-void grpc_resolver_registry_shutdown(void) {
- for (int i = 0; i < g_number_of_resolvers; i++) {
- grpc_resolver_factory_unref(g_all_of_the_resolvers[i]);
+ void SetDefaultPrefix(const char* default_resolver_prefix) {
+ GPR_ASSERT(default_resolver_prefix != nullptr);
+ GPR_ASSERT(*default_resolver_prefix != '\0');
+ default_prefix_.reset(gpr_strdup(default_resolver_prefix));
}
- // FIXME(ctiller): this should live in grpc_resolver_registry_init,
- // however that would have the client_channel plugin call this AFTER we start
- // registering resolvers from third party plugins, and so they'd never show
- // up.
- // We likely need some kind of dependency system for plugins.... what form
- // that takes is TBD.
- g_number_of_resolvers = 0;
-}
-
-void grpc_resolver_registry_set_default_prefix(
- const char* default_resolver_prefix) {
- const size_t len = strlen(default_resolver_prefix);
- GPR_ASSERT(len < DEFAULT_RESOLVER_PREFIX_MAX_LENGTH &&
- "default resolver prefix too long");
- GPR_ASSERT(len > 0 && "default resolver prefix can't be empty");
- // By the previous assert, default_resolver_prefix is safe to be copied with a
- // plain strcpy.
- strcpy(g_default_resolver_prefix, default_resolver_prefix);
-}
-void grpc_register_resolver_type(grpc_resolver_factory* factory) {
- int i;
- for (i = 0; i < g_number_of_resolvers; i++) {
- GPR_ASSERT(0 != strcmp(factory->vtable->scheme,
- g_all_of_the_resolvers[i]->vtable->scheme));
+ void RegisterResolverFactory(UniquePtr<ResolverFactory> factory) {
+ for (size_t i = 0; i < factories_.size(); ++i) {
+ GPR_ASSERT(strcmp(factories_[i]->scheme(), factory->scheme()) != 0);
+ }
+ factories_.push_back(std::move(factory));
}
- GPR_ASSERT(g_number_of_resolvers != MAX_RESOLVERS);
- grpc_resolver_factory_ref(factory);
- g_all_of_the_resolvers[g_number_of_resolvers++] = factory;
-}
-static grpc_resolver_factory* lookup_factory(const char* name) {
- int i;
+ ResolverFactory* LookupResolverFactory(const char* scheme) const {
+ for (size_t i = 0; i < factories_.size(); ++i) {
+ if (strcmp(scheme, factories_[i]->scheme()) == 0) {
+ return factories_[i].get();
+ }
+ }
+ return nullptr;
+ }
- for (i = 0; i < g_number_of_resolvers; i++) {
- if (0 == strcmp(name, g_all_of_the_resolvers[i]->vtable->scheme)) {
- return g_all_of_the_resolvers[i];
+ // Returns the factory for the scheme of \a target. If \a target does
+ // not parse as a URI, prepends \a default_prefix_ and tries again.
+ // If URI parsing is successful (in either attempt), sets \a uri to
+ // point to the parsed URI.
+ // If \a default_prefix_ needs to be prepended, sets \a canonical_target
+ // to the canonical target string.
+ ResolverFactory* FindResolverFactory(const char* target, grpc_uri** uri,
+ char** canonical_target) const {
+ GPR_ASSERT(uri != nullptr);
+ *uri = grpc_uri_parse(target, 1);
+ ResolverFactory* factory =
+ *uri == nullptr ? nullptr : LookupResolverFactory((*uri)->scheme);
+ if (factory == nullptr) {
+ grpc_uri_destroy(*uri);
+ gpr_asprintf(canonical_target, "%s%s", default_prefix_.get(), target);
+ *uri = grpc_uri_parse(*canonical_target, 1);
+ factory =
+ *uri == nullptr ? nullptr : LookupResolverFactory((*uri)->scheme);
+ if (factory == nullptr) {
+ grpc_uri_destroy(grpc_uri_parse(target, 0));
+ grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0));
+ gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target,
+ *canonical_target);
+ }
}
+ return factory;
}
- return nullptr;
+
+ private:
+ // We currently support 10 factories without doing additional
+ // allocation. This number could be raised if there is a case where
+ // more factories are needed and the additional allocations are
+ // hurting performance (which is unlikely, since these allocations
+ // only occur at gRPC initialization time).
+ InlinedVector<UniquePtr<ResolverFactory>, 10> factories_;
+ UniquePtr<char> default_prefix_;
+};
+
+static RegistryState* g_state = nullptr;
+
+} // namespace
+
+//
+// ResolverRegistry::Builder
+//
+
+void ResolverRegistry::Builder::InitRegistry() {
+ if (g_state == nullptr) g_state = New<RegistryState>();
}
-grpc_resolver_factory* grpc_resolver_factory_lookup(const char* name) {
- grpc_resolver_factory* f = lookup_factory(name);
- if (f) grpc_resolver_factory_ref(f);
- return f;
+void ResolverRegistry::Builder::ShutdownRegistry() {
+ Delete(g_state);
+ g_state = nullptr;
}
-static grpc_resolver_factory* lookup_factory_by_uri(grpc_uri* uri) {
- if (!uri) return nullptr;
- return lookup_factory(uri->scheme);
+void ResolverRegistry::Builder::SetDefaultPrefix(
+ const char* default_resolver_prefix) {
+ InitRegistry();
+ g_state->SetDefaultPrefix(default_resolver_prefix);
}
-static grpc_resolver_factory* resolve_factory(const char* target,
- grpc_uri** uri,
- char** canonical_target) {
- grpc_resolver_factory* factory = nullptr;
-
- GPR_ASSERT(uri != nullptr);
- *uri = grpc_uri_parse(target, 1);
- factory = lookup_factory_by_uri(*uri);
- if (factory == nullptr) {
- grpc_uri_destroy(*uri);
- gpr_asprintf(canonical_target, "%s%s", g_default_resolver_prefix, target);
- *uri = grpc_uri_parse(*canonical_target, 1);
- factory = lookup_factory_by_uri(*uri);
- if (factory == nullptr) {
- grpc_uri_destroy(grpc_uri_parse(target, 0));
- grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0));
- gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target,
- *canonical_target);
- }
- }
- return factory;
+void ResolverRegistry::Builder::RegisterResolverFactory(
+ UniquePtr<ResolverFactory> factory) {
+ InitRegistry();
+ g_state->RegisterResolverFactory(std::move(factory));
}
-grpc_resolver* grpc_resolver_create(const char* target,
- const grpc_channel_args* args,
- grpc_pollset_set* pollset_set,
- grpc_combiner* combiner) {
+//
+// ResolverRegistry
+//
+
+ResolverFactory* ResolverRegistry::LookupResolverFactory(const char* scheme) {
+ GPR_ASSERT(g_state != nullptr);
+ return g_state->LookupResolverFactory(scheme);
+}
+
+OrphanablePtr<Resolver> ResolverRegistry::CreateResolver(
+ const char* target, const grpc_channel_args* args,
+ grpc_pollset_set* pollset_set, grpc_combiner* combiner) {
+ GPR_ASSERT(g_state != nullptr);
grpc_uri* uri = nullptr;
char* canonical_target = nullptr;
- grpc_resolver_factory* factory =
- resolve_factory(target, &uri, &canonical_target);
- grpc_resolver* resolver;
- grpc_resolver_args resolver_args;
- memset(&resolver_args, 0, sizeof(resolver_args));
+ ResolverFactory* factory =
+ g_state->FindResolverFactory(target, &uri, &canonical_target);
+ ResolverArgs resolver_args;
resolver_args.uri = uri;
resolver_args.args = args;
resolver_args.pollset_set = pollset_set;
resolver_args.combiner = combiner;
- resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args);
+ OrphanablePtr<Resolver> resolver =
+ factory == nullptr ? nullptr : factory->CreateResolver(resolver_args);
grpc_uri_destroy(uri);
gpr_free(canonical_target);
return resolver;
}
-char* grpc_get_default_authority(const char* target) {
+UniquePtr<char> ResolverRegistry::GetDefaultAuthority(const char* target) {
+ GPR_ASSERT(g_state != nullptr);
grpc_uri* uri = nullptr;
char* canonical_target = nullptr;
- grpc_resolver_factory* factory =
- resolve_factory(target, &uri, &canonical_target);
- char* authority = grpc_resolver_factory_get_default_authority(factory, uri);
+ ResolverFactory* factory =
+ g_state->FindResolverFactory(target, &uri, &canonical_target);
+ UniquePtr<char> authority =
+ factory == nullptr ? nullptr : factory->GetDefaultAuthority(uri);
grpc_uri_destroy(uri);
gpr_free(canonical_target);
return authority;
}
-char* grpc_resolver_factory_add_default_prefix_if_needed(const char* target) {
+UniquePtr<char> ResolverRegistry::AddDefaultPrefixIfNeeded(const char* target) {
+ GPR_ASSERT(g_state != nullptr);
grpc_uri* uri = nullptr;
char* canonical_target = nullptr;
- resolve_factory(target, &uri, &canonical_target);
+ g_state->FindResolverFactory(target, &uri, &canonical_target);
grpc_uri_destroy(uri);
- return canonical_target == nullptr ? gpr_strdup(target) : canonical_target;
+ return UniquePtr<char>(canonical_target == nullptr ? gpr_strdup(target)
+ : canonical_target);
}
+
+} // namespace grpc_core
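The net effect of RegistryState::FindResolverFactory() above is easiest to see with a concrete target. Assuming the stock resolvers are registered and the default prefix is still "dns:///", the lookup proceeds roughly as follows (an illustrative trace, not code from the tree):

// target = "localhost:50051"
//   first parse yields scheme "localhost" -> no factory registered for it
//   canonical_target = "dns:///localhost:50051"
//   second parse yields scheme "dns"      -> dns factory is returned
//
// target = "ipv4:127.0.0.1:1234"
//   first parse yields scheme "ipv4"      -> factory found immediately,
//                                            canonical_target stays null
//
// If neither attempt matches a factory, both strings are re-parsed with error
// logging enabled (suppress_errors == 0), the failure is logged, and nullptr
// is returned.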
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index bbd30df8da..260336de83 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -20,49 +20,62 @@
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
#include "src/core/ext/filters/client_channel/resolver_factory.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/pollset_set.h"
-void grpc_resolver_registry_init();
-void grpc_resolver_registry_shutdown(void);
+namespace grpc_core {
-/** Set the default URI prefix to \a default_prefix. */
-void grpc_resolver_registry_set_default_prefix(const char* default_prefix);
+class ResolverRegistry {
+ public:
+ /// Methods used to create and populate the ResolverRegistry.
+ /// NOT THREAD SAFE -- to be used only during global gRPC
+ /// initialization and shutdown.
+ class Builder {
+ public:
+ /// Global initialization and shutdown hooks.
+ static void InitRegistry();
+ static void ShutdownRegistry();
-/** Register a resolver type.
- URI's of \a scheme will be resolved with the given resolver.
- If \a priority is greater than zero, then the resolver will be eligible
- to resolve names that are passed in with no scheme. Higher priority
- resolvers will be tried before lower priority schemes. */
-void grpc_register_resolver_type(grpc_resolver_factory* factory);
+ /// Sets the default URI prefix to \a default_prefix.
+ /// Calls InitRegistry() if it has not already been called.
+ static void SetDefaultPrefix(const char* default_prefix);
-/** Create a resolver given \a target.
- First tries to parse \a target as a URI. If this succeeds, tries
- to locate a registered resolver factory based on the URI scheme.
- If parsing or location fails, prefixes default_prefix from
- grpc_resolver_registry_init to target, and tries again (if default_prefix
- was not NULL).
- If a resolver factory was found, use it to instantiate a resolver and
- return it.
- If a resolver factory was not found, return NULL.
- \a args is a set of channel arguments to be included in the result
- (typically the set of arguments passed in from the client API).
- \a pollset_set is used to drive IO in the name resolution process, it
- should not be NULL. */
-grpc_resolver* grpc_resolver_create(const char* target,
- const grpc_channel_args* args,
- grpc_pollset_set* pollset_set,
- grpc_combiner* combiner);
+ /// Registers a resolver factory. The factory will be used to create a
+ /// resolver for any URI whose scheme matches that of the factory.
+ /// Calls InitRegistry() if it has not already been called.
+ static void RegisterResolverFactory(UniquePtr<ResolverFactory> factory);
+ };
-/** Find a resolver factory given a name and return an (owned-by-the-caller)
- * reference to it */
-grpc_resolver_factory* grpc_resolver_factory_lookup(const char* name);
+ /// Creates a resolver given \a target.
+ /// First tries to parse \a target as a URI. If this succeeds, tries
+ /// to locate a registered resolver factory based on the URI scheme.
+ /// If parsing fails or there is no factory for the URI's scheme,
+ /// prepends default_prefix to target and tries again.
+ /// If a resolver factory is found, uses it to instantiate a resolver and
+ /// returns it; otherwise, returns nullptr.
+ /// \a args, \a pollset_set, and \a combiner are passed to the factory's
+ /// \a CreateResolver() method.
+ /// \a args are the channel args to be included in resolver results.
+ /// \a pollset_set is used to drive I/O in the name resolution process.
+ /// \a combiner is the combiner under which all resolver calls will be run.
+ static OrphanablePtr<Resolver> CreateResolver(const char* target,
+ const grpc_channel_args* args,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner);
-/** Given a target, return a (freshly allocated with gpr_malloc) string
- representing the default authority to pass from a client. */
-char* grpc_get_default_authority(const char* target);
+ /// Returns the default authority to pass from a client for \a target.
+ static UniquePtr<char> GetDefaultAuthority(const char* target);
-/** Returns a newly allocated string containing \a target, adding the
- default prefix if needed. */
-char* grpc_resolver_factory_add_default_prefix_if_needed(const char* target);
+ /// Returns \a target with the default prefix prepended, if needed.
+ static UniquePtr<char> AddDefaultPrefixIfNeeded(const char* target);
+
+ /// Returns the resolver factory for \a scheme.
+ /// Caller does NOT own the return value.
+ static ResolverFactory* LookupResolverFactory(const char* scheme);
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
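A short usage sketch against this header, reusing the hypothetical ExampleResolverFactory from the earlier sketch; the function names here are illustrative. Registration belongs in a plugin init hook (the Builder methods are not thread safe), while resolver creation can happen later from channel code:

void example_plugin_init(void) {
  grpc_core::ResolverRegistry::Builder::RegisterResolverFactory(
      grpc_core::UniquePtr<grpc_core::ResolverFactory>(
          grpc_core::New<grpc_core::ExampleResolverFactory>()));
}

grpc_core::OrphanablePtr<grpc_core::Resolver> make_resolver_for_channel(
    const char* target, const grpc_channel_args* args,
    grpc_pollset_set* pollset_set, grpc_combiner* combiner) {
  // Returns nullptr if no factory handles target, even after the default
  // prefix has been prepended.
  return grpc_core::ResolverRegistry::CreateResolver(target, args, pollset_set,
                                                     combiner);
}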
diff --git a/src/core/ext/filters/client_channel/retry_throttle.cc b/src/core/ext/filters/client_channel/retry_throttle.cc
index 867d775151..a98e27860a 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.cc
+++ b/src/core/ext/filters/client_channel/retry_throttle.cc
@@ -23,10 +23,11 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
-#include <grpc/support/avl.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
+#include "src/core/lib/avl/avl.h"
+
//
// server_retry_throttle_data
//
@@ -58,9 +59,10 @@ bool grpc_server_retry_throttle_data_record_failure(
// First, check if we are stale and need to be replaced.
get_replacement_throttle_data_if_needed(&throttle_data);
// We decrement milli_tokens by 1000 (1 token) for each failure.
- const int new_value = (int)gpr_atm_no_barrier_clamped_add(
- &throttle_data->milli_tokens, (gpr_atm)-1000, (gpr_atm)0,
- (gpr_atm)throttle_data->max_milli_tokens);
+ const int new_value = static_cast<int>(gpr_atm_no_barrier_clamped_add(
+ &throttle_data->milli_tokens, static_cast<gpr_atm>(-1000),
+ static_cast<gpr_atm>(0),
+ static_cast<gpr_atm>(throttle_data->max_milli_tokens)));
// Retries are allowed as long as the new value is above the threshold
// (max_milli_tokens / 2).
return new_value > throttle_data->max_milli_tokens / 2;
@@ -72,8 +74,10 @@ void grpc_server_retry_throttle_data_record_success(
get_replacement_throttle_data_if_needed(&throttle_data);
// We increment milli_tokens by milli_token_ratio for each success.
gpr_atm_no_barrier_clamped_add(
- &throttle_data->milli_tokens, (gpr_atm)throttle_data->milli_token_ratio,
- (gpr_atm)0, (gpr_atm)throttle_data->max_milli_tokens);
+ &throttle_data->milli_tokens,
+ static_cast<gpr_atm>(throttle_data->milli_token_ratio),
+ static_cast<gpr_atm>(0),
+ static_cast<gpr_atm>(throttle_data->max_milli_tokens));
}
grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref(
@@ -99,7 +103,8 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
int max_milli_tokens, int milli_token_ratio,
grpc_server_retry_throttle_data* old_throttle_data) {
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)gpr_malloc(sizeof(*throttle_data));
+ static_cast<grpc_server_retry_throttle_data*>(
+ gpr_malloc(sizeof(*throttle_data)));
memset(throttle_data, 0, sizeof(*throttle_data));
gpr_ref_init(&throttle_data->refs, 1);
throttle_data->max_milli_tokens = max_milli_tokens;
@@ -111,9 +116,9 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
// we will start out doing the same thing on the new one.
if (old_throttle_data != nullptr) {
double token_fraction =
- (int)gpr_atm_acq_load(&old_throttle_data->milli_tokens) /
- (double)old_throttle_data->max_milli_tokens;
- initial_milli_tokens = (int)(token_fraction * max_milli_tokens);
+ static_cast<int>(gpr_atm_acq_load(&old_throttle_data->milli_tokens)) /
+ static_cast<double>(old_throttle_data->max_milli_tokens);
+ initial_milli_tokens = static_cast<int>(token_fraction * max_milli_tokens);
}
gpr_atm_rel_store(&throttle_data->milli_tokens,
(gpr_atm)initial_milli_tokens);
@@ -131,28 +136,28 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
//
static void* copy_server_name(void* key, void* unused) {
- return gpr_strdup((const char*)key);
+ return gpr_strdup(static_cast<const char*>(key));
}
static long compare_server_name(void* key1, void* key2, void* unused) {
- return strcmp((const char*)key1, (const char*)key2);
+ return strcmp(static_cast<const char*>(key1), static_cast<const char*>(key2));
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)value;
+ static_cast<grpc_server_retry_throttle_data*>(value);
grpc_server_retry_throttle_data_unref(throttle_data);
}
static void* copy_server_retry_throttle_data(void* value, void* unused) {
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)value;
+ static_cast<grpc_server_retry_throttle_data*>(value);
return grpc_server_retry_throttle_data_ref(throttle_data);
}
static void destroy_server_name(void* key, void* unused) { gpr_free(key); }
-static const gpr_avl_vtable avl_vtable = {
+static const grpc_avl_vtable avl_vtable = {
destroy_server_name, copy_server_name, compare_server_name,
destroy_server_retry_throttle_data, copy_server_retry_throttle_data};
@@ -161,29 +166,30 @@ static const gpr_avl_vtable avl_vtable = {
//
static gpr_mu g_mu;
-static gpr_avl g_avl;
+static grpc_avl g_avl;
void grpc_retry_throttle_map_init() {
gpr_mu_init(&g_mu);
- g_avl = gpr_avl_create(&avl_vtable);
+ g_avl = grpc_avl_create(&avl_vtable);
}
void grpc_retry_throttle_map_shutdown() {
gpr_mu_destroy(&g_mu);
- gpr_avl_unref(g_avl, nullptr);
+ grpc_avl_unref(g_avl, nullptr);
}
grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio) {
gpr_mu_lock(&g_mu);
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)gpr_avl_get(g_avl, (char*)server_name,
- nullptr);
+ static_cast<grpc_server_retry_throttle_data*>(
+ grpc_avl_get(g_avl, const_cast<char*>(server_name), nullptr));
if (throttle_data == nullptr) {
// Entry not found. Create a new one.
throttle_data = grpc_server_retry_throttle_data_create(
max_milli_tokens, milli_token_ratio, nullptr);
- g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data, nullptr);
+ g_avl = grpc_avl_add(g_avl, const_cast<char*>(server_name), throttle_data,
+ nullptr);
} else {
if (throttle_data->max_milli_tokens != max_milli_tokens ||
throttle_data->milli_token_ratio != milli_token_ratio) {
@@ -191,7 +197,8 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
// the original one.
throttle_data = grpc_server_retry_throttle_data_create(
max_milli_tokens, milli_token_ratio, throttle_data);
- g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data, nullptr);
+ g_avl = grpc_avl_add(g_avl, const_cast<char*>(server_name), throttle_data,
+ nullptr);
} else {
// Entry found. Increase refcount.
grpc_server_retry_throttle_data_ref(throttle_data);
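Setting the AVL and cast changes aside, the throttling arithmetic in this file is a plain token bucket. A single-threaded restatement for clarity (the shipped code does the same math with gpr_atm_no_barrier_clamped_add on milli_tokens):

#include <algorithm>

struct RetryThrottleSketch {
  int max_milli_tokens;   // bucket capacity in millitokens (1 token = 1000)
  int milli_token_ratio;  // millitokens refunded per successful RPC
  int milli_tokens;       // current level, initially max_milli_tokens

  // Each failure costs one token; retries remain allowed only while the
  // bucket is more than half full.
  bool RecordFailure() {
    milli_tokens = std::max(0, milli_tokens - 1000);
    return milli_tokens > max_milli_tokens / 2;
  }

  // Each success refunds milli_token_ratio millitokens, capped at capacity.
  void RecordSuccess() {
    milli_tokens = std::min(max_milli_tokens, milli_tokens + milli_token_ratio);
  }
};

When a server's throttling parameters change, the replacement data starts at the same fill fraction as the old bucket; that is what the token_fraction computation in grpc_server_retry_throttle_data_create() preserves.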
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 478fd3fbb9..db04f346ec 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -25,7 +25,6 @@
#include <cstring>
#include <grpc/support/alloc.h>
-#include <grpc/support/avl.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
@@ -165,7 +164,7 @@ static void on_subchannel_connected(void* subchannel, grpc_error* error);
*/
static void connection_destroy(void* arg, grpc_error* error) {
- grpc_channel_stack* stk = (grpc_channel_stack*)arg;
+ grpc_channel_stack* stk = static_cast<grpc_channel_stack*>(arg);
grpc_channel_stack_destroy(stk);
gpr_free(stk);
}
@@ -175,7 +174,7 @@ static void connection_destroy(void* arg, grpc_error* error) {
*/
static void subchannel_destroy(void* arg, grpc_error* error) {
- grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
gpr_free((void*)c->filters);
grpc_channel_args_destroy(c->args);
grpc_connectivity_state_destroy(&c->state_tracker);
@@ -247,8 +246,9 @@ static void disconnect(grpc_subchannel* c) {
void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
// add a weak ref and subtract a strong ref (atomically)
- old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
- 1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
+ old_refs = ref_mutate(
+ c, static_cast<gpr_atm>(1) - static_cast<gpr_atm>(1 << INTERNAL_REF_BITS),
+ 1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
disconnect(c);
}
@@ -258,7 +258,8 @@ void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
void grpc_subchannel_weak_unref(
grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
- old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
+ old_refs = ref_mutate(c, -static_cast<gpr_atm>(1),
+ 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
@@ -324,15 +325,15 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
}
GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
- c = (grpc_subchannel*)gpr_zalloc(sizeof(*c));
+ c = static_cast<grpc_subchannel*>(gpr_zalloc(sizeof(*c)));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = (const grpc_channel_filter**)gpr_malloc(
- sizeof(grpc_channel_filter*) * c->num_filters);
+ c->filters = static_cast<const grpc_channel_filter**>(
+ gpr_malloc(sizeof(grpc_channel_filter*) * c->num_filters));
memcpy((void*)c->filters, args->filters,
sizeof(grpc_channel_filter*) * c->num_filters);
} else {
@@ -340,7 +341,7 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
}
c->pollset_set = grpc_pollset_set_create();
grpc_resolved_address* addr =
- (grpc_resolved_address*)gpr_malloc(sizeof(*addr));
+ static_cast<grpc_resolved_address*>(gpr_malloc(sizeof(*addr)));
grpc_get_subchannel_address_arg(args->args, addr);
grpc_resolved_address* new_address = nullptr;
grpc_channel_args* new_args = nullptr;
@@ -397,7 +398,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c,
}
static void on_external_state_watcher_done(void* arg, grpc_error* error) {
- external_state_watcher* w = (external_state_watcher*)arg;
+ external_state_watcher* w = static_cast<external_state_watcher*>(arg);
grpc_closure* follow_up = w->notify;
if (w->pollset_set != nullptr) {
grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set,
@@ -413,7 +414,7 @@ static void on_external_state_watcher_done(void* arg, grpc_error* error) {
}
static void on_alarm(void* arg, grpc_error* error) {
- grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -492,7 +493,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = (external_state_watcher*)gpr_malloc(sizeof(*w));
+ w = static_cast<external_state_watcher*>(gpr_malloc(sizeof(*w)));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -515,7 +516,7 @@ void grpc_subchannel_notify_on_state_change(
static void on_connected_subchannel_connectivity_changed(void* p,
grpc_error* error) {
- state_watcher* connected_subchannel_watcher = (state_watcher*)p;
+ state_watcher* connected_subchannel_watcher = static_cast<state_watcher*>(p);
grpc_subchannel* c = connected_subchannel_watcher->subchannel;
gpr_mu* mu = &c->mu;
@@ -576,7 +577,8 @@ static bool publish_transport_locked(grpc_subchannel* c) {
}
grpc_channel_stack* stk;
grpc_error* error = grpc_channel_stack_builder_finish(
- builder, 0, 1, connection_destroy, nullptr, (void**)&stk);
+ builder, 0, 1, connection_destroy, nullptr,
+ reinterpret_cast<void**>(&stk));
if (error != GRPC_ERROR_NONE) {
grpc_transport_destroy(c->connecting_result.transport);
gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
@@ -587,8 +589,8 @@ static bool publish_transport_locked(grpc_subchannel* c) {
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- state_watcher* connected_subchannel_watcher =
- (state_watcher*)gpr_zalloc(sizeof(*connected_subchannel_watcher));
+ state_watcher* connected_subchannel_watcher = static_cast<state_watcher*>(
+ gpr_zalloc(sizeof(*connected_subchannel_watcher)));
connected_subchannel_watcher->subchannel = c;
connected_subchannel_watcher->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&connected_subchannel_watcher->closure,
@@ -623,7 +625,7 @@ static bool publish_transport_locked(grpc_subchannel* c) {
}
static void on_subchannel_connected(void* arg, grpc_error* error) {
- grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "on_subchannel_connected");
@@ -658,14 +660,13 @@ static void on_subchannel_connected(void* arg, grpc_error* error) {
*/
static void subchannel_call_destroy(void* call, grpc_error* error) {
- grpc_subchannel_call* c = (grpc_subchannel_call*)call;
+ GPR_TIMER_SCOPE("grpc_subchannel_call_unref.destroy", 0);
+ grpc_subchannel_call* c = static_cast<grpc_subchannel_call*>(call);
GPR_ASSERT(c->schedule_closure_after_destroy != nullptr);
- GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_core::ConnectedSubchannel* connection = c->connection;
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
c->schedule_closure_after_destroy);
connection->Unref(DEBUG_LOCATION, "subchannel_call");
- GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
@@ -687,12 +688,11 @@ void grpc_subchannel_call_unref(
void grpc_subchannel_call_process_op(grpc_subchannel_call* call,
grpc_transport_stream_op_batch* batch) {
- GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
+ GPR_TIMER_SCOPE("grpc_subchannel_call_process_op", 0);
grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
- GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
}
grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel>
@@ -745,8 +745,9 @@ grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr) {
}
namespace grpc_core {
+
ConnectedSubchannel::ConnectedSubchannel(grpc_channel_stack* channel_stack)
- : grpc_core::RefCountedWithTracing(&grpc_trace_stream_refcount),
+ : RefCountedWithTracing<ConnectedSubchannel>(&grpc_trace_stream_refcount),
channel_stack_(channel_stack) {}
ConnectedSubchannel::~ConnectedSubchannel() {
@@ -777,11 +778,13 @@ void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
- *call = (grpc_subchannel_call*)gpr_arena_alloc(
+ *call = static_cast<grpc_subchannel_call*>(gpr_arena_alloc(
args.arena,
- sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size);
+ sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size));
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
- Ref(DEBUG_LOCATION, "subchannel_call");
+ RefCountedPtr<ConnectedSubchannel> connection =
+ Ref(DEBUG_LOCATION, "subchannel_call");
+ connection.release(); // Ref is passed to the grpc_subchannel_call object.
(*call)->connection = this;
const grpc_call_element_args call_args = {
callstk, /* call_stack */
@@ -803,4 +806,5 @@ grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
return GRPC_ERROR_NONE;
}
+
} // namespace grpc_core
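One behavioral subtlety in the CreateCall() hunk above: Ref() now returns a RefCountedPtr, so the code takes the smart pointer and immediately release()s it. Spelled out, the ownership flow is:

// RefCountedPtr<ConnectedSubchannel> connection =
//     Ref(DEBUG_LOCATION, "subchannel_call");   // strong ref taken
// connection.release();        // the smart pointer gives the ref up...
// (*call)->connection = this;  // ...and the arena-allocated call keeps it
//
// The matching drop is the existing Unref() in subchannel_call_destroy():
//
// connection->Unref(DEBUG_LOCATION, "subchannel_call");
//
// i.e. release() is how a counted reference is handed off to manually managed
// code now that Ref() no longer returns void.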
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index b7593ec911..d2b45ae9c8 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -68,7 +68,8 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#endif
namespace grpc_core {
-class ConnectedSubchannel : public grpc_core::RefCountedWithTracing {
+
+class ConnectedSubchannel : public RefCountedWithTracing<ConnectedSubchannel> {
public:
struct CallArgs {
grpc_polling_entity* pollent;
@@ -93,6 +94,7 @@ class ConnectedSubchannel : public grpc_core::RefCountedWithTracing {
private:
grpc_channel_stack* channel_stack_;
};
+
} // namespace grpc_core
grpc_subchannel* grpc_subchannel_ref(
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 052b047f43..d1dc5ee970 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -22,15 +22,15 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/avl.h>
#include <grpc/support/string_util.h>
-#include <grpc/support/tls.h>
+#include "src/core/lib/avl/avl.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gpr/tls.h"
// a map of subchannel_key --> subchannel, used for detecting connections
// to the same destination in order to share them
-static gpr_avl g_subchannel_index;
+static grpc_avl g_subchannel_index;
static gpr_mu g_mu;
@@ -45,13 +45,14 @@ static bool g_force_creation = false;
static grpc_subchannel_key* create_key(
const grpc_subchannel_args* args,
grpc_channel_args* (*copy_channel_args)(const grpc_channel_args* args)) {
- grpc_subchannel_key* k = (grpc_subchannel_key*)gpr_malloc(sizeof(*k));
+ grpc_subchannel_key* k =
+ static_cast<grpc_subchannel_key*>(gpr_malloc(sizeof(*k)));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters = (const grpc_channel_filter**)gpr_malloc(
- sizeof(*k->args.filters) * k->args.filter_count);
- memcpy((grpc_channel_filter*)k->args.filters, args->filters,
- sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.filters = static_cast<const grpc_channel_filter**>(
+ gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count));
+ memcpy(reinterpret_cast<grpc_channel_filter*>(k->args.filters),
+ args->filters, sizeof(*k->args.filters) * k->args.filter_count);
} else {
k->args.filters = nullptr;
}
@@ -82,22 +83,22 @@ int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
}
void grpc_subchannel_key_destroy(grpc_subchannel_key* k) {
- gpr_free((grpc_channel_args*)k->args.filters);
- grpc_channel_args_destroy((grpc_channel_args*)k->args.args);
+ gpr_free(reinterpret_cast<grpc_channel_args*>(k->args.filters));
+ grpc_channel_args_destroy(const_cast<grpc_channel_args*>(k->args.args));
gpr_free(k);
}
static void sck_avl_destroy(void* p, void* user_data) {
- grpc_subchannel_key_destroy((grpc_subchannel_key*)p);
+ grpc_subchannel_key_destroy(static_cast<grpc_subchannel_key*>(p));
}
static void* sck_avl_copy(void* p, void* unused) {
- return subchannel_key_copy((grpc_subchannel_key*)p);
+ return subchannel_key_copy(static_cast<grpc_subchannel_key*>(p));
}
static long sck_avl_compare(void* a, void* b, void* unused) {
- return grpc_subchannel_key_compare((grpc_subchannel_key*)a,
- (grpc_subchannel_key*)b);
+ return grpc_subchannel_key_compare(static_cast<grpc_subchannel_key*>(a),
+ static_cast<grpc_subchannel_key*>(b));
}
static void scv_avl_destroy(void* p, void* user_data) {
@@ -109,7 +110,7 @@ static void* scv_avl_copy(void* p, void* unused) {
return p;
}
-static const gpr_avl_vtable subchannel_avl_vtable = {
+static const grpc_avl_vtable subchannel_avl_vtable = {
sck_avl_destroy, // destroy_key
sck_avl_copy, // copy_key
sck_avl_compare, // compare_keys
@@ -118,7 +119,7 @@ static const gpr_avl_vtable subchannel_avl_vtable = {
};
void grpc_subchannel_index_init(void) {
- g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
+ g_subchannel_index = grpc_avl_create(&subchannel_avl_vtable);
gpr_mu_init(&g_mu);
gpr_ref_init(&g_refcount, 1);
}
@@ -133,7 +134,7 @@ void grpc_subchannel_index_shutdown(void) {
void grpc_subchannel_index_unref(void) {
if (gpr_unref(&g_refcount)) {
gpr_mu_destroy(&g_mu);
- gpr_avl_unref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(g_subchannel_index, grpc_core::ExecCtx::Get());
}
}
@@ -143,13 +144,13 @@ grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key) {
// Lock, and take a reference to the subchannel index.
// We don't need to do the search under a lock as avl's are immutable.
gpr_mu_lock(&g_mu);
- gpr_avl index = gpr_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
gpr_mu_unlock(&g_mu);
grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- (grpc_subchannel*)gpr_avl_get(index, key, grpc_core::ExecCtx::Get()),
+ (grpc_subchannel*)grpc_avl_get(index, key, grpc_core::ExecCtx::Get()),
"index_find");
- gpr_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, grpc_core::ExecCtx::Get());
return c;
}
@@ -165,11 +166,13 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
- gpr_avl index = gpr_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index =
+ grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = (grpc_subchannel*)gpr_avl_get(index, key, grpc_core::ExecCtx::Get());
+ c = static_cast<grpc_subchannel*>(
+ grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
if (c != nullptr) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -178,25 +181,25 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
need_to_unref_constructed = true;
} else {
// no -> update the avl and compare/swap
- gpr_avl updated =
- gpr_avl_add(gpr_avl_ref(index, grpc_core::ExecCtx::Get()),
- subchannel_key_copy(key),
- GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"),
- grpc_core::ExecCtx::Get());
+ grpc_avl updated =
+ grpc_avl_add(grpc_avl_ref(index, grpc_core::ExecCtx::Get()),
+ subchannel_key_copy(key),
+ GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"),
+ grpc_core::ExecCtx::Get());
// it may happen (but it's expected to be unlikely)
// that some other thread has changed the index:
// compare/swap here to check that, and retry as necessary
gpr_mu_lock(&g_mu);
if (index.root == g_subchannel_index.root) {
- GPR_SWAP(gpr_avl, updated, g_subchannel_index);
+ GPR_SWAP(grpc_avl, updated, g_subchannel_index);
c = constructed;
}
gpr_mu_unlock(&g_mu);
- gpr_avl_unref(updated, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
}
- gpr_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, grpc_core::ExecCtx::Get());
}
if (need_to_unref_constructed) {
@@ -213,33 +216,34 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
- gpr_avl index = gpr_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index =
+ grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
gpr_mu_unlock(&g_mu);
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel* c =
- (grpc_subchannel*)gpr_avl_get(index, key, grpc_core::ExecCtx::Get());
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(
+ grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
if (c != constructed) {
- gpr_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, grpc_core::ExecCtx::Get());
break;
}
// compare and swap the update (some other thread may have
// mutated the index behind us)
- gpr_avl updated =
- gpr_avl_remove(gpr_avl_ref(index, grpc_core::ExecCtx::Get()), key,
- grpc_core::ExecCtx::Get());
+ grpc_avl updated =
+ grpc_avl_remove(grpc_avl_ref(index, grpc_core::ExecCtx::Get()), key,
+ grpc_core::ExecCtx::Get());
gpr_mu_lock(&g_mu);
if (index.root == g_subchannel_index.root) {
- GPR_SWAP(gpr_avl, updated, g_subchannel_index);
+ GPR_SWAP(grpc_avl, updated, g_subchannel_index);
done = true;
}
gpr_mu_unlock(&g_mu);
- gpr_avl_unref(updated, grpc_core::ExecCtx::Get());
- gpr_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, grpc_core::ExecCtx::Get());
}
}
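Both grpc_subchannel_index_register() and grpc_subchannel_index_unregister() above use the same lock-light pattern over the immutable AVL: snapshot under the mutex, build an updated copy outside it, and publish only if the root is unchanged. A stripped-down sketch of one iteration (key copying and value ref-counting omitted):

static bool try_publish_update(grpc_subchannel_key* key, void* value) {
  // Snapshot the current index; AVLs are immutable, so the snapshot can be
  // read without holding the lock.
  gpr_mu_lock(&g_mu);
  grpc_avl index = grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
  gpr_mu_unlock(&g_mu);
  // Build the updated version from the snapshot.
  grpc_avl updated =
      grpc_avl_add(grpc_avl_ref(index, grpc_core::ExecCtx::Get()), key, value,
                   grpc_core::ExecCtx::Get());
  // Publish only if nobody else swapped the index in the meantime.
  bool swapped = false;
  gpr_mu_lock(&g_mu);
  if (index.root == g_subchannel_index.root) {
    GPR_SWAP(grpc_avl, updated, g_subchannel_index);
    swapped = true;
  }
  gpr_mu_unlock(&g_mu);
  grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
  grpc_avl_unref(index, grpc_core::ExecCtx::Get());
  return swapped;  // the callers above retry (or bail out) when this is false
}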
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index c5f2d6822c..cd07a6fbf5 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -45,7 +45,7 @@ static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = (char*)gpr_malloc(pfx_len + 1);
+ line_prefix = static_cast<char*>(gpr_malloc(pfx_len + 1));
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -159,7 +159,7 @@ static void parse_query_parts(grpc_uri* uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
uri->query_parts_values =
- (char**)gpr_malloc(uri->num_query_parts * sizeof(char**));
+ static_cast<char**>(gpr_malloc(uri->num_query_parts * sizeof(char**)));
for (size_t i = 0; i < uri->num_query_parts; i++) {
char** query_param_parts;
size_t num_query_param_parts;
@@ -271,7 +271,7 @@ grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors) {
fragment_end = i;
}
- uri = (grpc_uri*)gpr_zalloc(sizeof(*uri));
+ uri = static_cast<grpc_uri*>(gpr_zalloc(sizeof(*uri)));
uri->scheme = decode_and_copy_component(uri_text, scheme_begin, scheme_end);
uri->authority =
decode_and_copy_component(uri_text, authority_begin, authority_end);
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index c430f3d2d4..76c1204090 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -37,7 +37,7 @@
// The on_complete callback used when sending a cancel_error batch down the
// filter stack. Yields the call combiner when the batch returns.
static void yield_call_combiner(void* arg, grpc_error* ignored) {
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
+ grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
"got on_complete from cancel_stream batch");
GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
@@ -46,8 +46,9 @@ static void yield_call_combiner(void* arg, grpc_error* ignored) {
// This is called via the call combiner, so access to deadline_state is
// synchronized.
static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
deadline_state, grpc_schedule_on_exec_ctx));
@@ -58,8 +59,9 @@ static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) {
// Timer callback.
static void timer_callback(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
if (error != GRPC_ERROR_CANCELLED) {
error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
@@ -85,7 +87,8 @@ static void start_timer_if_needed(grpc_call_element* elem,
if (deadline == GRPC_MILLIS_INF_FUTURE) {
return;
}
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
grpc_closure* closure = nullptr;
switch (deadline_state->timer_state) {
case GRPC_DEADLINE_STATE_PENDING:
@@ -126,7 +129,7 @@ static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) {
// Callback run when the call is complete.
static void on_complete(void* arg, grpc_error* error) {
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
+ grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
cancel_timer_if_needed(deadline_state);
// Invoke the next callback.
GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error));
@@ -151,9 +154,9 @@ struct start_timer_after_init_state {
};
static void start_timer_after_init(void* arg, grpc_error* error) {
struct start_timer_after_init_state* state =
- (struct start_timer_after_init_state*)arg;
+ static_cast<struct start_timer_after_init_state*>(arg);
grpc_deadline_state* deadline_state =
- (grpc_deadline_state*)state->elem->call_data;
+ static_cast<grpc_deadline_state*>(state->elem->call_data);
if (!state->in_call_combiner) {
// We are initially called without holding the call combiner, so we
// need to bounce ourselves into it.
@@ -173,7 +176,8 @@ void grpc_deadline_state_init(grpc_call_element* elem,
grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
grpc_millis deadline) {
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
deadline_state->call_stack = call_stack;
deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
@@ -187,7 +191,8 @@ void grpc_deadline_state_init(grpc_call_element* elem,
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
struct start_timer_after_init_state* state =
- (struct start_timer_after_init_state*)gpr_zalloc(sizeof(*state));
+ static_cast<struct start_timer_after_init_state*>(
+ gpr_zalloc(sizeof(*state)));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
@@ -197,20 +202,23 @@ void grpc_deadline_state_init(grpc_call_element* elem,
}
void grpc_deadline_state_destroy(grpc_call_element* elem) {
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
cancel_timer_if_needed(deadline_state);
}
void grpc_deadline_state_reset(grpc_call_element* elem,
grpc_millis new_deadline) {
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
cancel_timer_if_needed(deadline_state);
start_timer_if_needed(elem, new_deadline);
}
void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
if (op->cancel_stream) {
cancel_timer_if_needed(deadline_state);
} else {
@@ -278,8 +286,8 @@ static void client_start_transport_stream_op_batch(
// Callback for receiving initial metadata on the server.
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- server_call_data* calld = (server_call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
// Get deadline from metadata and start the timer if needed.
start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
// Invoke the next callback.
@@ -290,7 +298,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
// Method for starting a call op for server filter.
static void server_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- server_call_data* calld = (server_call_data*)elem->call_data;
+ server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
if (op->cancel_stream) {
cancel_timer_if_needed(&calld->base.deadline_state);
} else {
@@ -360,7 +368,8 @@ static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder,
return grpc_deadline_checking_enabled(
grpc_channel_stack_builder_get_channel_arguments(builder))
? grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr)
+ builder, static_cast<const grpc_channel_filter*>(arg),
+ nullptr, nullptr)
: true;
}
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index 5584d50018..80643f8584 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -138,8 +138,8 @@ static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
}
static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(elem, calld->recv_initial_metadata);
} else {
@@ -150,8 +150,8 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
static void recv_trailing_metadata_on_complete(void* user_data,
grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (error == GRPC_ERROR_NONE) {
error =
client_filter_incoming_metadata(elem, calld->recv_trailing_metadata);
@@ -162,8 +162,8 @@ static void recv_trailing_metadata_on_complete(void* user_data,
}
static void send_message_on_complete(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_byte_stream_cache_destroy(&calld->send_message_cache);
GRPC_CLOSURE_RUN(calld->original_send_message_on_complete,
GRPC_ERROR_REF(error));
@@ -189,7 +189,8 @@ static grpc_error* pull_slice_from_send_message(call_data* calld) {
// and on_send_message_next_done() will be invoked when it is complete.
static grpc_error* read_all_available_send_message_data(call_data* calld) {
while (grpc_byte_stream_next(&calld->send_message_caching_stream.base,
- ~(size_t)0, &calld->on_send_message_next_done)) {
+ ~static_cast<size_t>(0),
+ &calld->on_send_message_next_done)) {
grpc_error* error = pull_slice_from_send_message(calld);
if (error != GRPC_ERROR_NONE) return error;
if (calld->send_message_bytes_read ==
@@ -202,8 +203,8 @@ static grpc_error* read_all_available_send_message_data(call_data* calld) {
// Async callback for grpc_byte_stream_next().
static void on_send_message_next_done(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
calld->send_message_batch, error, calld->call_combiner);
@@ -224,7 +225,8 @@ static void on_send_message_next_done(void* arg, grpc_error* error) {
}
static char* slice_buffer_to_string(grpc_slice_buffer* slice_buffer) {
- char* payload_bytes = (char*)gpr_malloc(slice_buffer->length + 1);
+ char* payload_bytes =
+ static_cast<char*>(gpr_malloc(slice_buffer->length + 1));
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@@ -240,7 +242,7 @@ static char* slice_buffer_to_string(grpc_slice_buffer* slice_buffer) {
// append the base64-encoded query for a GET request.
static grpc_error* update_path_for_get(grpc_call_element* elem,
grpc_transport_stream_op_batch* batch) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_slice path_slice =
GRPC_MDVALUE(batch->payload->send_initial_metadata.send_initial_metadata
->idx.named.path->md);
@@ -253,19 +255,21 @@ static grpc_error* update_path_for_get(grpc_call_element* elem,
false /* multi_line */);
grpc_slice path_with_query_slice = GRPC_SLICE_MALLOC(estimated_len);
/* memcopy individual pieces into this slice */
- char* write_ptr = (char*)GRPC_SLICE_START_PTR(path_with_query_slice);
- char* original_path = (char*)GRPC_SLICE_START_PTR(path_slice);
+ char* write_ptr =
+ reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_with_query_slice);
+ char* original_path =
+ reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_slice);
memcpy(write_ptr, original_path, GRPC_SLICE_LENGTH(path_slice));
write_ptr += GRPC_SLICE_LENGTH(path_slice);
*write_ptr++ = '?';
char* payload_bytes =
slice_buffer_to_string(&calld->send_message_cache.cache_buffer);
- grpc_base64_encode_core((char*)write_ptr, payload_bytes,
+ grpc_base64_encode_core(write_ptr, payload_bytes,
batch->payload->send_message.send_message->length,
true /* url_safe */, false /* multi_line */);
gpr_free(payload_bytes);
/* remove trailing unused memory and add trailing 0 to terminate string */
- char* t = (char*)GRPC_SLICE_START_PTR(path_with_query_slice);
+ char* t = reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_with_query_slice);
/* safe to use strlen since base64_encode will always add '\0' */
path_with_query_slice =
grpc_slice_sub_no_ref(path_with_query_slice, 0, strlen(t));
@@ -287,9 +291,9 @@ static void remove_if_present(grpc_metadata_batch* batch,
static void hc_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* channeld = (channel_data*)elem->channel_data;
- GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
+ GPR_TIMER_SCOPE("hc_start_transport_stream_op_batch", 0);
if (batch->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -404,13 +408,12 @@ done:
} else if (!batch_will_be_handled_asynchronously) {
grpc_call_next_op(elem, batch);
}
- GPR_TIMER_END("hc_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
@@ -459,7 +462,7 @@ static size_t max_payload_size_from_args(const grpc_channel_args* args) {
gpr_log(GPR_ERROR, "%s: must be an integer",
GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET);
} else {
- return (size_t)args->args[i].value.integer;
+ return static_cast<size_t>(args->args[i].value.integer);
}
}
}
@@ -520,7 +523,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args* args,
/* Constructor for channel_data */
static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != nullptr);
chand->static_scheme = scheme_from_args(args->channel_args);
@@ -535,7 +538,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_channel_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GRPC_MDELEM_UNREF(chand->user_agent);
}
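As in subchannel.cc above, the paired GPR_TIMER_BEGIN/GPR_TIMER_END markers in this file are replaced by a single scope-based marker, so the timer closes on every return path. The converted functions all share the same shape (body elided here):

static void hc_start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  GPR_TIMER_SCOPE("hc_start_transport_stream_op_batch", 0);
  // ... function body unchanged; no trailing GPR_TIMER_END is needed,
  // including on the early-out error paths.
}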
diff --git a/src/core/ext/filters/http/http_filters_plugin.cc b/src/core/ext/filters/http/http_filters_plugin.cc
index deec77c96f..56fe1e5c24 100644
--- a/src/core/ext/filters/http/http_filters_plugin.cc
+++ b/src/core/ext/filters/http/http_filters_plugin.cc
@@ -43,7 +43,7 @@ static bool is_building_http_like_transport(
static bool maybe_add_optional_filter(grpc_channel_stack_builder* builder,
void* arg) {
if (!is_building_http_like_transport(builder)) return true;
- optional_filter* filtarg = (optional_filter*)arg;
+ optional_filter* filtarg = static_cast<optional_filter*>(arg);
const grpc_channel_args* channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(
@@ -58,7 +58,8 @@ static bool maybe_add_required_filter(grpc_channel_stack_builder* builder,
void* arg) {
return is_building_http_like_transport(builder)
? grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr)
+ builder, static_cast<const grpc_channel_filter*>(arg),
+ nullptr, nullptr)
: true;
}
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 0218ec6e40..73220a0ea1 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -80,8 +80,8 @@ struct channel_data {
static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* channeld = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true;
@@ -103,8 +103,8 @@ static grpc_error* process_send_initial_metadata(
static grpc_error* process_send_initial_metadata(
grpc_call_element* elem, grpc_metadata_batch* initial_metadata,
bool* has_compression_algorithm) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* channeld = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
*has_compression_algorithm = false;
grpc_compression_algorithm compression_algorithm;
grpc_stream_compression_algorithm stream_compression_algorithm =
@@ -194,15 +194,15 @@ static grpc_error* process_send_initial_metadata(
}
static void send_message_on_complete(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_slice_buffer_reset_and_unref_internal(&calld->slices);
GRPC_CLOSURE_RUN(calld->original_send_message_on_complete,
GRPC_ERROR_REF(error));
}
static void send_message_batch_continue(grpc_call_element* elem) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Note: The call to grpc_call_next_op() results in yielding the
// call combiner, so we need to clear calld->send_message_batch
// before we do that.
@@ -213,7 +213,7 @@ static void send_message_batch_continue(grpc_call_element* elem) {
}
static void finish_send_message(grpc_call_element* elem) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Compress the data if appropriate.
grpc_slice_buffer tmp;
grpc_slice_buffer_init(&tmp);
@@ -226,7 +226,8 @@ static void finish_send_message(grpc_call_element* elem) {
const char* algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
- const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
+ const float savings_ratio = 1.0f - static_cast<float>(after_size) /
+ static_cast<float>(before_size);
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
gpr_log(GPR_DEBUG,
@@ -264,7 +265,7 @@ static void finish_send_message(grpc_call_element* elem) {
static void fail_send_message_batch_in_call_combiner(void* arg,
grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (calld->send_message_batch != nullptr) {
grpc_transport_stream_op_batch_finish_with_failure(
calld->send_message_batch, GRPC_ERROR_REF(error), calld->call_combiner);
@@ -289,10 +290,10 @@ static grpc_error* pull_slice_from_send_message(call_data* calld) {
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
static void continue_reading_send_message(grpc_call_element* elem) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
while (grpc_byte_stream_next(
- calld->send_message_batch->payload->send_message.send_message, ~(size_t)0,
- &calld->on_send_message_next_done)) {
+ calld->send_message_batch->payload->send_message.send_message,
+ ~static_cast<size_t>(0), &calld->on_send_message_next_done)) {
grpc_error* error = pull_slice_from_send_message(calld);
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
@@ -310,8 +311,8 @@ static void continue_reading_send_message(grpc_call_element* elem) {
// Async callback for grpc_byte_stream_next().
static void on_send_message_next_done(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(calld, error);
@@ -333,8 +334,8 @@ static void on_send_message_next_done(void* arg, grpc_error* error) {
}
static void start_send_message_batch(void* arg, grpc_error* unused) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (skip_compression(
elem,
calld->send_message_batch->payload->send_message.send_message->flags,
@@ -347,8 +348,8 @@ static void start_send_message_batch(void* arg, grpc_error* unused) {
static void compress_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
- call_data* calld = (call_data*)elem->call_data;
- GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
+ GPR_TIMER_SCOPE("compress_start_transport_stream_op_batch", 0);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Handle cancel_stream.
if (batch->cancel_stream) {
GRPC_ERROR_UNREF(calld->cancel_error);
@@ -371,7 +372,7 @@ static void compress_start_transport_stream_op_batch(
} else if (calld->cancel_error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
- goto done;
+ return;
}
// Handle send_initial_metadata.
if (batch->send_initial_metadata) {
@@ -383,7 +384,7 @@ static void compress_start_transport_stream_op_batch(
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(batch, error,
calld->call_combiner);
- goto done;
+ return;
}
calld->send_initial_metadata_state = has_compression_algorithm
? HAS_COMPRESSION_ALGORITHM
@@ -412,21 +413,19 @@ static void compress_start_transport_stream_op_batch(
GRPC_CALL_COMBINER_STOP(
calld->call_combiner,
"send_message batch pending send_initial_metadata");
- goto done;
+ return;
}
start_send_message_batch(elem, GRPC_ERROR_NONE);
} else {
// Pass control down the stack.
grpc_call_next_op(elem, batch);
}
-done:
- GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->call_combiner = args->call_combiner;
calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
@@ -443,7 +442,7 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_slice_buffer_destroy_internal(&calld->slices);
GRPC_ERROR_UNREF(calld->cancel_error);
}
@@ -451,7 +450,7 @@ static void destroy_call_elem(grpc_call_element* elem,
/* Constructor for channel_data */
static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
- channel_data* channeld = (channel_data*)elem->channel_data;
+ channel_data* channeld = static_cast<channel_data*>(elem->channel_data);
channeld->enabled_algorithms_bitset =
grpc_channel_args_compression_algorithm_get_states(args->channel_args);
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 508a3bf9fc..63bc2bd59f 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -95,7 +95,7 @@ static void add_error(const char* error_name, grpc_error** cumulative,
static grpc_error* server_filter_incoming_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_error* error = GRPC_ERROR_NONE;
static const char* error_name = "Failed processing incoming headers";
@@ -204,7 +204,7 @@ static grpc_error* server_filter_incoming_metadata(grpc_call_element* elem,
* query parameter which is base64 encoded request payload. */
const char k_query_separator = '?';
grpc_slice path_slice = GRPC_MDVALUE(b->idx.named.path->md);
- uint8_t* path_ptr = (uint8_t*)GRPC_SLICE_START_PTR(path_slice);
+ uint8_t* path_ptr = GRPC_SLICE_START_PTR(path_slice);
size_t path_length = GRPC_SLICE_LENGTH(path_slice);
/* offset of the character '?' */
size_t offset = 0;
@@ -224,10 +224,11 @@ static grpc_error* server_filter_incoming_metadata(grpc_call_element* elem,
/* decode payload from query and add to the slice buffer to be returned */
const int k_url_safe = 1;
- grpc_slice_buffer_add(&calld->read_slice_buffer,
- grpc_base64_decode_with_len(
- (const char*)GRPC_SLICE_START_PTR(query_slice),
- GRPC_SLICE_LENGTH(query_slice), k_url_safe));
+ grpc_slice_buffer_add(
+ &calld->read_slice_buffer,
+ grpc_base64_decode_with_len(
+ reinterpret_cast<const char*> GRPC_SLICE_START_PTR(query_slice),
+ GRPC_SLICE_LENGTH(query_slice), k_url_safe));
grpc_slice_buffer_stream_init(&calld->read_stream,
&calld->read_slice_buffer, 0);
calld->seen_path_with_query = true;
@@ -262,8 +263,8 @@ static grpc_error* server_filter_incoming_metadata(grpc_call_element* elem,
}
static void hs_on_recv(void* user_data, grpc_error* err) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(elem, calld->recv_initial_metadata);
} else {
@@ -273,13 +274,14 @@ static void hs_on_recv(void* user_data, grpc_error* err) {
}
static void hs_on_complete(void* user_data, grpc_error* err) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != nullptr) {
- *calld->pp_recv_message = calld->payload_bin_delivered
- ? nullptr
- : (grpc_byte_stream*)&calld->read_stream;
+ *calld->pp_recv_message =
+ calld->payload_bin_delivered
+ ? nullptr
+ : reinterpret_cast<grpc_byte_stream*>(&calld->read_stream);
// Re-enter call combiner for recv_message_ready, since the surface
// code will release the call combiner for each callback it receives.
GRPC_CALL_COMBINER_START(calld->call_combiner, calld->recv_message_ready,
@@ -292,8 +294,8 @@ static void hs_on_complete(void* user_data, grpc_error* err) {
}
static void hs_recv_message_ready(void* user_data, grpc_error* err) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->seen_path_with_query) {
// Do nothing. This is probably a GET request, and payload will be
// returned in hs_on_complete callback.
@@ -309,7 +311,7 @@ static void hs_recv_message_ready(void* user_data, grpc_error* err) {
static grpc_error* hs_mutate_op(grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
/* grab pointers to our data from the call element */
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (op->send_initial_metadata) {
grpc_error* error = GRPC_ERROR_NONE;
@@ -367,8 +369,8 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
static void hs_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- call_data* calld = (call_data*)elem->call_data;
- GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
+ GPR_TIMER_SCOPE("hs_start_transport_stream_op_batch", 0);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_error* error = hs_mutate_op(elem, op);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(op, error,
@@ -376,14 +378,13 @@ static void hs_start_transport_stream_op_batch(
} else {
grpc_call_next_op(elem, op);
}
- GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
/* grab pointers to our data from the call element */
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
/* initialize members */
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
@@ -400,7 +401,7 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_slice_buffer_destroy_internal(&calld->read_slice_buffer);
}
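
For context on the '?'-scan in server_filter_incoming_metadata(): the server filter accepts GET-style requests whose payload arrives base64-encoded in the query part of the :path value. A simplified sketch of that split, using std::string_view instead of grpc_slice and omitting the base64 decode that the real filter performs afterwards:

    #include <cstdio>
    #include <string_view>

    // Split a ":path" value such as "/svc/Method?BASE64PAYLOAD" into the
    // method path and the (still base64-encoded) payload in the query string.
    bool split_path_and_query(std::string_view path, std::string_view* method,
                              std::string_view* query) {
      size_t offset = path.find('?');
      if (offset == std::string_view::npos) return false;  // no query payload
      *method = path.substr(0, offset);
      *query = path.substr(offset + 1);
      return true;
    }

    int main() {
      std::string_view method, query;
      if (split_path_and_query("/pkg.Service/Method?AAAA", &method, &query)) {
        std::printf("method=%.*s query=%.*s\n", static_cast<int>(method.size()),
                    method.data(), static_cast<int>(query.size()), query.data());
      }
    }
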
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index a414229768..79b144a6eb 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -57,8 +57,8 @@ struct channel_data {
} // namespace
static void on_initial_md_ready(void* user_data, grpc_error* err) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != nullptr) {
@@ -88,7 +88,7 @@ static void on_initial_md_ready(void* user_data, grpc_error* err) {
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -111,7 +111,7 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@@ -140,7 +140,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@@ -173,8 +173,8 @@ static void destroy_channel_elem(grpc_channel_element* elem) {
static grpc_filtered_mdelem lr_trailing_md_filter(void* user_data,
grpc_mdelem md) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@@ -184,8 +184,8 @@ static grpc_filtered_mdelem lr_trailing_md_filter(void* user_data,
static void lr_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
- call_data* calld = (call_data*)elem->call_data;
+ GPR_TIMER_SCOPE("lr_start_transport_stream_op_batch", 0);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -204,8 +204,6 @@ static void lr_start_transport_stream_op_batch(
"LR trailing metadata filtering error"));
}
grpc_call_next_op(elem, op);
-
- GPR_TIMER_END("lr_start_transport_stream_op_batch", 0);
}
const grpc_channel_filter grpc_server_load_reporting_filter = {
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
index 9d1dfcbb4c..667c0c56ef 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
@@ -41,7 +41,8 @@ static bool maybe_add_server_load_reporting_filter(
grpc_channel_stack_builder* builder, void* arg) {
const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- const grpc_channel_filter* filter = (const grpc_channel_filter*)arg;
+ const grpc_channel_filter* filter =
+ static_cast<const grpc_channel_filter*>(arg);
grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_iterator_find(builder, filter->name);
const bool already_has_load_reporting_filter =
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index 7b86e4cd6c..abb9d69036 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -37,6 +37,12 @@
#define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \
{ DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
+/* States for idle_state in channel_data */
+#define MAX_IDLE_STATE_INIT ((gpr_atm)0)
+#define MAX_IDLE_STATE_SEEN_EXIT_IDLE ((gpr_atm)1)
+#define MAX_IDLE_STATE_SEEN_ENTER_IDLE ((gpr_atm)2)
+#define MAX_IDLE_STATE_TIMER_SET ((gpr_atm)3)
+
namespace {
struct channel_data {
/* We take a reference to the channel stack for the timer callback */
@@ -64,7 +70,7 @@ struct channel_data {
grpc_millis max_connection_age_grace;
/* Closure to run when the channel's idle duration reaches max_connection_idle
and should be closed gracefully */
- grpc_closure close_max_idle_channel;
+ grpc_closure max_idle_timer_cb;
/* Closure to run when the channel reaches its max age and should be closed
gracefully */
grpc_closure close_max_age_channel;
@@ -85,31 +91,122 @@ struct channel_data {
grpc_connectivity_state connectivity_state;
/* Number of active calls */
gpr_atm call_count;
+ /* TODO(zyc): C++lize this state machine */
+ /* 'idle_state' holds the states of max_idle_timer and channel idleness.
+ It can contain one of the following values:
+ +--------------------------------+----------------+---------+
+ | idle_state | max_idle_timer | channel |
+ +--------------------------------+----------------+---------+
+ | MAX_IDLE_STATE_INIT | unset | busy |
+ | MAX_IDLE_STATE_TIMER_SET | set, valid | idle |
+ | MAX_IDLE_STATE_SEEN_EXIT_IDLE | set, invalid | busy |
+ | MAX_IDLE_STATE_SEEN_ENTER_IDLE | set, invalid | idle |
+ +--------------------------------+----------------+---------+
+
+   MAX_IDLE_STATE_INIT: The initial and final state of 'idle_state'. The
+   channel has one or more active calls, and the timer is not set. Note that
+   we may put a virtual call to hold this state at channel initialization or
+   shutdown, so that the channel won't enter other states.
+
+   MAX_IDLE_STATE_TIMER_SET: The state after the timer is set and no calls
+   have arrived after the timer is set. The channel must have 0 active calls
+   in this state. If the timer is fired in this state, we will close the
+   channel due to idleness.
+
+   MAX_IDLE_STATE_SEEN_EXIT_IDLE: The state after the timer is set and at
+   least one call has arrived after the timer is set. The channel must have
+   one or more active calls in this state. If the timer is fired in this
+   state, we won't reschedule it.
+
+   MAX_IDLE_STATE_SEEN_ENTER_IDLE: The state after the timer is set and at
+   least one call has arrived after the timer is set, BUT the channel
+   currently has 0 active calls. If the timer is fired in this state, we
+   will reschedule it.
+
+   max_idle_timer will not be cancelled (unless the channel is shutting down).
+   If the timer callback is called while max_idle_timer is valid (i.e.
+   idle_state is MAX_IDLE_STATE_TIMER_SET), the channel will be closed due to
+   idleness; otherwise the channel is left unchanged.
+
+ State transitions:
+ MAX_IDLE_STATE_INIT <-------3------ MAX_IDLE_STATE_SEEN_EXIT_IDLE
+ ^ | ^ ^ |
+ | | | | |
+ 1 2 +-----------4------------+ 6 7
+ | | | | |
+ | v | | v
+ MAX_IDLE_STATE_TIMER_SET <----5------ MAX_IDLE_STATE_SEEN_ENTER_IDLE
+
+ For 1, 3, 5 : See max_idle_timer_cb() function
+ For 2, 7 : See decrease_call_count() function
+ For 4, 6 : See increase_call_count() function */
+ gpr_atm idle_state;
+ /* Time when the channel finished its last outstanding call, in grpc_millis */
+ gpr_atm last_enter_idle_time_millis;
};
} // namespace
/* Increase the number of active calls. Before the increment, if there are no
   calls, the max_idle_timer should be cancelled. */
static void increase_call_count(channel_data* chand) {
+ /* Exit idle */
if (gpr_atm_full_fetch_add(&chand->call_count, 1) == 0) {
- grpc_timer_cancel(&chand->max_idle_timer);
+ while (true) {
+ gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state);
+ switch (idle_state) {
+ case MAX_IDLE_STATE_TIMER_SET:
+          /* max_idle_timer_cb may have already set idle_state to
+             MAX_IDLE_STATE_INIT; in that case we don't need to set it to
+             MAX_IDLE_STATE_SEEN_EXIT_IDLE */
+ gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_TIMER_SET,
+ MAX_IDLE_STATE_SEEN_EXIT_IDLE);
+ return;
+ case MAX_IDLE_STATE_SEEN_ENTER_IDLE:
+ gpr_atm_rel_store(&chand->idle_state, MAX_IDLE_STATE_SEEN_EXIT_IDLE);
+ return;
+ default:
+ /* try again */
+ break;
+ }
+ }
}
}
/* Decrease the number of active calls. After the decrement, if there are no
   calls, the max_idle_timer should be started. */
static void decrease_call_count(channel_data* chand) {
+ /* Enter idle */
if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
- GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
- grpc_timer_init(
- &chand->max_idle_timer,
- grpc_core::ExecCtx::Get()->Now() + chand->max_connection_idle,
- &chand->close_max_idle_channel);
+ gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis,
+ (gpr_atm)grpc_core::ExecCtx::Get()->Now());
+ while (true) {
+ gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state);
+ switch (idle_state) {
+ case MAX_IDLE_STATE_INIT:
+ GRPC_CHANNEL_STACK_REF(chand->channel_stack,
+ "max_age max_idle_timer");
+ grpc_timer_init(
+ &chand->max_idle_timer,
+ grpc_core::ExecCtx::Get()->Now() + chand->max_connection_idle,
+ &chand->max_idle_timer_cb);
+ gpr_atm_rel_store(&chand->idle_state, MAX_IDLE_STATE_TIMER_SET);
+ return;
+ case MAX_IDLE_STATE_SEEN_EXIT_IDLE:
+ if (gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_SEEN_EXIT_IDLE,
+ MAX_IDLE_STATE_SEEN_ENTER_IDLE)) {
+ return;
+ }
+ break;
+ default:
+ /* try again */
+ break;
+ }
+ }
}
}
static void start_max_idle_timer_after_init(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
/* Decrease call_count. If there are no active calls at this time,
max_idle_timer will start here. If the number of active calls is not 0,
max_idle_timer will start after all the active calls end. */
@@ -119,7 +216,7 @@ static void start_max_idle_timer_after_init(void* arg, grpc_error* error) {
}
static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
@@ -137,7 +234,7 @@ static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
static void start_max_age_grace_timer_after_goaway_op(void* arg,
grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
@@ -152,26 +249,64 @@ static void start_max_age_grace_timer_after_goaway_op(void* arg,
"max_age start_max_age_grace_timer_after_goaway_op");
}
-static void close_max_idle_channel(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+static void close_max_idle_channel(channel_data* chand) {
+ /* Prevent the max idle timer from being set again */
+ gpr_atm_no_barrier_fetch_add(&chand->call_count, 1);
+ grpc_transport_op* op = grpc_make_transport_op(nullptr);
+ op->goaway_error =
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("max_idle"),
+ GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR);
+ grpc_channel_element* elem =
+ grpc_channel_stack_element(chand->channel_stack, 0);
+ elem->filter->start_transport_op(elem, op);
+}
+
+static void max_idle_timer_cb(void* arg, grpc_error* error) {
+ channel_data* chand = static_cast<channel_data*>(arg);
if (error == GRPC_ERROR_NONE) {
- /* Prevent the max idle timer from being set again */
- gpr_atm_no_barrier_fetch_add(&chand->call_count, 1);
- grpc_transport_op* op = grpc_make_transport_op(nullptr);
- op->goaway_error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("max_idle"),
- GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR);
- grpc_channel_element* elem =
- grpc_channel_stack_element(chand->channel_stack, 0);
- elem->filter->start_transport_op(elem, op);
- } else if (error != GRPC_ERROR_CANCELLED) {
- GRPC_LOG_IF_ERROR("close_max_idle_channel", error);
+ bool try_again = true;
+ while (try_again) {
+ gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state);
+ switch (idle_state) {
+ case MAX_IDLE_STATE_TIMER_SET:
+ close_max_idle_channel(chand);
+          /* MAX_IDLE_STATE_INIT is a final state, so we don't have to check
+           * whether idle_state has been changed afterwards */
+ gpr_atm_rel_store(&chand->idle_state, MAX_IDLE_STATE_INIT);
+ try_again = false;
+ break;
+ case MAX_IDLE_STATE_SEEN_EXIT_IDLE:
+ if (gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_SEEN_EXIT_IDLE,
+ MAX_IDLE_STATE_INIT)) {
+ try_again = false;
+ }
+ break;
+ case MAX_IDLE_STATE_SEEN_ENTER_IDLE:
+ GRPC_CHANNEL_STACK_REF(chand->channel_stack,
+ "max_age max_idle_timer");
+ grpc_timer_init(&chand->max_idle_timer,
+ static_cast<grpc_millis>(gpr_atm_no_barrier_load(
+ &chand->last_enter_idle_time_millis)) +
+ chand->max_connection_idle,
+ &chand->max_idle_timer_cb);
+          /* idle_state may have already been set to
+             MAX_IDLE_STATE_SEEN_EXIT_IDLE by increase_call_count(); in that
+             case we don't need to set it to MAX_IDLE_STATE_TIMER_SET */
+ gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_SEEN_ENTER_IDLE,
+ MAX_IDLE_STATE_TIMER_SET);
+ try_again = false;
+ break;
+ default:
+ /* try again */
+ break;
+ }
+ }
}
GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_idle_timer");
}
static void close_max_age_channel(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = false;
gpr_mu_unlock(&chand->max_age_timer_mu);
@@ -193,7 +328,7 @@ static void close_max_age_channel(void* arg, grpc_error* error) {
}
static void force_close_max_age_channel(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = false;
gpr_mu_unlock(&chand->max_age_timer_mu);
@@ -211,7 +346,7 @@ static void force_close_max_age_channel(void* arg, grpc_error* error) {
}
static void channel_connectivity_changed(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->on_connectivity_state_change = &chand->channel_connectivity_changed;
@@ -249,15 +384,15 @@ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
double result = multiplier * value;
/* INT_MAX - 0.5 converts the value to float, so that result will not be
cast to int implicitly before the comparison. */
- return result > ((double)GRPC_MILLIS_INF_FUTURE) - 0.5
+ return result > (static_cast<double>(GRPC_MILLIS_INF_FUTURE)) - 0.5
? GRPC_MILLIS_INF_FUTURE
- : (grpc_millis)result;
+ : static_cast<grpc_millis>(result);
}
/* Constructor for call_data. */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
increase_call_count(chand);
return GRPC_ERROR_NONE;
}
@@ -266,14 +401,14 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
decrease_call_count(chand);
}
/* Constructor for channel_data. */
static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
gpr_mu_init(&chand->max_age_timer_mu);
chand->max_age_timer_pending = false;
chand->max_age_grace_timer_pending = false;
@@ -288,6 +423,9 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: DEFAULT_MAX_CONNECTION_IDLE_MS;
+ chand->idle_state = MAX_IDLE_STATE_INIT;
+ gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis,
+ GRPC_MILLIS_INF_PAST);
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_MS)) {
@@ -311,8 +449,8 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
}
}
- GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel,
- chand, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&chand->max_idle_timer_cb, max_idle_timer_cb, chand,
+ grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&chand->close_max_age_channel, close_max_age_channel, chand,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&chand->force_close_max_age_channel,
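
The max_age changes above stop cancelling max_idle_timer and instead let increase_call_count()/decrease_call_count() and the timer callback coordinate through the lock-free idle_state machine described in the comment block. A compressed sketch of that compare-and-swap pattern using std::atomic — an illustration of the technique under the same four states, not the filter's actual code (arm_idle_timer() is a hypothetical stand-in for grpc_timer_init()):

    #include <atomic>

    enum IdleState : int {
      kInit = 0,           // timer unset, channel busy
      kSeenExitIdle = 1,   // timer set but stale, channel busy
      kSeenEnterIdle = 2,  // timer set but stale, channel idle
      kTimerSet = 3,       // timer set and valid, channel idle
    };

    std::atomic<int> idle_state{kInit};

    void arm_idle_timer() {}  // hypothetical hook that would start the timer

    // Called when a call arrives on an idle channel (cf. increase_call_count).
    void on_exit_idle() {
      while (true) {
        int s = idle_state.load(std::memory_order_acquire);
        if (s == kTimerSet) {
          // The timer callback may already have reset the state; a failed CAS
          // means there is nothing left to do.
          idle_state.compare_exchange_strong(s, kSeenExitIdle,
                                             std::memory_order_release);
          return;
        }
        if (s == kSeenEnterIdle) {
          idle_state.store(kSeenExitIdle, std::memory_order_release);
          return;
        }
        // Other states are transient here; retry (the filter's "try again").
      }
    }

    // Called when the last call finishes (cf. decrease_call_count).
    void on_enter_idle() {
      while (true) {
        int s = idle_state.load(std::memory_order_acquire);
        if (s == kInit) {
          arm_idle_timer();
          idle_state.store(kTimerSet, std::memory_order_release);
          return;
        }
        if (s == kSeenExitIdle &&
            idle_state.compare_exchange_strong(s, kSeenEnterIdle,
                                               std::memory_order_release)) {
          return;  // timer already pending; it re-arms itself when it fires
        }
        // Other states are transient here; retry.
      }
    }

    int main() {
      on_enter_idle();  // channel goes idle: timer armed, state = kTimerSet
      on_exit_idle();   // call arrives: state = kSeenExitIdle, timer left alone
    }

Because the timer is never cancelled, the callback decides what to do from idle_state alone: close the channel (kTimerSet), re-arm from last_enter_idle_time_millis (kSeenEnterIdle), or drop back to kInit (kSeenExitIdle).
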
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 8d76c4a837..49d9ae60ae 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -42,14 +42,14 @@ typedef struct {
static void* refcounted_message_size_limits_ref(void* value) {
refcounted_message_size_limits* limits =
- (refcounted_message_size_limits*)value;
+ static_cast<refcounted_message_size_limits*>(value);
gpr_ref(&limits->refs);
return value;
}
static void refcounted_message_size_limits_unref(void* value) {
refcounted_message_size_limits* limits =
- (refcounted_message_size_limits*)value;
+ static_cast<refcounted_message_size_limits*>(value);
if (gpr_unref(&limits->refs)) {
gpr_free(value);
}
@@ -78,8 +78,8 @@ static void* refcounted_message_size_limits_create_from_json(
}
}
refcounted_message_size_limits* value =
- (refcounted_message_size_limits*)gpr_malloc(
- sizeof(refcounted_message_size_limits));
+ static_cast<refcounted_message_size_limits*>(
+ gpr_malloc(sizeof(refcounted_message_size_limits)));
gpr_ref_init(&value->refs, 1);
value->limits.max_send_size = max_request_message_bytes;
value->limits.max_recv_size = max_response_message_bytes;
@@ -110,10 +110,11 @@ struct channel_data {
// Callback invoked when we receive a message. Here we check the max
// receive message size.
static void recv_message_ready(void* user_data, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (*calld->recv_message != nullptr && calld->limits.max_recv_size >= 0 &&
- (*calld->recv_message)->length > (size_t)calld->limits.max_recv_size) {
+ (*calld->recv_message)->length >
+ static_cast<size_t>(calld->limits.max_recv_size)) {
char* message_string;
gpr_asprintf(&message_string,
"Received message larger than max (%u vs. %d)",
@@ -138,11 +139,11 @@ static void recv_message_ready(void* user_data, grpc_error* error) {
// Start transport stream op.
static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Check max send message size.
if (op->send_message && calld->limits.max_send_size >= 0 &&
op->payload->send_message.send_message->length >
- (size_t)calld->limits.max_send_size) {
+ static_cast<size_t>(calld->limits.max_send_size)) {
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
op->payload->send_message.send_message->length,
@@ -170,8 +171,8 @@ static void start_transport_stream_op_batch(
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = nullptr;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
@@ -183,8 +184,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
calld->limits = chand->limits;
if (chand->method_limit_table != nullptr) {
refcounted_message_size_limits* limits =
- (refcounted_message_size_limits*)grpc_method_config_table_get(
- chand->method_limit_table, args->path);
+ static_cast<refcounted_message_size_limits*>(
+ grpc_method_config_table_get(chand->method_limit_table,
+ args->path));
if (limits != nullptr) {
if (limits->limits.max_send_size >= 0 &&
(limits->limits.max_send_size < calld->limits.max_send_size ||
@@ -242,7 +244,7 @@ message_size_limits get_message_size_limits(
static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
chand->limits = get_message_size_limits(args->channel_args);
// Get method config table from channel args.
const grpc_arg* channel_arg =
@@ -265,7 +267,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
// Destructor for channel_data.
static void destroy_channel_elem(grpc_channel_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_slice_hash_table_unref(chand->method_limit_table);
}
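
The message_size_filter hunks keep per-method limits in a manually refcounted struct (gpr_ref/gpr_unref around a gpr_malloc'd block). A minimal sketch of that ownership pattern with std::atomic and invented names, not the filter's real types:

    #include <atomic>

    // Invented stand-in for refcounted_message_size_limits.
    struct RefcountedLimits {
      std::atomic<int> refs{1};  // one owner at creation, like gpr_ref_init(.., 1)
      int max_send_size = -1;    // -1 means "no limit", matching the filter's use
      int max_recv_size = -1;
    };

    void limits_ref(RefcountedLimits* l) {
      l->refs.fetch_add(1, std::memory_order_relaxed);
    }

    void limits_unref(RefcountedLimits* l) {
      // Destroy on the last unref, like gpr_unref() returning true.
      if (l->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
        delete l;
      }
    }

    int main() {
      auto* l = new RefcountedLimits{};  // created by the channel (one ref)
      limits_ref(l);                     // e.g. stored in a method config table
      limits_unref(l);                   // table entry released
      limits_unref(l);                   // channel destroyed; object freed here
    }
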
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
index 88bb8c71cc..3092ed2056 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@@ -53,8 +53,8 @@ static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,
// Callback invoked when we receive an initial metadata.
static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)user_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (GRPC_ERROR_NONE == error) {
grpc_mdelem md;
@@ -75,7 +75,7 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
// Start transport stream op.
static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Inject callback for receiving initial metadata
if (op->recv_initial_metadata) {
@@ -102,7 +102,7 @@ static void start_transport_stream_op_batch(
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->next_recv_initial_metadata_ready = nullptr;
calld->workaround_active = false;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
diff --git a/src/core/ext/filters/workarounds/workaround_utils.cc b/src/core/ext/filters/workarounds/workaround_utils.cc
index 9d76582ea2..850ed75ec9 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.cc
+++ b/src/core/ext/filters/workarounds/workaround_utils.cc
@@ -27,14 +27,14 @@ static void destroy_user_agent_md(void* user_agent_md) {
grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md) {
grpc_workaround_user_agent_md* user_agent_md =
- (grpc_workaround_user_agent_md*)grpc_mdelem_get_user_data(
- md, destroy_user_agent_md);
+ static_cast<grpc_workaround_user_agent_md*>(
+ grpc_mdelem_get_user_data(md, destroy_user_agent_md));
if (nullptr != user_agent_md) {
return user_agent_md;
}
- user_agent_md = (grpc_workaround_user_agent_md*)gpr_malloc(
- sizeof(grpc_workaround_user_agent_md));
+ user_agent_md = static_cast<grpc_workaround_user_agent_md*>(
+ gpr_malloc(sizeof(grpc_workaround_user_agent_md)));
for (int i = 0; i < GRPC_MAX_WORKAROUND_ID; i++) {
if (ua_parser[i]) {
user_agent_md->workaround_active[i] = ua_parser[i](md);