Diffstat (limited to 'src/core/ext/filters')
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc | 73
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 499
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.h | 14
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.h | 42
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.cc | 22
-rw-r--r--  src/core/ext/filters/client_channel/connector.h | 40
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc | 87
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h | 134
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc | 53
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 528
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc | 20
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h | 18
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc | 28
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc | 108
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h | 40
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 166
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 208
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc | 108
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.h | 52
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.h | 80
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_registry.cc | 14
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_registry.h | 6
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.cc | 42
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.h | 12
-rw-r--r--  src/core/ext/filters/client_channel/resolver.cc | 34
-rw-r--r--  src/core/ext/filters/client_channel/resolver.h | 48
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 164
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h | 18
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc | 96
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc | 180
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h | 30
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc | 42
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc | 118
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc | 100
-rw-r--r--  src/core/ext/filters/client_channel/resolver_factory.h | 38
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.cc | 58
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.h | 20
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc | 317
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 114
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.cc | 83
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.h | 28
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.cc | 36
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.h | 20
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.cc | 150
-rw-r--r--  src/core/ext/filters/http/http_filters_plugin.cc | 32
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.cc | 145
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.h | 2
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.cc | 134
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.cc | 54
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.h | 2
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc | 14
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_plugin.h | 10
-rw-r--r--  src/core/ext/filters/workarounds/workaround_utils.cc | 12
-rw-r--r--  src/core/ext/filters/workarounds/workaround_utils.h | 2
58 files changed, 2257 insertions, 2246 deletions
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 31a8fc39ce..82a5edca93 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -29,9 +29,9 @@
#include "src/core/lib/surface/completion_queue.h"
grpc_connectivity_state grpc_channel_check_connectivity_state(
- grpc_channel *channel, int try_to_connect) {
+ grpc_channel* channel, int try_to_connect) {
/* forward through to the underlying client channel */
- grpc_channel_element *client_channel_elem =
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_connectivity_state state;
@@ -66,15 +66,15 @@ typedef struct {
grpc_closure watcher_timer_init;
grpc_timer alarm;
grpc_connectivity_state state;
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
grpc_cq_completion completion_storage;
- grpc_channel *channel;
- grpc_error *error;
- void *tag;
+ grpc_channel* channel;
+ grpc_error* error;
+ void* tag;
} state_watcher;
-static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
- grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+static void delete_state_watcher(grpc_exec_ctx* exec_ctx, state_watcher* w) {
+ grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
if (client_channel_elem->filter == &grpc_client_channel_filter) {
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
@@ -86,10 +86,10 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
gpr_free(w);
}
-static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
- grpc_cq_completion *ignored) {
+static void finished_completion(grpc_exec_ctx* exec_ctx, void* pw,
+ grpc_cq_completion* ignored) {
bool should_delete = false;
- state_watcher *w = (state_watcher *)pw;
+ state_watcher* w = (state_watcher*)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@@ -106,12 +106,12 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
}
}
-static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
- bool due_to_completion, grpc_error *error) {
+static void partly_done(grpc_exec_ctx* exec_ctx, state_watcher* w,
+ bool due_to_completion, grpc_error* error) {
if (due_to_completion) {
grpc_timer_cancel(exec_ctx, &w->alarm);
} else {
- grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+ grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
grpc_client_channel_watch_connectivity_state(
exec_ctx, client_channel_elem,
@@ -161,31 +161,31 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
GRPC_ERROR_UNREF(error);
}
-static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
- grpc_error *error) {
- partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
+static void watch_complete(grpc_exec_ctx* exec_ctx, void* pw,
+ grpc_error* error) {
+ partly_done(exec_ctx, (state_watcher*)pw, true, GRPC_ERROR_REF(error));
}
-static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
- grpc_error *error) {
- partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
+static void timeout_complete(grpc_exec_ctx* exec_ctx, void* pw,
+ grpc_error* error) {
+ partly_done(exec_ctx, (state_watcher*)pw, false, GRPC_ERROR_REF(error));
}
-int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {
- grpc_channel_element *client_channel_elem =
+int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
return grpc_client_channel_num_external_connectivity_watchers(
client_channel_elem);
}
typedef struct watcher_timer_init_arg {
- state_watcher *w;
+ state_watcher* w;
gpr_timespec deadline;
} watcher_timer_init_arg;
-static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
+static void watcher_timer_init(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ watcher_timer_init_arg* wa = (watcher_timer_init_arg*)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
grpc_timespec_to_millis_round_up(wa->deadline),
@@ -193,19 +193,19 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(wa);
}
-int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
- grpc_channel_element *client_channel_elem =
+int grpc_channel_support_connectivity_watcher(grpc_channel* channel) {
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
}
void grpc_channel_watch_connectivity_state(
- grpc_channel *channel, grpc_connectivity_state last_observed_state,
- gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
- grpc_channel_element *client_channel_elem =
+ grpc_channel* channel, grpc_connectivity_state last_observed_state,
+ gpr_timespec deadline, grpc_completion_queue* cq, void* tag) {
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
+ state_watcher* w = (state_watcher*)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -213,8 +213,9 @@ void grpc_channel_watch_connectivity_state(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"cq=%p, tag=%p)",
- 7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
- (int)deadline.clock_type, cq, tag));
+ 7,
+ (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
+ (int)deadline.clock_type, cq, tag));
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
@@ -230,8 +231,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
- watcher_timer_init_arg *wa =
- (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg* wa =
+ (watcher_timer_init_arg*)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
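Aside: the hunks above only move the `*` in declarations, but they do show the full public connectivity-watch API. As a rough usage sketch (the channel, completion queue, tag value, and 5-second deadline are illustrative assumptions, not part of this diff), a caller pairs the watch with a completion-queue poll:

/* Sketch: ask to be notified of a state change within 5 seconds.
 * Assumes `channel` and `cq` were created elsewhere; the tag is an
 * arbitrary caller-chosen value echoed back on the completion queue. */
grpc_connectivity_state state =
    grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
gpr_timespec deadline = gpr_time_add(
    gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
grpc_channel_watch_connectivity_state(channel, state, deadline, cq, (void*)1);
grpc_event ev = grpc_completion_queue_next(cq, deadline, NULL /* reserved */);
/* On success, ev.type == GRPC_OP_COMPLETE and ev.tag == (void*)1; the state
 * either changed from `state` or the deadline expired. */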
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 00c51ba543..b18fa20c65 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -76,24 +76,24 @@ typedef struct {
wait_for_ready_value wait_for_ready;
} method_parameters;
-static method_parameters *method_parameters_ref(
- method_parameters *method_params) {
+static method_parameters* method_parameters_ref(
+ method_parameters* method_params) {
gpr_ref(&method_params->refs);
return method_params;
}
-static void method_parameters_unref(method_parameters *method_params) {
+static void method_parameters_unref(method_parameters* method_params) {
if (gpr_unref(&method_params->refs)) {
gpr_free(method_params);
}
}
-static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
- method_parameters_unref((method_parameters *)value);
+static void method_parameters_free(grpc_exec_ctx* exec_ctx, void* value) {
+ method_parameters_unref((method_parameters*)value);
}
-static bool parse_wait_for_ready(grpc_json *field,
- wait_for_ready_value *wait_for_ready) {
+static bool parse_wait_for_ready(grpc_json* field,
+ wait_for_ready_value* wait_for_ready) {
if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
return false;
}
@@ -102,13 +102,13 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
-static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
+static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
- char *buf = gpr_strdup(field->value);
+ char* buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
- char *decimal_point = strchr(buf, '.');
+ char* decimal_point = strchr(buf, '.');
int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
@@ -141,10 +141,10 @@ static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
return true;
}
-static void *method_parameters_create_from_json(const grpc_json *json) {
+static void* method_parameters_create_from_json(const grpc_json* json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
grpc_millis timeout = 0;
- for (grpc_json *field = json->child; field != NULL; field = field->next) {
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
@@ -154,8 +154,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (!parse_timeout(field, &timeout)) return NULL;
}
}
- method_parameters *value =
- (method_parameters *)gpr_malloc(sizeof(method_parameters));
+ method_parameters* value =
+ (method_parameters*)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -170,24 +170,24 @@ struct external_connectivity_watcher;
typedef struct client_channel_channel_data {
/** resolver for this channel */
- grpc_resolver *resolver;
+ grpc_resolver* resolver;
/** have we started resolving this channel */
bool started_resolving;
/** is deadline checking enabled? */
bool deadline_checking_enabled;
/** client channel factory */
- grpc_client_channel_factory *client_channel_factory;
+ grpc_client_channel_factory* client_channel_factory;
/** combiner protecting all variables below in this data structure */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
/** currently active load balancer */
- grpc_lb_policy *lb_policy;
+ grpc_lb_policy* lb_policy;
/** retry throttle data */
- grpc_server_retry_throttle_data *retry_throttle_data;
+ grpc_server_retry_throttle_data* retry_throttle_data;
/** maps method names to method_parameters structs */
- grpc_slice_hash_table *method_params_table;
+ grpc_slice_hash_table* method_params_table;
/** incoming resolver result - set by resolver.next() */
- grpc_channel_args *resolver_result;
+ grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
grpc_closure_list waiting_for_resolver_result_closures;
/** resolver callback */
@@ -197,42 +197,42 @@ typedef struct client_channel_channel_data {
/** when an lb_policy arrives, should we try to exit idle */
bool exit_idle_when_lb_policy_arrives;
/** owning stack */
- grpc_channel_stack *owning_stack;
+ grpc_channel_stack* owning_stack;
/** interested parties (owned) */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/* external_connectivity_watcher_list head is guarded by its own mutex, since
* counts need to be grabbed immediately without polling on a cq */
gpr_mu external_connectivity_watcher_list_mu;
- struct external_connectivity_watcher *external_connectivity_watcher_list_head;
+ struct external_connectivity_watcher* external_connectivity_watcher_list_head;
/* the following properties are guarded by a mutex since API's require them
to be instantaneously available */
gpr_mu info_mu;
- char *info_lb_policy_name;
+ char* info_lb_policy_name;
/** service config in JSON form */
- char *info_service_config_json;
+ char* info_service_config_json;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
resolver, to watch for state changes from the lb_policy. When a state
change is seen, we update the channel, and create a new watcher. */
typedef struct {
- channel_data *chand;
+ channel_data* chand;
grpc_closure on_changed;
grpc_connectivity_state state;
- grpc_lb_policy *lb_policy;
+ grpc_lb_policy* lb_policy;
} lb_policy_connectivity_watcher;
-static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
+static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
+ grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state);
-static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
- channel_data *chand,
+static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
+ channel_data* chand,
grpc_connectivity_state state,
- grpc_error *error,
- const char *reason) {
+ grpc_error* error,
+ const char* reason) {
/* TODO: Improve failure handling:
* - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
* - Hand over pending picks from old policies during the switch that happens
@@ -259,9 +259,9 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
reason);
}
-static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
+static void on_lb_policy_state_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@@ -285,11 +285,11 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_free(w);
}
-static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
+static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
+ grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state) {
- lb_policy_connectivity_watcher *w =
- (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
+ lb_policy_connectivity_watcher* w =
+ (lb_policy_connectivity_watcher*)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -300,8 +300,8 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
-static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
- channel_data *chand) {
+static void start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ channel_data* chand) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
}
@@ -313,19 +313,19 @@ static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
}
typedef struct {
- char *server_name;
- grpc_server_retry_throttle_data *retry_throttle_data;
+ char* server_name;
+ grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;
-static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
- service_config_parsing_state *parsing_state =
- (service_config_parsing_state *)arg;
+static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
+ service_config_parsing_state* parsing_state =
+ (service_config_parsing_state*)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
int max_milli_tokens = 0;
int milli_token_ratio = 0;
- for (grpc_json *sub_field = field->child; sub_field != NULL;
+ for (grpc_json* sub_field = field->child; sub_field != NULL;
sub_field = sub_field->next) {
if (sub_field->key == NULL) return;
if (strcmp(sub_field->key, "maxTokens") == 0) {
@@ -341,7 +341,7 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
size_t whole_len = strlen(sub_field->value);
uint32_t multiplier = 1;
uint32_t decimal_value = 0;
- const char *decimal_point = strchr(sub_field->value, '.');
+ const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != NULL) {
whole_len = (size_t)(decimal_point - sub_field->value);
multiplier = 1000;
@@ -372,25 +372,25 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
}
}
-static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- channel_data *chand = (channel_data *)arg;
+static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ channel_data* chand = (channel_data*)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
- char *lb_policy_name_dup = NULL;
+ char* lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
- grpc_lb_policy *new_lb_policy = NULL;
- char *service_config_json = NULL;
- grpc_server_retry_throttle_data *retry_throttle_data = NULL;
- grpc_slice_hash_table *method_params_table = NULL;
+ grpc_lb_policy* new_lb_policy = NULL;
+ char* service_config_json = NULL;
+ grpc_server_retry_throttle_data* retry_throttle_data = NULL;
+ grpc_slice_hash_table* method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
- const char *lb_policy_name = NULL;
- const grpc_arg *channel_arg =
+ const char* lb_policy_name = NULL;
+ const grpc_arg* channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
@@ -401,8 +401,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses *addresses =
- (grpc_lb_addresses *)channel_arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ (grpc_lb_addresses*)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -453,14 +453,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
service_config_json = gpr_strdup(channel_arg->value.string);
- grpc_service_config *service_config =
+ grpc_service_config* service_config =
grpc_service_config_create(service_config_json);
if (service_config != NULL) {
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
GPR_ASSERT(channel_arg != NULL);
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
- grpc_uri *uri =
+ grpc_uri* uri =
grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
service_config_parsing_state parsing_state;
@@ -563,7 +563,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
&chand->waiting_for_resolver_result_closures);
} else { // Not shutting down.
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- grpc_error *state_error =
+ grpc_error* state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (new_lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -595,12 +595,12 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- grpc_transport_op *op = (grpc_transport_op *)arg;
- grpc_channel_element *elem =
- (grpc_channel_element *)op->handler_private.extra_arg;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void start_transport_op_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ grpc_transport_op* op = (grpc_transport_op*)arg;
+ grpc_channel_element* elem =
+ (grpc_channel_element*)op->handler_private.extra_arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -651,10 +651,10 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
-static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_start_transport_op(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_transport_op* op) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -671,10 +671,10 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
-static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- const grpc_channel_info *info) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_get_channel_info(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ const grpc_channel_info* info) {
+ channel_data* chand = (channel_data*)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -691,10 +691,10 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
}
/* Constructor for channel_data */
-static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -715,7 +715,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client_channel");
grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory.
- const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
+ const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -726,9 +726,9 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(
- (grpc_client_channel_factory *)arg->value.pointer.p);
+ (grpc_client_channel_factory*)arg->value.pointer.p);
chand->client_channel_factory =
- (grpc_client_channel_factory *)arg->value.pointer.p;
+ (grpc_client_channel_factory*)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@@ -739,8 +739,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"server uri arg must be a string");
}
- char *proxy_name = NULL;
- grpc_channel_args *new_args = NULL;
+ char* proxy_name = NULL;
+ grpc_channel_args* new_args = NULL;
grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
&proxy_name, &new_args);
// Instantiate resolver.
@@ -758,21 +758,22 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_resolver *resolver = (grpc_resolver *)arg;
+static void shutdown_resolver_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_resolver* resolver = (grpc_resolver*)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
/* Destructor for channel_data */
-static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
- grpc_combiner_scheduler(chand->combiner)),
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
@@ -832,45 +833,45 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
grpc_millis deadline;
- gpr_arena *arena;
- grpc_call_stack *owning_call;
- grpc_call_combiner *call_combiner;
+ gpr_arena* arena;
+ grpc_call_stack* owning_call;
+ grpc_call_combiner* call_combiner;
- grpc_server_retry_throttle_data *retry_throttle_data;
- method_parameters *method_params;
+ grpc_server_retry_throttle_data* retry_throttle_data;
+ method_parameters* method_params;
- grpc_subchannel_call *subchannel_call;
- grpc_error *error;
+ grpc_subchannel_call* subchannel_call;
+ grpc_error* error;
- grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
+ grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
- grpc_connected_subchannel *connected_subchannel;
+ grpc_connected_subchannel* connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
- grpc_polling_entity *pollent;
+ grpc_polling_entity* pollent;
- grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
+ grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
- grpc_transport_stream_op_batch *initial_metadata_batch;
+ grpc_transport_stream_op_batch* initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
grpc_closure on_complete;
- grpc_closure *original_on_complete;
+ grpc_closure* original_on_complete;
} call_data;
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
return calld->subchannel_call;
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_add(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
+ call_data* calld, grpc_transport_stream_op_batch* batch) {
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->initial_metadata_batch == NULL);
calld->initial_metadata_batch = batch;
@@ -882,9 +883,9 @@ static void waiting_for_pick_batches_add(
}
// This is called via the call combiner, so access to calld is synchronized.
-static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@@ -895,10 +896,10 @@ static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = (call_data *)elem->call_data;
+static void waiting_for_pick_batches_fail(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_error* error) {
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
@@ -926,9 +927,9 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *ignored) {
- call_data *calld = (call_data *)arg;
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* ignored) {
+ call_data* calld = (call_data*)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@@ -938,13 +939,14 @@ static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void waiting_for_pick_batches_resume(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
- " pending batches to subchannel_call=%p",
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: sending %" PRIuPTR
+ " pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
}
@@ -964,10 +966,10 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
-static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void apply_service_config_to_call_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -977,7 +979,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
- calld->method_params = (method_parameters *)grpc_method_config_table_get(
+ calld->method_params = (method_parameters*)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
@@ -997,11 +999,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void create_subchannel_call_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_error* error) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
calld->pollent, // pollent
calld->path, // path
@@ -1011,7 +1013,7 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
calld->subchannel_call_context, // context
calld->call_combiner // call_combiner
};
- grpc_error *new_error = grpc_connected_subchannel_create_call(
+ grpc_error* new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args,
&calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1028,10 +1030,10 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
}
// Invoked when a pick is completed, on both success or failure.
-static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void pick_done_locked(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_error* error) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
@@ -1057,10 +1059,10 @@ static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
// either (a) the pick was deferred pending a resolver result or (b) the
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
-static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem, grpc_error *error) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void async_pick_done_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem, grpc_error* error) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
pick_done_locked(exec_ctx, elem, error);
@@ -1068,11 +1070,11 @@ static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void pick_callback_cancel_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
@@ -1087,11 +1089,11 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
-static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void pick_callback_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@@ -1105,10 +1107,10 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
-static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@@ -1165,7 +1167,7 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
typedef struct {
- grpc_call_element *elem;
+ grpc_call_element* elem;
bool finished;
grpc_closure closure;
grpc_closure cancel_closure;
@@ -1173,11 +1175,10 @@ typedef struct {
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)arg;
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
gpr_free(args);
return;
@@ -1190,9 +1191,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
// is called, it will be a no-op. We also immediately invoke
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
- grpc_call_element *elem = args->elem;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = args->elem;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
@@ -1208,14 +1209,13 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
"Pick cancelled", &error, 1));
}
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem);
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem);
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)arg;
+static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1225,9 +1225,9 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
return;
}
args->finished = true;
- grpc_call_element *elem = args->elem;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = args->elem;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1274,17 +1274,17 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+ pick_after_resolver_result_args* args =
+ (pick_after_resolver_result_args*)gpr_zalloc(sizeof(*args));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
@@ -1297,11 +1297,11 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(chand->combiner)));
}
-static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *ignored) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == NULL);
if (chand->lb_policy != NULL) {
// We already have an LB policy, so ask it for a pick.
@@ -1331,9 +1331,9 @@ static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
chand->interested_parties);
}
-static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1352,10 +1352,10 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void cc_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
@@ -1446,11 +1446,11 @@ done:
}
/* Constructor for call_data */
-static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* cc_init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@@ -1466,12 +1466,12 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_destroy_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@@ -1502,10 +1502,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
-static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent) {
- call_data *calld = (call_data *)elem->call_data;
+static void cc_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
+ call_data* calld = (call_data*)elem->call_data;
calld->pollent = pollent;
}
@@ -1527,9 +1527,9 @@ const grpc_channel_filter grpc_client_channel_filter = {
"client-channel",
};
-static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- channel_data *chand = (channel_data *)arg;
+static void try_to_connect_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ channel_data* chand = (channel_data*)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@@ -1542,34 +1542,35 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect) {
+ channel_data* chand = (channel_data*)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
- grpc_combiner_scheduler(chand->combiner)),
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
return out;
}
typedef struct external_connectivity_watcher {
- channel_data *chand;
+ channel_data* chand;
grpc_polling_entity pollent;
- grpc_closure *on_complete;
- grpc_closure *watcher_timer_init;
- grpc_connectivity_state *state;
+ grpc_closure* on_complete;
+ grpc_closure* watcher_timer_init;
+ grpc_connectivity_state* state;
grpc_closure my_closure;
- struct external_connectivity_watcher *next;
+ struct external_connectivity_watcher* next;
} external_connectivity_watcher;
-static external_connectivity_watcher *lookup_external_connectivity_watcher(
- channel_data *chand, grpc_closure *on_complete) {
+static external_connectivity_watcher* lookup_external_connectivity_watcher(
+ channel_data* chand, grpc_closure* on_complete) {
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
- external_connectivity_watcher *w =
+ external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL && w->on_complete != on_complete) {
w = w->next;
@@ -1579,7 +1580,7 @@ static external_connectivity_watcher *lookup_external_connectivity_watcher(
}
static void external_connectivity_watcher_list_append(
- channel_data *chand, external_connectivity_watcher *w) {
+ channel_data* chand, external_connectivity_watcher* w) {
GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete));
gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu);
@@ -1590,7 +1591,7 @@ static void external_connectivity_watcher_list_append(
}
static void external_connectivity_watcher_list_remove(
- channel_data *chand, external_connectivity_watcher *too_remove) {
+ channel_data* chand, external_connectivity_watcher* too_remove) {
GPR_ASSERT(
lookup_external_connectivity_watcher(chand, too_remove->on_complete));
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1599,7 +1600,7 @@ static void external_connectivity_watcher_list_remove(
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
return;
}
- external_connectivity_watcher *w =
+ external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL) {
if (w->next == too_remove) {
@@ -1613,12 +1614,12 @@ static void external_connectivity_watcher_list_remove(
}
int grpc_client_channel_num_external_connectivity_watchers(
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
- external_connectivity_watcher *w =
+ external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL) {
count++;
@@ -1629,10 +1630,10 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
-static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
- grpc_closure *follow_up = w->on_complete;
+static void on_external_watch_complete_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ grpc_closure* follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
@@ -1642,10 +1643,10 @@ static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
-static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
- external_connectivity_watcher *found = NULL;
+static void watch_connectivity_state_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ external_connectivity_watcher* found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
@@ -1670,12 +1671,12 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
void grpc_client_channel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_polling_entity pollent, grpc_connectivity_state *state,
- grpc_closure *closure, grpc_closure *watcher_timer_init) {
- channel_data *chand = (channel_data *)elem->channel_data;
- external_connectivity_watcher *w =
- (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_polling_entity pollent, grpc_connectivity_state* state,
+ grpc_closure* closure, grpc_closure* watcher_timer_init) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ external_connectivity_watcher* w =
+ (external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
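Aside: parse_timeout() in the hunks above consumes service-config duration strings of the form "<seconds>[.<fraction>]s" (e.g. "1.5s"), stripping the trailing 's' and padding the fraction out to nanoseconds. A minimal standalone sketch of the same parsing idea, assuming POSIX strdup and simplified error handling instead of the grpc_millis plumbing used here:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Sketch only: returns milliseconds via out-param. */
static bool parse_duration_ms(const char* s, long* ms) {
  size_t len = strlen(s);
  if (len < 2 || s[len - 1] != 's') return false; /* must end in 's' */
  char* buf = strdup(s);
  buf[len - 1] = '\0'; /* drop trailing 's' */
  long nanos = 0;
  char* decimal_point = strchr(buf, '.');
  if (decimal_point != NULL) {
    *decimal_point = '\0';
    char frac[10] = "000000000"; /* right-pad fraction to 9 digits */
    size_t n = strlen(decimal_point + 1);
    memcpy(frac, decimal_point + 1, n < 9 ? n : 9);
    nanos = atol(frac);
  }
  *ms = atol(buf) * 1000 + nanos / 1000000; /* e.g. "1.5s" -> 1500 */
  free(buf);
  return true;
}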
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index 152fe2365a..27862cf239 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -42,19 +42,19 @@ extern "C" {
extern const grpc_channel_filter grpc_client_channel_filter;
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect);
int grpc_client_channel_num_external_connectivity_watchers(
- grpc_channel_element *elem);
+ grpc_channel_element* elem);
void grpc_client_channel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_polling_entity pollent, grpc_connectivity_state *state,
- grpc_closure *on_complete, grpc_closure *watcher_timer_init);
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_polling_entity pollent, grpc_connectivity_state* state,
+ grpc_closure* on_complete, grpc_closure* watcher_timer_init);
/* Debug helper: pull the subchannel call from a call stack element */
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *elem);
+grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
+ grpc_call_element* elem);
#ifdef __cplusplus
}
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h
index 4273c90058..db8645cd00 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.h
+++ b/src/core/ext/filters/client_channel/client_channel_factory.h
@@ -44,39 +44,39 @@ typedef enum {
/** Constructor for new configured channels.
Creating decorators around this type is encouraged as a way to adapt behavior. */
struct grpc_client_channel_factory {
- const grpc_client_channel_factory_vtable *vtable;
+ const grpc_client_channel_factory_vtable* vtable;
};
struct grpc_client_channel_factory_vtable {
- void (*ref)(grpc_client_channel_factory *factory);
- void (*unref)(grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory);
- grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory,
- const grpc_subchannel_args *args);
- grpc_channel *(*create_client_channel)(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory,
- const char *target,
+ void (*ref)(grpc_client_channel_factory* factory);
+ void (*unref)(grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory);
+ grpc_subchannel* (*create_subchannel)(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory,
+ const grpc_subchannel_args* args);
+ grpc_channel* (*create_client_channel)(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory,
+ const char* target,
grpc_client_channel_type type,
- const grpc_channel_args *args);
+ const grpc_channel_args* args);
};
-void grpc_client_channel_factory_ref(grpc_client_channel_factory *factory);
-void grpc_client_channel_factory_unref(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory);
+void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
+void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory);
/** Create a new grpc_subchannel */
-grpc_subchannel *grpc_client_channel_factory_create_subchannel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- const grpc_subchannel_args *args);
+grpc_subchannel* grpc_client_channel_factory_create_subchannel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
+ const grpc_subchannel_args* args);
/** Create a new grpc_channel */
-grpc_channel *grpc_client_channel_factory_create_channel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- const char *target, grpc_client_channel_type type,
- const grpc_channel_args *args);
+grpc_channel* grpc_client_channel_factory_create_channel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
+ const char* target, grpc_client_channel_type type,
+ const grpc_channel_args* args);
grpc_arg grpc_client_channel_factory_create_channel_arg(
- grpc_client_channel_factory *factory);
+ grpc_client_channel_factory* factory);
#ifdef __cplusplus
}
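The header comment above encourages decorators; the sketch below shows one that logs each creation and forwards everything to a wrapped factory. All logging_* names are hypothetical and the decorator's own refcount is elided for brevity:

typedef struct {
  grpc_client_channel_factory base; /* vtable holder; must be first */
  grpc_client_channel_factory* wrapped;
} logging_factory;

static void logging_ref(grpc_client_channel_factory* factory) {
  grpc_client_channel_factory_ref(((logging_factory*)factory)->wrapped);
}

static void logging_unref(grpc_exec_ctx* exec_ctx,
                          grpc_client_channel_factory* factory) {
  grpc_client_channel_factory_unref(exec_ctx,
                                    ((logging_factory*)factory)->wrapped);
}

static grpc_subchannel* logging_create_subchannel(
    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
    const grpc_subchannel_args* args) {
  gpr_log(GPR_INFO, "creating subchannel");
  return grpc_client_channel_factory_create_subchannel(
      exec_ctx, ((logging_factory*)factory)->wrapped, args);
}

static grpc_channel* logging_create_client_channel(
    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
    const char* target, grpc_client_channel_type type,
    const grpc_channel_args* args) {
  gpr_log(GPR_INFO, "creating channel to '%s'", target);
  return grpc_client_channel_factory_create_channel(
      exec_ctx, ((logging_factory*)factory)->wrapped, target, type, args);
}

static const grpc_client_channel_factory_vtable logging_vtable = {
    logging_ref, logging_unref, logging_create_subchannel,
    logging_create_client_channel};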
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index 4431d11519..0db894913c 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -34,16 +34,16 @@
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
-static bool append_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder, void *arg) {
+static bool append_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
-static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *unused) {
- const grpc_channel_args *args =
+static bool set_default_host_if_unset(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* unused) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY) ||
@@ -51,12 +51,12 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
return true;
}
}
- char *default_authority = grpc_get_default_authority(
+ char* default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
grpc_arg arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ (char*)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);
gpr_free(default_authority);
@@ -76,7 +76,7 @@ extern "C" void grpc_client_channel_init(void) {
set_default_host_if_unset, NULL);
grpc_channel_init_register_stage(
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
- (void *)&grpc_client_channel_filter);
+ (void*)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
grpc_register_tracer(&grpc_client_channel_trace);
#ifndef NDEBUG
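For comparison, a plugin registering its own filter would follow the same shape as append_filter above. Everything prefixed my_ in this sketch is hypothetical (append_filter itself is static, so the callback is re-declared locally):

extern const grpc_channel_filter my_filter;

static bool my_append_filter(grpc_exec_ctx* exec_ctx,
                             grpc_channel_stack_builder* builder, void* arg) {
  return grpc_channel_stack_builder_append_filter(
      builder, (const grpc_channel_filter*)arg, NULL, NULL);
}

void my_plugin_init(void) {
  grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   my_append_filter, (void*)&my_filter);
}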
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index b71e0aab00..12dc59bcdf 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -31,48 +31,48 @@ typedef struct grpc_connector grpc_connector;
typedef struct grpc_connector_vtable grpc_connector_vtable;
struct grpc_connector {
- const grpc_connector_vtable *vtable;
+ const grpc_connector_vtable* vtable;
};
typedef struct {
/** set of pollsets interested in this connection */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/** deadline for connection */
grpc_millis deadline;
/** channel arguments (to be passed to transport) */
- const grpc_channel_args *channel_args;
+ const grpc_channel_args* channel_args;
} grpc_connect_in_args;
typedef struct {
/** the connected transport */
- grpc_transport *transport;
+ grpc_transport* transport;
/** channel arguments (to be passed to the filters) */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
} grpc_connect_out_args;
struct grpc_connector_vtable {
- void (*ref)(grpc_connector *connector);
- void (*unref)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+ void (*ref)(grpc_connector* connector);
+ void (*unref)(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
/** Implementation of grpc_connector_shutdown */
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- grpc_error *why);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ grpc_error* why);
/** Implementation of grpc_connector_connect */
- void (*connect)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- const grpc_connect_in_args *in_args,
- grpc_connect_out_args *out_args, grpc_closure *notify);
+ void (*connect)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ const grpc_connect_in_args* in_args,
+ grpc_connect_out_args* out_args, grpc_closure* notify);
};
-grpc_connector *grpc_connector_ref(grpc_connector *connector);
-void grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+grpc_connector* grpc_connector_ref(grpc_connector* connector);
+void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
/** Connect using the connector: max one outstanding call at a time */
-void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- const grpc_connect_in_args *in_args,
- grpc_connect_out_args *out_args,
- grpc_closure *notify);
+void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ const grpc_connect_in_args* in_args,
+ grpc_connect_out_args* out_args,
+ grpc_closure* notify);
/** Cancel any pending connection */
-void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- grpc_error *why);
+void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ grpc_error* why);
#ifdef __cplusplus
}
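A skeleton implementation against this vtable, to make the contract concrete. All my_* names are hypothetical and the actual connect logic is elided; note the vtable initializer order matches the struct above (ref, unref, shutdown, connect):

typedef struct {
  grpc_connector base;
  gpr_refcount refs;
} my_connector;

static void my_ref(grpc_connector* con) {
  gpr_ref(&((my_connector*)con)->refs);
}

static void my_unref(grpc_exec_ctx* exec_ctx, grpc_connector* con) {
  my_connector* c = (my_connector*)con;
  if (gpr_unref(&c->refs)) gpr_free(c);
}

static void my_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* con,
                        grpc_error* why) {
  /* cancel any in-flight connection attempt here */
  GRPC_ERROR_UNREF(why);
}

static void my_connect(grpc_exec_ctx* exec_ctx, grpc_connector* con,
                       const grpc_connect_in_args* in_args,
                       grpc_connect_out_args* out_args, grpc_closure* notify) {
  /* start connecting; once out_args->transport is populated, signal: */
  GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_NONE);
}

static const grpc_connector_vtable my_vtable = {my_ref, my_unref, my_shutdown,
                                                my_connect};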
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 8e6673d737..387c26ed5c 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -26,9 +26,9 @@ grpc_tracer_flag grpc_trace_lb_policy_refcount =
GRPC_TRACER_INITIALIZER(false, "lb_policy_refcount");
#endif
-void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable,
- grpc_combiner *combiner) {
+void grpc_lb_policy_init(grpc_lb_policy* policy,
+ const grpc_lb_policy_vtable* vtable,
+ grpc_combiner* combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
@@ -37,7 +37,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
-#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
+#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
#else
@@ -47,7 +47,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
#define REF_MUTATE_PASS_ARGS(x)
#endif
-static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
+static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
@@ -61,104 +61,105 @@ static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
return old_val;
}
-void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
-static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_lb_policy *policy = (grpc_lb_policy *)arg;
+static void shutdown_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_lb_policy* policy = (grpc_lb_policy*)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
- shutdown_locked, policy,
- grpc_combiner_scheduler(policy->combiner)),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(shutdown_locked, policy,
+ grpc_combiner_scheduler(policy->combiner)),
+ GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
}
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
- grpc_combiner *combiner = policy->combiner;
+ grpc_combiner* combiner = policy->combiner;
policy->vtable->destroy(exec_ctx, policy);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
}
}
-int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context,
- void **user_data, grpc_closure *on_complete) {
+int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context,
+ void** user_data, grpc_closure* on_complete) {
return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
context, user_data, on_complete);
}
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
}
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+ grpc_error* error) {
policy->vtable->cancel_picks_locked(exec_ctx, policy,
initial_metadata_flags_mask,
initial_metadata_flags_eq, error);
}
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy) {
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy) {
policy->vtable->exit_idle_locked(exec_ctx, policy);
}
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_closure *closure) {
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_closure* closure) {
policy->vtable->ping_one_locked(exec_ctx, policy, closure);
}
void grpc_lb_policy_notify_on_state_change_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connectivity_state *state, grpc_closure *closure) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_connectivity_state* state, grpc_closure* closure) {
policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
closure);
}
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_error **connectivity_error) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_error** connectivity_error) {
return policy->vtable->check_connectivity_locked(exec_ctx, policy,
connectivity_error);
}
-void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- const grpc_lb_policy_args *lb_policy_args) {
+void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ const grpc_lb_policy_args* lb_policy_args) {
policy->vtable->update_locked(exec_ctx, policy, lb_policy_args);
}
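The ref_mutate arithmetic above packs two counters into one word: the low WEAK_REF_BITS bits hold the weak count, and the bits above them hold the strong count, so a single atomic add can adjust either. A standalone illustration (plain C, not gRPC code; 16 is an assumed width for the demo):

#include <assert.h>
#include <stdint.h>

#define WEAK_REF_BITS 16

int main(void) {
  intptr_t ref_pair = (intptr_t)1 << WEAK_REF_BITS; /* 1 strong, 0 weak */
  ref_pair += (intptr_t)1 << WEAK_REF_BITS;         /* STRONG_REF */
  ref_pair += 1;                                    /* WEAK_REF */
  const intptr_t mask = ~(intptr_t)((1 << WEAK_REF_BITS) - 1);
  assert(((ref_pair & mask) >> WEAK_REF_BITS) == 2); /* strong count */
  assert((ref_pair & ~mask) == 1);                   /* weak count */
  return 0;
}

This also explains grpc_lb_policy_unref's delta of 1 - (1 << WEAK_REF_BITS): the released strong ref is traded for a temporary weak ref, which keeps the object alive until shutdown_locked has run and performs the final weak unref.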
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 010299c2f4..590094e67e 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -38,70 +38,70 @@ extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
#endif
struct grpc_lb_policy {
- const grpc_lb_policy_vtable *vtable;
+ const grpc_lb_policy_vtable* vtable;
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
};
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
/** Initial metadata associated with the picking call. */
- grpc_metadata_batch *initial_metadata;
+ grpc_metadata_batch* initial_metadata;
/** Bitmask used for selective cancelling. See \a
* grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
* grpc_types.h */
uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */
- grpc_linked_mdelem *lb_token_mdelem_storage;
+ grpc_linked_mdelem* lb_token_mdelem_storage;
} grpc_lb_policy_pick_args;
struct grpc_lb_policy_vtable {
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
- void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+ void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
/** \see grpc_lb_policy_pick */
- int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete);
+ int (*pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete);
/** \see grpc_lb_policy_cancel_pick */
- void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error);
+ void (*cancel_pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_connected_subchannel** target,
+ grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
- void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ void (*cancel_picks_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error);
+ grpc_error* error);
/** \see grpc_lb_policy_ping_one */
- void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure);
+ void (*ping_one_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_closure* closure);
/** Try to enter a READY connectivity state */
- void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*exit_idle_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity_locked)(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_error **connectivity_error);
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_error** connectivity_error);
/** Call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy. Calling with a NULL \a
state cancels the subscription. */
- void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure);
+ void (*notify_on_state_change_locked)(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_connectivity_state* state,
+ grpc_closure* closure);
- void (*update_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args);
+ void (*update_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args);
};
#ifndef NDEBUG
@@ -119,29 +119,29 @@ struct grpc_lb_policy_vtable {
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason);
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const char *file, int line, const char *reason);
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const char *file, int line, const char *reason);
+void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
+ const char* reason);
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const char* file, int line, const char* reason);
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
+ const char* reason);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const char* file, int line, const char* reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
-void grpc_lb_policy_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_ref(grpc_lb_policy* policy);
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
#endif
/** called by concrete implementations to initialize the base struct */
-void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable,
- grpc_combiner *combiner);
+void grpc_lb_policy_init(grpc_lb_policy* policy,
+ const grpc_lb_policy_vtable* vtable,
+ grpc_combiner* combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
@@ -160,53 +160,53 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context,
- void **user_data, grpc_closure *on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context,
+ void** user_data, grpc_closure* on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_closure *closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_closure* closure);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_connected_subchannel** target,
+ grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error);
+ grpc_error* error);
/** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy);
/* Call notify when the connectivity state of a channel changes from \a *state.
* Updates \a *state with the new state of the policy */
void grpc_lb_policy_notify_on_state_change_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connectivity_state *state, grpc_closure *closure);
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_connectivity_state* state, grpc_closure* closure);
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_error **connectivity_error);
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_error** connectivity_error);
/** Update \a policy with \a lb_policy_args. */
-void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- const grpc_lb_policy_args *lb_policy_args);
+void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ const grpc_lb_policy_args* lb_policy_args);
#ifdef __cplusplus
}
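To make the interface concrete, the smallest possible wiring of a new policy onto this vtable might look like the sketch below. The my_* names are hypothetical; every callback other than destroy is elided, and a real policy must supply them all:

typedef struct {
  grpc_lb_policy base; /* must be first so the pointer doubles as base */
  /* policy-specific state ... */
} my_policy;

static void my_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
  gpr_free(pol);
}

static const grpc_lb_policy_vtable my_vtable = {
    my_destroy /* remaining callbacks elided; all required in practice */};

static grpc_lb_policy* my_policy_create(grpc_exec_ctx* exec_ctx,
                                        grpc_lb_policy_args* args) {
  my_policy* p = (my_policy*)gpr_zalloc(sizeof(*p));
  grpc_lb_policy_init(&p->base, &my_vtable, args->combiner);
  return &p->base;
}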
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 7ad322902b..d93a9c3710 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -25,31 +25,31 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
typedef struct {
// Stats object to update.
- grpc_grpclb_client_stats *client_stats;
+ grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
grpc_closure on_complete_for_send;
- grpc_closure *original_on_complete_for_send;
+ grpc_closure* original_on_complete_for_send;
bool send_initial_metadata_succeeded;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
- grpc_closure *original_recv_initial_metadata_ready;
+ grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
} call_data;
-static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void on_complete_for_send(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -57,9 +57,9 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
-static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -67,25 +67,24 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
- (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
- .value);
+ (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;
}
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@@ -97,9 +96,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
static void start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
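The hunk is cut off just as the interception is wired up. The pattern the call_data fields above exist for — stash the batch's original closure, substitute our own, chain on completion — would plausibly continue like this inside that branch (a hypothetical sketch, not the actual continuation of the hunk):

    calld->original_on_complete_for_send = batch->on_complete;
    GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send,
                      calld, grpc_schedule_on_exec_ctx);
    batch->on_complete = &calld->on_complete_for_send;

on_complete_for_send, shown earlier in this file, then records success and runs the stashed closure.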
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
index c6a0d69c3f..abf613a23b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -32,4 +32,4 @@ extern const grpc_channel_filter grpc_client_load_reporting_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 03116b420c..065beb4890 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -130,17 +130,17 @@ grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
-static grpc_error *initial_metadata_add_lb_token(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
- grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
+static grpc_error* initial_metadata_add_lb_token(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
+ grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
GPR_ASSERT(lb_token_mdelem_storage != NULL);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
lb_token_mdelem_storage, lb_token);
}
-static void destroy_client_stats(void *arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
+static void destroy_client_stats(void* arg) {
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
}
typedef struct wrapped_rr_closure_arg {
@@ -149,42 +149,42 @@ typedef struct wrapped_rr_closure_arg {
/* the original closure. Usually an on_complete/notify cb for pick() and ping()
* calls against the internal RR instance, respectively. */
- grpc_closure *wrapped_closure;
+ grpc_closure* wrapped_closure;
/* the pick's initial metadata, kept in order to append the LB token for the
* pick */
- grpc_metadata_batch *initial_metadata;
+ grpc_metadata_batch* initial_metadata;
/* the picked target, used to determine which LB token to add to the pick's
* initial metadata */
- grpc_connected_subchannel **target;
+ grpc_connected_subchannel** target;
/* the context to be populated for the subchannel call */
- grpc_call_context_element *context;
+ grpc_call_context_element* context;
/* Stats for client-side load reporting. Note that this holds a
* reference, which must be either passed on via context or unreffed. */
- grpc_grpclb_client_stats *client_stats;
+ grpc_grpclb_client_stats* client_stats;
/* the LB token associated with the pick */
grpc_mdelem lb_token;
/* storage for the lb token initial metadata mdelem */
- grpc_linked_mdelem *lb_token_mdelem_storage;
+ grpc_linked_mdelem* lb_token_mdelem_storage;
/* The RR instance related to the closure */
- grpc_lb_policy *rr_policy;
+ grpc_lb_policy* rr_policy;
/* heap memory to be freed upon closure execution. */
- void *free_when_done;
+ void* free_when_done;
} wrapped_rr_closure_arg;
/* The \a on_complete closure passed as part of the pick requires keeping a
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
+static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
@@ -202,7 +202,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_ERROR,
"No LB token for connected subchannel pick %p (from RR "
"instance %p).",
- (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
+ (void*)*wc_arg->target, (void*)wc_arg->rr_policy);
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
@@ -213,7 +213,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
}
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
+ gpr_log(GPR_INFO, "Unreffing RR %p", (void*)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
@@ -230,25 +230,25 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
* order to correctly unref the RR policy instance upon completion of the pick.
* See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
- struct pending_pick *next;
+ struct pending_pick* next;
/* original pick()'s arguments */
grpc_lb_policy_pick_args pick_args;
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
- grpc_connected_subchannel **target;
+ grpc_connected_subchannel** target;
/* args for wrapped_on_complete */
wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
-static void add_pending_pick(pending_pick **root,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context,
- grpc_closure *on_complete) {
- pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
+static void add_pending_pick(pending_pick** root,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context,
+ grpc_closure* on_complete) {
+ pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -267,14 +267,14 @@ static void add_pending_pick(pending_pick **root,
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
- struct pending_ping *next;
+ struct pending_ping* next;
/* args for wrapped_notify */
wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
-static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
- pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
+static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
+ pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -294,9 +294,9 @@ typedef struct glb_lb_policy {
grpc_lb_policy base;
/** who the client is trying to communicate with */
- const char *server_name;
- grpc_client_channel_factory *cc_factory;
- grpc_channel_args *args;
+ const char* server_name;
+ grpc_client_channel_factory* cc_factory;
+ grpc_channel_args* args;
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
@@ -306,13 +306,13 @@ typedef struct glb_lb_policy {
int lb_fallback_timeout_ms;
/** for communicating with the LB server */
- grpc_channel *lb_channel;
+ grpc_channel* lb_channel;
/** response generator to inject address updates into \a lb_channel */
- grpc_fake_resolver_response_generator *response_generator;
+ grpc_fake_resolver_response_generator* response_generator;
/** the RR policy to use for the backend servers returned by the LB server */
- grpc_lb_policy *rr_policy;
+ grpc_lb_policy* rr_policy;
bool started_picking;
@@ -324,7 +324,7 @@ typedef struct glb_lb_policy {
/** stores the deserialized response from the LB. May be NULL until one such
* response has arrived. */
- grpc_grpclb_serverlist *serverlist;
+ grpc_grpclb_serverlist* serverlist;
/** Index into serverlist for next pick.
* If the server at this index is a drop, we return a drop.
@@ -332,13 +332,13 @@ typedef struct glb_lb_policy {
size_t serverlist_index;
/** stores the backend addresses from the resolver */
- grpc_lb_addresses *fallback_backend_addresses;
+ grpc_lb_addresses* fallback_backend_addresses;
/** list of picks that are waiting on RR's policy connectivity */
- pending_pick *pending_picks;
+ pending_pick* pending_picks;
/** list of pings that are waiting on RR's policy connectivity */
- pending_ping *pending_pings;
+ pending_ping* pending_pings;
bool shutting_down;
@@ -373,7 +373,7 @@ typedef struct glb_lb_policy {
/* LB fallback timer callback. */
grpc_closure lb_on_fallback;
- grpc_call *lb_call; /* streaming call to the LB server, */
+ grpc_call* lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
grpc_metadata_array
@@ -381,10 +381,10 @@ typedef struct glb_lb_policy {
/* what's being sent to the LB server. Note that its value may vary if the LB
* server indicates a redirect. */
- grpc_byte_buffer *lb_request_payload;
+ grpc_byte_buffer* lb_request_payload;
/* response from the LB server, if any. Processed in lb_on_response_received() */
- grpc_byte_buffer *lb_response_payload;
+ grpc_byte_buffer* lb_response_payload;
/* call status code and details, set in lb_on_server_status_received() */
grpc_status_code lb_call_status;
@@ -403,7 +403,7 @@ typedef struct glb_lb_policy {
/* Stats for client-side load reporting. Should be unreffed and
* recreated whenever lb_call is replaced. */
- grpc_grpclb_client_stats *client_stats;
+ grpc_grpclb_client_stats* client_stats;
/* Interval and timer for next client load report. */
grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
@@ -413,20 +413,20 @@ typedef struct glb_lb_policy {
* completion of sending the load report. */
grpc_closure client_load_report_closure;
/* Client load report message payload. */
- grpc_byte_buffer *client_load_report_payload;
+ grpc_byte_buffer* client_load_report_payload;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
grpc_closure on_change;
grpc_connectivity_state state;
- glb_lb_policy *glb_policy;
+ glb_lb_policy* glb_policy;
};
-static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
+static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
bool log) {
if (server->drop) return false;
- const grpc_grpclb_ip_address *ip = &server->ip_address;
+ const grpc_grpclb_ip_address* ip = &server->ip_address;
if (server->port >> 16 != 0) {
if (log) {
gpr_log(GPR_ERROR,
@@ -448,17 +448,17 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
}
/* vtable for LB tokens in grpc_lb_addresses. */
-static void *lb_token_copy(void *token) {
+static void* lb_token_copy(void* token) {
return token == NULL
? NULL
- : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
+ : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
-static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
+static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
if (token != NULL) {
GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
}
}
-static int lb_token_cmp(void *token1, void *token2) {
+static int lb_token_cmp(void* token1, void* token2) {
if (token1 > token2) return 1;
if (token1 < token2) return -1;
return 0;
@@ -466,23 +466,23 @@ static int lb_token_cmp(void *token1, void *token2) {
static const grpc_lb_user_data_vtable lb_token_vtable = {
lb_token_copy, lb_token_destroy, lb_token_cmp};
-static void parse_server(const grpc_grpclb_server *server,
- grpc_resolved_address *addr) {
+static void parse_server(const grpc_grpclb_server* server,
+ grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server->drop) return;
const uint16_t netorder_port = htons((uint16_t)server->port);
/* the addresses are given in binary format (an in(6)_addr struct) in
* server->ip_address.bytes. */
- const grpc_grpclb_ip_address *ip = &server->ip_address;
+ const grpc_grpclb_ip_address* ip = &server->ip_address;
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
+ struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
+ struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
@@ -490,15 +490,15 @@ static void parse_server(const grpc_grpclb_server *server,
}
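parse_server above relies on the serverlist carrying raw 4- or 16-byte addresses. The same byte-wise handling in a standalone, compilable form (an illustrative helper, not gRPC code):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

static void fill_sockaddr(const uint8_t* bytes, size_t size, uint16_t port,
                          struct sockaddr_storage* out, size_t* out_len) {
  memset(out, 0, sizeof(*out));
  *out_len = 0;
  if (size == 4) { /* IPv4: bytes are an in_addr */
    struct sockaddr_in* addr4 = (struct sockaddr_in*)out;
    addr4->sin_family = AF_INET;
    memcpy(&addr4->sin_addr, bytes, size);
    addr4->sin_port = htons(port); /* ports travel in network byte order */
    *out_len = sizeof(*addr4);
  } else if (size == 16) { /* IPv6: bytes are an in6_addr */
    struct sockaddr_in6* addr6 = (struct sockaddr_in6*)out;
    addr6->sin6_family = AF_INET6;
    memcpy(&addr6->sin6_addr, bytes, size);
    addr6->sin6_port = htons(port);
    *out_len = sizeof(*addr6);
  }
}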
/* Returns addresses extracted from \a serverlist. */
-static grpc_lb_addresses *process_serverlist_locked(
- grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
+static grpc_lb_addresses* process_serverlist_locked(
+ grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
size_t num_valid = 0;
/* first pass: count how many are valid in order to allocate the necessary
* memory in a single block */
for (size_t i = 0; i < serverlist->num_servers; ++i) {
if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
}
- grpc_lb_addresses *lb_addresses =
+ grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_valid, &lb_token_vtable);
/* second pass: actually populate the addresses and LB tokens (aka user data
* to the outside world) to be read by the RR policy during its creation.
@@ -507,14 +507,14 @@ static grpc_lb_addresses *process_serverlist_locked(
* incurr in an allocation due to the arbitrary number of server */
size_t addr_idx = 0;
for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
- const grpc_grpclb_server *server = serverlist->servers[sl_idx];
+ const grpc_grpclb_server* server = serverlist->servers[sl_idx];
if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
GPR_ASSERT(addr_idx < num_valid);
/* address processing */
grpc_resolved_address addr;
parse_server(server, &addr);
/* lb token processing */
- void *user_data;
+ void* user_data;
if (server->has_load_balance_token) {
const size_t lb_token_max_length =
GPR_ARRAY_SIZE(server->load_balance_token);
@@ -522,17 +522,17 @@ static grpc_lb_addresses *process_serverlist_locked(
strnlen(server->load_balance_token, lb_token_max_length);
grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
server->load_balance_token, lb_token_length);
- user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
- lb_token_mdstr)
+ user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
+ lb_token_mdstr)
.payload;
} else {
- char *uri = grpc_sockaddr_to_uri(&addr);
+ char* uri = grpc_sockaddr_to_uri(&addr);
gpr_log(GPR_INFO,
"Missing LB token for backend address '%s'. The empty token will "
"be used instead",
uri);
gpr_free(uri);
- user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
+ user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
}
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@@ -545,8 +545,8 @@ static grpc_lb_addresses *process_serverlist_locked(
}
/* Returns the backend addresses extracted from the given addresses */
-static grpc_lb_addresses *extract_backend_addresses_locked(
- grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
+static grpc_lb_addresses* extract_backend_addresses_locked(
+ grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
/* first pass: count the number of backend addresses */
size_t num_backends = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -555,24 +555,24 @@ static grpc_lb_addresses *extract_backend_addresses_locked(
}
}
/* second pass: actually populate the addresses and (empty) LB tokens */
- grpc_lb_addresses *backend_addresses =
+ grpc_lb_addresses* backend_addresses =
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
size_t num_copied = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) continue;
- const grpc_resolved_address *addr = &addresses->addresses[i].address;
+ const grpc_resolved_address* addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
NULL /* balancer_name */,
- (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+ (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
return backend_addresses;
}
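Both process_serverlist_locked and extract_backend_addresses_locked use the same two-pass shape: count the survivors, allocate once, then populate. The pattern in isolation (plain C, illustrative names):

#include <stdlib.h>

/* keep only even values; returns a malloc'd array and sets *out_n */
static int* filter_even(const int* in, size_t n, size_t* out_n) {
  size_t num_valid = 0;
  for (size_t i = 0; i < n; ++i) { /* first pass: count */
    if (in[i] % 2 == 0) ++num_valid;
  }
  int* out = (int*)malloc(num_valid * sizeof(*out));
  size_t j = 0;
  for (size_t i = 0; i < n; ++i) { /* second pass: populate */
    if (in[i] % 2 == 0) out[j++] = in[i];
  }
  *out_n = num_valid;
  return out;
}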
static void update_lb_connectivity_status_locked(
- grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
+ grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+ grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&glb_policy->state_tracker);
@@ -620,7 +620,7 @@ static void update_lb_connectivity_status_locked(
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(
GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
- grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
+ grpc_connectivity_state_name(rr_state), (void*)glb_policy->rr_policy);
}
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
rr_state_error,
@@ -633,13 +633,13 @@ static void update_lb_connectivity_status_locked(
* If \a force_async is true, then we will manually schedule the
* completion callback even if the pick is available immediately. */
static bool pick_from_internal_rr_locked(
- grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- const grpc_lb_policy_pick_args *pick_args, bool force_async,
- grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
+ grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+ const grpc_lb_policy_pick_args* pick_args, bool force_async,
+ grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
- grpc_grpclb_server *server =
+ grpc_grpclb_server* server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
@@ -672,7 +672,7 @@ static bool pick_from_internal_rr_locked(
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
- (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
@@ -703,9 +703,9 @@ static bool pick_from_internal_rr_locked(
return pick_done;
}
-static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
- grpc_lb_addresses *addresses;
+static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
+ grpc_lb_addresses* addresses;
if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
@@ -718,12 +718,12 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != NULL);
- grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
- static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+ static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
args->args = grpc_channel_args_copy_and_add_and_remove(
glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
@@ -732,19 +732,19 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
return args;
}
-static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_args *args) {
+static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_args* args) {
grpc_channel_args_destroy(exec_ctx, args->args);
gpr_free(args);
}
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
-static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- grpc_lb_policy_args *args) {
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error);
+static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+ grpc_lb_policy_args* args) {
GPR_ASSERT(glb_policy->rr_policy == NULL);
- grpc_lb_policy *new_rr_policy =
+ grpc_lb_policy* new_rr_policy =
grpc_lb_policy_create(exec_ctx, "round_robin", args);
if (new_rr_policy == NULL) {
gpr_log(GPR_ERROR,
@@ -753,11 +753,11 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
"to be used. Future updates from the LB will attempt to create new "
"instances.",
(unsigned long)glb_policy->serverlist->num_servers,
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
return;
}
glb_policy->rr_policy = new_rr_policy;
- grpc_error *rr_state_error = NULL;
+ grpc_error* rr_state_error = NULL;
const grpc_connectivity_state rr_state =
grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
&rr_state_error);
@@ -773,8 +773,8 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* Allocate the data for the tracking of the new RR policy's connectivity.
* It'll be deallocated in glb_rr_connectivity_changed() */
- rr_connectivity_data *rr_connectivity =
- (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
+ rr_connectivity_data* rr_connectivity =
+ (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -789,7 +789,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
/* Update picks and pings in wait */
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = glb_policy->pending_picks)) {
glb_policy->pending_picks = pp->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
@@ -798,14 +798,14 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
}
pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
true /* force_async */, pp->target,
&pp->wrapped_on_complete_arg);
}
- pending_ping *pping;
+ pending_ping* pping;
while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
@@ -820,31 +820,31 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
}
/* glb_policy->rr_policy may be NULL (initial handover) */
-static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
if (glb_policy->shutting_down) return;
- grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
+ grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
if (glb_policy->rr_policy != NULL) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
}
grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
} else {
create_rr_locked(exec_ctx, glb_policy, args);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
}
}
lb_policy_args_destroy(exec_ctx, args);
}
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
- glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
+ glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"glb_rr_connectivity_cb");
@@ -872,22 +872,22 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
&rr_connectivity->on_change);
}
-static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
- void *balancer_name) {
+static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
+ void* balancer_name) {
gpr_free(balancer_name);
}
static grpc_slice_hash_table_entry targets_info_entry_create(
- const char *address, const char *balancer_name) {
+ const char* address, const char* balancer_name) {
grpc_slice_hash_table_entry entry;
entry.key = grpc_slice_from_copied_string(address);
entry.value = gpr_strdup(balancer_name);
return entry;
}
-static int balancer_name_cmp_fn(void *a, void *b) {
- const char *a_str = (const char *)a;
- const char *b_str = (const char *)b;
+static int balancer_name_cmp_fn(void* a, void* b) {
+ const char* a_str = (const char*)a;
+ const char* b_str = (const char*)b;
return strcmp(a_str, b_str);
}
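A hypothetical standalone use of the targets-info helpers above, assuming the slice_hash_table API exactly as this file uses it and an exec_ctx in scope; the addresses and balancer names are made up:

  grpc_slice_hash_table_entry entries[2];
  entries[0] = targets_info_entry_create("10.0.0.1:443", "balancer-a");
  entries[1] = targets_info_entry_create("10.0.0.2:443", "balancer-b");
  grpc_slice_hash_table* targets_info = grpc_slice_hash_table_create(
      2, entries, destroy_balancer_name, balancer_name_cmp_fn);
  /* keys and values are now owned by the table; only the local array
   * would need freeing if it had been heap-allocated */
  /* ... use targets_info ... */
  grpc_slice_hash_table_unref(exec_ctx, targets_info);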
@@ -899,10 +899,10 @@ static int balancer_name_cmp_fn(void *a, void *b) {
* - \a response_generator: in order to propagate updates from the resolver
* above the grpclb policy.
* - \a args: other args inherited from the grpclb policy. */
-static grpc_channel_args *build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args) {
+static grpc_channel_args* build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args) {
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
@@ -911,11 +911,11 @@ static grpc_channel_args *build_lb_channel_args(
* It's the resolver's responsibility to make sure this policy is only
* instantiated and used in that case. Otherwise, something has gone wrong. */
GPR_ASSERT(num_grpclb_addrs > 0);
- grpc_lb_addresses *lb_addresses =
+ grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, NULL);
- grpc_slice_hash_table_entry *targets_info_entries =
- (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
- num_grpclb_addrs);
+ grpc_slice_hash_table_entry* targets_info_entries =
+ (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
+ num_grpclb_addrs);
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -924,7 +924,7 @@ static grpc_channel_args *build_lb_channel_args(
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
- char *addr_str;
+ char* addr_str;
GPR_ASSERT(grpc_sockaddr_to_string(
&addr_str, &addresses->addresses[i].address, true) > 0);
targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
@@ -937,19 +937,19 @@ static grpc_channel_args *build_lb_channel_args(
addresses->addresses[i].balancer_name, NULL /* user data */);
}
GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
- grpc_slice_hash_table *targets_info =
+ grpc_slice_hash_table* targets_info =
grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
destroy_balancer_name, balancer_name_cmp_fn);
gpr_free(targets_info_entries);
- grpc_channel_args *lb_channel_args =
+ grpc_channel_args* lb_channel_args =
grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
response_generator, args);
grpc_arg lb_channel_addresses_arg =
grpc_lb_addresses_create_channel_arg(lb_addresses);
- grpc_channel_args *result = grpc_channel_args_copy_and_add(
+ grpc_channel_args* result = grpc_channel_args_copy_and_add(
lb_channel_args, &lb_channel_addresses_arg, 1);
grpc_slice_hash_table_unref(exec_ctx, targets_info);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
@@ -957,11 +957,11 @@ static grpc_channel_args *build_lb_channel_args(
return result;
}
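
A short usage sketch for the function above, lifted from glb_update_locked
later in this diff: the freshly built args are pushed into the LB channel
through the fake resolver, then released.

    grpc_channel_args* lb_channel_args = build_lb_channel_args(
        exec_ctx, addresses, glb_policy->response_generator, args->args);
    grpc_fake_resolver_response_generator_set_response(
        exec_ctx, glb_policy->response_generator, lb_channel_args);
    grpc_channel_args_destroy(exec_ctx, lb_channel_args);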
-static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
GPR_ASSERT(glb_policy->pending_picks == NULL);
GPR_ASSERT(glb_policy->pending_pings == NULL);
- gpr_free((void *)glb_policy->server_name);
+ gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
if (glb_policy->client_stats != NULL) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
@@ -978,14 +978,14 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(glb_policy);
}
-static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
glb_policy->shutting_down = true;
  /* We need a copy of the lb_call pointer because we can't cancel the call
   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
   * the cancel, needs to acquire that same lock */
- grpc_call *lb_call = glb_policy->lb_call;
+ grpc_call* lb_call = glb_policy->lb_call;
/* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
@@ -1004,9 +1004,9 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_policy->fallback_timer_active = false;
}
- pending_pick *pp = glb_policy->pending_picks;
+ pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
- pending_ping *pping = glb_policy->pending_pings;
+ pending_ping* pping = glb_policy->pending_pings;
glb_policy->pending_pings = NULL;
if (glb_policy->rr_policy != NULL) {
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
@@ -1024,7 +1024,7 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
@@ -1034,7 +1034,7 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
while (pping != NULL) {
- pending_ping *next = pping->next;
+ pending_ping* next = pping->next;
GRPC_CLOSURE_SCHED(
exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
@@ -1053,14 +1053,14 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
// - Otherwise, without an RR instance, picks stay pending at this policy's
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- pending_pick *pp = glb_policy->pending_picks;
+static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
@@ -1089,16 +1089,16 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
// - Otherwise, without an RR instance, picks stay pending at this policy's
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
+static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- pending_pick *pp = glb_policy->pending_picks;
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
@@ -1118,12 +1118,12 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
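
A hypothetical caller-side sketch for the cancellation path above. The entry
point grpc_lb_policy_cancel_picks_locked and the flag
GRPC_INITIAL_METADATA_WAIT_FOR_READY are assumed from lb_policy.h and
grpc_types.h; neither is part of this hunk.

    /* Cancel every pending pick whose initial metadata flags match `eq`
     * under `mask`. The policy forwards to the RR child when one exists,
     * otherwise it completes the wrapped closures itself, as shown above. */
    grpc_lb_policy_cancel_picks_locked(
        exec_ctx, &glb_policy->base,
        GRPC_INITIAL_METADATA_WAIT_FOR_READY /* mask */, 0 /* eq */,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Pick cancelled"));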
-static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy);
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy);
+static void start_picking_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
@@ -1143,18 +1143,18 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
query_for_backends_locked(exec_ctx, glb_policy);
}
-static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
}
-static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
+static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
@@ -1164,18 +1164,18 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
bool pick_done;
if (glb_policy->rr_policy != NULL) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
- (void *)glb_policy, (void *)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p", (void*)glb_policy,
+ (void*)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg *wc_arg =
- (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
+ wrapped_rr_closure_arg* wc_arg =
+ (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
@@ -1197,7 +1197,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_log(GPR_DEBUG,
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
"picks",
- (void *)(glb_policy));
+ (void*)(glb_policy));
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
on_complete);
@@ -1211,16 +1211,16 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
static grpc_connectivity_state glb_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_error **connectivity_error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_error** connectivity_error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
return grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
}
-static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_closure* closure) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
} else {
@@ -1231,23 +1231,23 @@ static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
-static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
+ grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &glb_policy->state_tracker, current, notify);
}
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
- (void *)glb_policy);
+ (void*)glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
@@ -1255,8 +1255,8 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
}
-static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
@@ -1270,7 +1270,7 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
.next_attempt_start_time;
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
- (void *)glb_policy);
+ (void*)glb_policy);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
if (timeout > 0) {
gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
@@ -1291,11 +1291,11 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
"lb_on_server_status_received_locked");
}
-static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
-static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
const grpc_millis next_client_load_report_time =
grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
@@ -1306,9 +1306,9 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
&glb_policy->client_load_report_closure);
}
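
The timer arm that completes this function falls outside the hunk shown. A
minimal sketch of the assumed pattern, reusing the deadline computed above
(grpc_timer_init is assumed to take a grpc_millis deadline in this tree):

    /* Re-arm the load-report timer; the closure runs under the combiner. */
    grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
                    next_client_load_report_time,
                    &glb_policy->client_load_report_closure);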
-static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
@@ -1320,9 +1320,9 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
-static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
- grpc_grpclb_dropped_call_counts *drop_entries =
- (grpc_grpclb_dropped_call_counts *)
+static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ (grpc_grpclb_dropped_call_counts*)
request->client_stats.calls_finished_with_drop.arg;
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
@@ -1332,9 +1332,9 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
(drop_entries == NULL || drop_entries->num_entries == 0);
}
-static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -1346,7 +1346,7 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
// Construct message payload.
GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
- grpc_grpclb_request *request =
+ grpc_grpclb_request* request =
grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
// Skip client load report if the counters were all zero in the last
// report and they are still zero in this one.
@@ -1382,12 +1382,12 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
-static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error);
+static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
GPR_ASSERT(glb_policy->lb_call == NULL);
@@ -1416,7 +1416,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
- grpc_grpclb_request *request =
+ grpc_grpclb_request* request =
grpc_grpclb_request_create(glb_policy->server_name);
grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
glb_policy->lb_request_payload =
@@ -1442,8 +1442,8 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
glb_policy->last_client_load_report_counters_were_zero = false;
}
-static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_unref(glb_policy->lb_call);
glb_policy->lb_call = NULL;
@@ -1462,8 +1462,8 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
/*
* Auxiliary functions and LB client callbacks.
*/
-static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
if (glb_policy->shutting_down) return;
@@ -1472,8 +1472,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
- (void *)glb_policy, (void *)glb_policy->lb_channel,
- (void *)glb_policy->lb_call);
+ (void*)glb_policy, (void*)glb_policy->lb_channel,
+ (void*)glb_policy->lb_call);
}
GPR_ASSERT(glb_policy->lb_call != NULL);
@@ -1481,7 +1481,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
@@ -1537,12 +1537,12 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ grpc_op* op = ops;
if (glb_policy->lb_response_payload != NULL) {
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
@@ -1553,7 +1553,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
- grpc_grpclb_initial_response *response = NULL;
+ grpc_grpclb_initial_response* response = NULL;
if (!glb_policy->seen_initial_response &&
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
@@ -1581,7 +1581,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_grpclb_initial_response_destroy(response);
glb_policy->seen_initial_response = true;
} else {
- grpc_grpclb_serverlist *serverlist =
+ grpc_grpclb_serverlist* serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
GPR_ASSERT(glb_policy->lb_call != NULL);
@@ -1591,7 +1591,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
parse_server(serverlist->servers[i], &addr);
- char *ipport;
+ char* ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
gpr_free(ipport);
@@ -1664,9 +1664,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
glb_policy->fallback_timer_active = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
@@ -1675,7 +1675,7 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
- (void *)glb_policy);
+ (void*)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
@@ -1685,18 +1685,18 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
"grpclb_fallback_timer");
}
-static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- char *status_details =
+ char* status_details =
grpc_slice_to_c_string(glb_policy->lb_call_status_details);
gpr_log(GPR_INFO,
"Status from LB server received. Status = %d, Details = '%s', "
"(call: %p), error %p",
glb_policy->lb_call_status, status_details,
- (void *)glb_policy->lb_call, (void *)error);
+ (void*)glb_policy->lb_call, (void*)error);
gpr_free(status_details);
}
/* We need to perform cleanups no matter what. */
@@ -1709,9 +1709,9 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy,
- const grpc_lb_addresses *addresses) {
+static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy,
+ const grpc_lb_addresses* addresses) {
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
@@ -1722,10 +1722,10 @@ static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
- const grpc_arg *arg =
+static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (glb_policy->lb_channel == NULL) {
@@ -1740,12 +1740,12 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_ERROR,
"No valid LB addresses channel arg for grpclb %p update, "
"ignoring.",
- (void *)glb_policy);
+ (void*)glb_policy);
}
return;
}
- const grpc_lb_addresses *addresses =
- (const grpc_lb_addresses *)arg->value.pointer.p;
+ const grpc_lb_addresses* addresses =
+ (const grpc_lb_addresses*)arg->value.pointer.p;
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
if (glb_policy->serverlist == NULL) {
@@ -1754,7 +1754,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
GPR_ASSERT(glb_policy->lb_channel != NULL);
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
- grpc_channel_args *lb_channel_args = build_lb_channel_args(
+ grpc_channel_args* lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
@@ -1764,7 +1764,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
if (!glb_policy->watching_lb_channel) {
glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
glb_policy->lb_channel, true /* try to connect */);
- grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+ grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
glb_policy->watching_lb_channel = true;
@@ -1781,10 +1781,10 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
@@ -1793,7 +1793,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
/* resub. */
- grpc_channel_element *client_channel_elem =
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
@@ -1845,29 +1845,29 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_notify_on_state_change_locked,
glb_update_locked};
-static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
+static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args) {
/* Count the number of gRPC-LB addresses. There must be at least one. */
- const grpc_arg *arg =
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
return NULL;
}
- grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
- glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
+ glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
GPR_ASSERT(arg != NULL);
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
- grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+ grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
glb_policy->server_name =
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
@@ -1891,8 +1891,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
- static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+ (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
+ static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
@@ -1904,9 +1904,9 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();
- grpc_channel_args *lb_channel_args = build_lb_channel_args(
+ grpc_channel_args* lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
- char *uri_str;
+ char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
@@ -1917,7 +1917,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
gpr_free(uri_str);
if (glb_policy->lb_channel == NULL) {
- gpr_free((void *)glb_policy->server_name);
+ gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
gpr_free(glb_policy);
return NULL;
@@ -1932,16 +1932,16 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return &glb_policy->base;
}
-static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
+static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
-static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
+static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
-grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
+grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
return &glb_lb_policy_factory;
}
@@ -1949,15 +1949,15 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
// Only add client_load_reporting filter if the grpclb LB policy is used.
static bool maybe_add_client_load_reporting_filter(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
- const grpc_channel_args *args =
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- const grpc_arg *channel_arg =
+ const grpc_arg* channel_arg =
grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
return true;
}
@@ -1971,7 +1971,7 @@ extern "C" void grpc_lb_policy_grpclb_init() {
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
- (void *)&grpc_client_load_reporting_filter);
+ (void*)&grpc_client_load_reporting_filter);
}
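
A minimal sketch, assuming the usual plugin pattern, of where the factory from
grpc_glb_lb_factory_create() is registered; that line sits outside the hunk
shown above (grpc_register_lb_policy is assumed from lb_policy_registry.h):

    extern "C" void grpc_lb_policy_grpclb_init() {
      grpc_register_lb_policy(grpc_glb_lb_factory_create());
      /* ...followed by the channel-init stage registration shown above... */
    }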
extern "C" void grpc_lb_policy_grpclb_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
index 15c8a680b7..b6135a4768 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@@ -28,7 +28,7 @@ extern "C" {
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
* subchannel to pick. */
-grpc_lb_policy_factory *grpc_glb_lb_factory_create();
+grpc_lb_policy_factory* grpc_glb_lb_factory_create();
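
A hypothetical usage sketch: once registered, the factory is looked up by name
through the LB policy registry (grpc_lb_policy_create is assumed from
lb_policy_registry.h, with args prepared by the client channel):

    grpc_lb_policy* policy = grpc_lb_policy_create(exec_ctx, "grpclb", &args);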
#ifdef __cplusplus
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index f2967182e2..aacaec197d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
@@ -25,20 +25,20 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/string.h"
-grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
- grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
- grpc_client_channel_factory *client_channel_factory,
- grpc_channel_args *args) {
- grpc_channel *lb_channel = grpc_client_channel_factory_create_channel(
+grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
+ grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+ grpc_client_channel_factory* client_channel_factory,
+ grpc_channel_args* args) {
+ grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, args);
return lb_channel;
}
-grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args) {
+grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_fake_resolver_response_generator_arg(response_generator)};
/* We remove:
@@ -62,7 +62,7 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
*
* - The fake resolver generator, because we are replacing it with the one
* from the grpclb policy, used to propagate updates to the LB channel. */
- static const char *keys_to_remove[] = {
+ static const char* keys_to_remove[] = {
GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
return grpc_channel_args_copy_and_add_and_remove(
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index e8599d1f51..39cbf53428 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -34,19 +34,19 @@ extern "C" {
 * from resolving the LB service's name (e.g., ipv4:10.0.0.1:1234,10.2.3.4:9876).
* \a client_channel_factory will be used for the creation of the LB channel,
* alongside the channel args passed in \a args. */
-grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
- grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
- grpc_client_channel_factory *client_channel_factory,
- grpc_channel_args *args);
+grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
+ grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+ grpc_client_channel_factory* client_channel_factory,
+ grpc_channel_args* args);
-grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args);
+grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 2681b2a079..2dcf29fe0e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -28,19 +28,19 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
-grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
- grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
- grpc_client_channel_factory *client_channel_factory,
- grpc_channel_args *args) {
- grpc_channel_args *new_args = args;
- grpc_channel_credentials *channel_credentials =
+grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
+ grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+ grpc_client_channel_factory* client_channel_factory,
+ grpc_channel_args* args) {
+ grpc_channel_args* new_args = args;
+ grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
if (channel_credentials != NULL) {
/* Substitute the channel credentials with a version without call
* credentials: the load balancer is not necessarily trusted to handle
* bearer token credentials */
- static const char *keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
- grpc_channel_credentials *creds_sans_call_creds =
+ static const char* keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
+ grpc_channel_credentials* creds_sans_call_creds =
grpc_channel_credentials_duplicate_without_call_credentials(
channel_credentials);
GPR_ASSERT(creds_sans_call_creds != NULL);
@@ -52,7 +52,7 @@ grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
GPR_ARRAY_SIZE(args_to_add));
grpc_channel_credentials_unref(exec_ctx, creds_sans_call_creds);
}
- grpc_channel *lb_channel = grpc_client_channel_factory_create_channel(
+ grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
if (channel_credentials != NULL) {
@@ -61,10 +61,10 @@ grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
return lb_channel;
}
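
The hunk elides how the substituted credentials re-enter the channel args. A
hedged sketch of the assumed step, where grpc_channel_credentials_to_arg is
assumed from the security credentials headers:

    /* Package the call-credential-free creds as a channel arg and swap them
     * in for the original GRPC_ARG_CHANNEL_CREDENTIALS entry. */
    grpc_arg args_to_add[] = {
        grpc_channel_credentials_to_arg(creds_sans_call_creds)};
    new_args = grpc_channel_args_copy_and_add_and_remove(
        args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
        GPR_ARRAY_SIZE(args_to_add));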
-grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args) {
+grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_lb_targets_info_create_channel_arg(targets_info),
grpc_fake_resolver_response_generator_arg(response_generator)};
@@ -89,7 +89,7 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
*
* - The fake resolver generator, because we are replacing it with the one
* from the grpclb policy, used to propagate updates to the LB channel. */
- static const char *keys_to_remove[] = {
+ static const char* keys_to_remove[] = {
GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
/* Add the targets info table to be used for secure naming */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
index b38c076f38..ce88cf9ee4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
@@ -70,4 +70,4 @@ void grpc_grpclb_dropped_call_counts_destroy(
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 4d5fb2081c..87d7336b0c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -23,9 +23,9 @@
#include <grpc/support/alloc.h>
/* invoked once for every Server in ServerList */
-static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
+static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field,
+ void** arg) {
+ grpc_grpclb_serverlist* sl = (grpc_grpclb_serverlist*)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -40,16 +40,16 @@ typedef struct decode_serverlist_arg {
* which index of the serverlist are we currently decoding */
size_t decoding_idx;
/* The decoded serverlist */
- grpc_grpclb_serverlist *serverlist;
+ grpc_grpclb_serverlist* serverlist;
} decode_serverlist_arg;
/* invoked once for every Server in ServerList */
-static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
+static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
+ void** arg) {
+ decode_serverlist_arg* dec_arg = (decode_serverlist_arg*)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
- grpc_grpclb_server *server =
- (grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
+ grpc_grpclb_server* server =
+ (grpc_grpclb_server*)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -59,9 +59,9 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
return true;
}
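
For orientation, a sketch of how these callbacks plug into nanopb's decode
pass, mirroring grpc_grpclb_response_parse_serverlist below; the
server_list.servers field layout follows the generated grpc_lb_v1 structs:

    grpc_grpclb_response res;
    memset(&res, 0, sizeof(grpc_grpclb_response));
    res.server_list.servers.funcs.decode = count_serverlist; /* first pass */
    res.server_list.servers.arg = sl;
    if (!pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res)) {
      /* nanopb reports the failure via PB_GET_ERROR(&stream) */
    }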
-grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
- grpc_grpclb_request *req =
- (grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
+grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name) {
+ grpc_grpclb_request* req =
+ (grpc_grpclb_request*)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -71,24 +71,24 @@ grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
}
static void populate_timestamp(gpr_timespec timestamp,
- struct _grpc_lb_v1_Timestamp *timestamp_pb) {
+ struct _grpc_lb_v1_Timestamp* timestamp_pb) {
timestamp_pb->has_seconds = true;
timestamp_pb->seconds = timestamp.tv_sec;
timestamp_pb->has_nanos = true;
timestamp_pb->nanos = timestamp.tv_nsec;
}
-static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
- void *const *arg) {
- char *str = (char *)*arg;
+static bool encode_string(pb_ostream_t* stream, const pb_field_t* field,
+ void* const* arg) {
+ char* str = (char*)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
- return pb_encode_string(stream, (uint8_t *)str, strlen(str));
+ return pb_encode_string(stream, (uint8_t*)str, strlen(str));
}
-static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
- void *const *arg) {
- grpc_grpclb_dropped_call_counts *drop_entries =
- (grpc_grpclb_dropped_call_counts *)*arg;
+static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
+ void* const* arg) {
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ (grpc_grpclb_dropped_call_counts*)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -105,10 +105,10 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
return true;
}
-grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
- grpc_grpclb_client_stats *client_stats) {
- grpc_grpclb_request *req =
- (grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
+grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
+ grpc_grpclb_client_stats* client_stats) {
+ grpc_grpclb_request* req =
+ (grpc_grpclb_request*)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -123,12 +123,12 @@ grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
&req->client_stats.num_calls_finished,
&req->client_stats.num_calls_finished_with_client_failed_to_send,
&req->client_stats.num_calls_finished_known_received,
- (grpc_grpclb_dropped_call_counts **)&req->client_stats
+ (grpc_grpclb_dropped_call_counts**)&req->client_stats
.calls_finished_with_drop.arg);
return req;
}
-grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
+grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request) {
size_t encoded_length;
pb_ostream_t sizestream;
pb_ostream_t outputstream;
@@ -145,10 +145,10 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
return slice;
}
-void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
+void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
if (request->has_client_stats) {
- grpc_grpclb_dropped_call_counts *drop_entries =
- (grpc_grpclb_dropped_call_counts *)
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ (grpc_grpclb_dropped_call_counts*)
request->client_stats.calls_finished_with_drop.arg;
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
@@ -156,7 +156,7 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
}
typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response;
-grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
+grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
@@ -170,8 +170,8 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return NULL;
- grpc_grpclb_initial_response *initial_res =
- (grpc_grpclb_initial_response *)gpr_malloc(
+ grpc_grpclb_initial_response* initial_res =
+ (grpc_grpclb_initial_response*)gpr_malloc(
sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@@ -179,14 +179,14 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
return initial_res;
}
-grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist *sl =
- (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist* sl =
+ (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -200,8 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = (grpc_grpclb_server **)gpr_zalloc(
- sizeof(grpc_grpclb_server *) * sl->num_servers);
+ sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
+ sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -221,7 +221,7 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
return sl;
}
-void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) {
if (serverlist == NULL) {
return;
}
@@ -232,25 +232,25 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
gpr_free(serverlist);
}
-grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
- const grpc_grpclb_serverlist *sl) {
- grpc_grpclb_serverlist *copy =
- (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
+ const grpc_grpclb_serverlist* sl) {
+ grpc_grpclb_serverlist* copy =
+ (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
- copy->servers = (grpc_grpclb_server **)gpr_malloc(
- sizeof(grpc_grpclb_server *) * sl->num_servers);
+ copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
+ sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
copy->servers[i] =
- (grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
+ (grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
}
-bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
- const grpc_grpclb_serverlist *rhs) {
+bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
+ const grpc_grpclb_serverlist* rhs) {
if (lhs == NULL || rhs == NULL) {
return false;
}
@@ -269,13 +269,13 @@ bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
return true;
}
-bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs,
- const grpc_grpclb_server *rhs) {
+bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs,
+ const grpc_grpclb_server* rhs) {
return memcmp(lhs, rhs, sizeof(grpc_grpclb_server)) == 0;
}
-int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
- const grpc_grpclb_duration *rhs) {
+int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
+ const grpc_grpclb_duration* rhs) {
GPR_ASSERT(lhs && rhs);
if (lhs->has_seconds && rhs->has_seconds) {
if (lhs->seconds < rhs->seconds) return -1;
@@ -299,13 +299,13 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
-grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb) {
return (grpc_millis)(
(duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
(duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
void grpc_grpclb_initial_response_destroy(
- grpc_grpclb_initial_response *response) {
+ grpc_grpclb_initial_response* response) {
gpr_free(response);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index 56b9c096d0..138012c63a 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -37,59 +37,59 @@ typedef grpc_lb_v1_InitialLoadBalanceResponse grpc_grpclb_initial_response;
typedef grpc_lb_v1_Server grpc_grpclb_server;
typedef grpc_lb_v1_Duration grpc_grpclb_duration;
typedef struct {
- grpc_grpclb_server **servers;
+ grpc_grpclb_server** servers;
size_t num_servers;
grpc_grpclb_duration expiration_interval;
} grpc_grpclb_serverlist;
/** Create a request for a gRPC LB service under \a lb_service_name */
-grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
-grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
- grpc_grpclb_client_stats *client_stats);
+grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name);
+grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
+ grpc_grpclb_client_stats* client_stats);
/** Protocol Buffers v3-encode \a request */
-grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
+grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request);
/** Destroy \a request */
-void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
+void grpc_grpclb_request_destroy(grpc_grpclb_request* request);
/** Parse (i.e., decode) the bytes in \a encoded_grpc_grpclb_response as a \a
* grpc_grpclb_initial_response */
-grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
+grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response);
/** Parse the list of servers from an encoded \a grpc_grpclb_response */
-grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response);
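
A usage sketch mirroring lb_on_response_received_locked earlier in this diff:
the first message on the stream is tried as an initial response, subsequent
ones as serverlists.

    grpc_grpclb_initial_response* initial =
        grpc_grpclb_initial_response_parse(response_slice);
    if (initial != NULL) {
      /* read client_stats_report_interval, then release */
      grpc_grpclb_initial_response_destroy(initial);
    } else {
      grpc_grpclb_serverlist* serverlist =
          grpc_grpclb_response_parse_serverlist(response_slice);
      if (serverlist != NULL) {
        /* hand the addresses to the embedded RR policy, then release */
        grpc_grpclb_destroy_serverlist(serverlist);
      }
    }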
/** Return a copy of \a sl. The caller is responsible for calling \a
* grpc_grpclb_destroy_serverlist on the returned copy. */
-grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
- const grpc_grpclb_serverlist *sl);
+grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
+ const grpc_grpclb_serverlist* sl);
-bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
- const grpc_grpclb_serverlist *rhs);
+bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
+ const grpc_grpclb_serverlist* rhs);
-bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs,
- const grpc_grpclb_server *rhs);
+bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs,
+ const grpc_grpclb_server* rhs);
/** Destroy \a serverlist */
-void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist);
/** Compare \a lhs against \a rhs and return 0 if \a lhs and \a rhs are equal,
* < 0 if \a lhs represents a duration shorter than \a rhs and > 0 otherwise */
-int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
- const grpc_grpclb_duration *rhs);
+int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
+ const grpc_grpclb_duration* rhs);
-grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
- grpc_grpclb_initial_response *response);
+ grpc_grpclb_initial_response* response);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index f0c66c68e1..125a4186aa 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -33,33 +33,33 @@ grpc_tracer_flag grpc_lb_pick_first_trace =
GRPC_TRACER_INITIALIZER(false, "pick_first");
typedef struct pending_pick {
- struct pending_pick *next;
+ struct pending_pick* next;
uint32_t initial_metadata_flags;
- grpc_connected_subchannel **target;
- grpc_closure *on_complete;
+ grpc_connected_subchannel** target;
+ grpc_closure* on_complete;
} pending_pick;
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
- grpc_lb_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list;
/** latest pending subchannel list */
- grpc_lb_subchannel_list *latest_pending_subchannel_list;
+ grpc_lb_subchannel_list* latest_pending_subchannel_list;
/** selected subchannel in \a subchannel_list */
- grpc_lb_subchannel_data *selected;
+ grpc_lb_subchannel_data* selected;
/** have we started picking? */
bool started_picking;
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
- pending_pick *pending_picks;
+ pending_pick* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
-static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
GPR_ASSERT(p->pending_picks == NULL);
@@ -67,17 +67,17 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
grpc_subchannel_index_unref();
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
+ gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void*)p);
}
}
-static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
- grpc_error *error) {
+static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
+ grpc_error* error) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
@@ -100,19 +100,19 @@ static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
GRPC_ERROR_UNREF(error);
}
-static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
+static void pf_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ shutdown_locked(exec_ctx, (pick_first_lb_policy*)pol,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
}
-static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -128,15 +128,15 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+ grpc_error* error) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -152,8 +152,8 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
+static void start_picking_locked(grpc_exec_ctx* exec_ctx,
+ pick_first_lb_policy* p) {
p->started_picking = true;
if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
p->subchannel_list->checking_subchannel = 0;
@@ -164,19 +164,19 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
}
-static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static int pf_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
@@ -187,7 +187,7 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
+ pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -196,10 +196,10 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
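The asynchronous path above (return 0, push a pending_pick, complete it later from the connectivity callback) can be modeled on its own. A single-threaded sketch with simplified stand-ins, not the real grpc_closure machinery:

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal model of the pending-pick queue (the real struct also
     * carries initial_metadata_flags and a grpc_closure). */
    typedef struct pending_pick {
      struct pending_pick* next;
      int* target;                 /* stand-in for grpc_connected_subchannel** */
      void (*on_complete)(int ok); /* stand-in for GRPC_CLOSURE_SCHED */
    } pending_pick;

    static pending_pick* g_pending = NULL;

    /* Mirrors pf_pick_locked's queued path: push onto the list head. */
    static int pick(int* target, void (*on_complete)(int)) {
      pending_pick* pp = malloc(sizeof(*pp));
      pp->next = g_pending;
      pp->target = target;
      pp->on_complete = on_complete;
      g_pending = pp;
      return 0; /* 0 == pick pending; caller waits for on_complete */
    }

    /* Mirrors the drain in pf_connectivity_changed_locked once a
     * subchannel is selected. */
    static void connected(int subchannel) {
      pending_pick* pp;
      while ((pp = g_pending) != NULL) {
        g_pending = pp->next;
        *pp->target = subchannel;
        pp->on_complete(1);
        free(pp);
      }
    }

    static void done(int ok) { printf("pick complete ok=%d\n", ok); }

    int main(void) {
      int target = 0;
      pick(&target, done);
      connected(42); /* services the pending pick; target == 42 */
      return 0;
    }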
-static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
+static void destroy_unselected_subchannels_locked(grpc_exec_ctx* exec_ctx,
+ pick_first_lb_policy* p) {
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
- grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &p->subchannel_list->subchannels[i];
if (p->selected != sd) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"selected_different_subchannel");
@@ -208,23 +208,23 @@ static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
}
static grpc_connectivity_state pf_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
+ grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
}
-static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_closure* closure) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
closure);
@@ -234,13 +234,13 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
-static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
-static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
- const grpc_arg *arg =
+static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (p->subchannel_list == NULL) {
@@ -254,17 +254,17 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_ERROR,
"No valid LB addresses channel arg for Pick First %p update, "
"ignoring.",
- (void *)p);
+ (void*)p);
}
return;
}
- const grpc_lb_addresses *addresses =
- (const grpc_lb_addresses *)arg->value.pointer.p;
+ const grpc_lb_addresses* addresses =
+ (const grpc_lb_addresses*)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void *)p, (unsigned long)addresses->num_addresses);
+ (void*)p, (unsigned long)addresses->num_addresses);
}
- grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
pf_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
@@ -294,7 +294,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
// We do have a selected subchannel.
// Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
- grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
if (sd->subchannel == p->selected->subchannel) {
// The currently selected subchannel is in the update: we are done.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
@@ -339,8 +339,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
- (void *)p, (void *)p->latest_pending_subchannel_list,
- (void *)subchannel_list);
+ (void*)p, (void*)p->latest_pending_subchannel_list,
+ (void*)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
@@ -358,19 +358,19 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
}
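pf_update_locked's first step is a keyed lookup in the channel args. A minimal sketch of that lookup, with the arg type reduced to the pointer case and the key string assumed to be "grpc.lb_addresses" (the value behind GRPC_ARG_LB_ADDRESSES):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified model of the grpc_arg lookup; real args also carry
     * integer and string values plus a type tag that must be checked. */
    typedef struct { const char* key; void* pointer; } arg;
    typedef struct { size_t num_args; arg* args; } channel_args;

    static void* find_pointer_arg(const channel_args* args, const char* key) {
      for (size_t i = 0; i < args->num_args; ++i) {
        if (strcmp(args->args[i].key, key) == 0) return args->args[i].pointer;
      }
      return NULL; /* caller must handle a missing arg, as pf_update_locked does */
    }

    int main(void) {
      int addresses = 3;
      arg a = {"grpc.lb_addresses", &addresses};
      channel_args args = {1, &a};
      int* found = find_pointer_arg(&args, "grpc.lb_addresses");
      printf("num_addresses=%d\n", found ? *found : -1);
      return 0;
    }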
-static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
- pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
+static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
+ pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
- (void *)p, (void *)sd->subchannel,
+ (void*)p, (void*)sd->subchannel,
sd->subchannel_list->checking_subchannel,
- sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
+ sd->subchannel_list->num_subchannels, (void*)sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
@@ -465,13 +465,13 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
"connected");
p->selected = sd;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
- (void *)sd->subchannel);
+ gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
+ (void*)sd->subchannel);
}
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(exec_ctx, p);
// Update any calls that were waiting for a pick.
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -479,7 +479,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
- (void *)p->selected);
+ (void*)p->selected);
}
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
@@ -530,7 +530,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"pf_candidate_shutdown");
// Advance to next subchannel and check its state.
- grpc_lb_subchannel_data *original_sd = sd;
+ grpc_lb_subchannel_data* original_sd = sd;
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
@@ -578,17 +578,17 @@ static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_notify_on_state_change_locked,
pf_update_locked};
-static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
+static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
-static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
+static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
-static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
+static grpc_lb_policy* create_pick_first(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
+ pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
+ gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
}
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
@@ -603,7 +603,7 @@ static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
static grpc_lb_policy_factory pick_first_lb_policy_factory = {
&pick_first_factory_vtable};
-static grpc_lb_policy_factory *pick_first_lb_factory_create() {
+static grpc_lb_policy_factory* pick_first_lb_factory_create() {
return &pick_first_lb_policy_factory;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 8f29c80130..76b4dd7992 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -46,12 +46,12 @@ grpc_tracer_flag grpc_lb_round_robin_trace =
*
* Once a pick is available, \a target is updated and \a on_complete called. */
typedef struct pending_pick {
- struct pending_pick *next;
+ struct pending_pick* next;
 /* output argument in which to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
- void **user_data;
+ void** user_data;
/* bitmask passed to pick() and used for selective cancelling. See
* grpc_lb_policy_cancel_picks() */
@@ -59,24 +59,24 @@ typedef struct pending_pick {
 /* output argument in which to store the pick()ed connected subchannel, or NULL
* upon error. */
- grpc_connected_subchannel **target;
+ grpc_connected_subchannel** target;
/* to be invoked once the pick() has completed (regardless of success) */
- grpc_closure *on_complete;
+ grpc_closure* on_complete;
} pending_pick;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- grpc_lb_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list;
/** have we started picking? */
bool started_picking;
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
- pending_pick *pending_picks;
+ pending_pick* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@@ -89,7 +89,7 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
- grpc_lb_subchannel_list *latest_pending_subchannel_list;
+ grpc_lb_subchannel_list* latest_pending_subchannel_list;
} round_robin_lb_policy;
/** Returns the index into p->subchannel_list->subchannels of the next
@@ -99,13 +99,13 @@ typedef struct round_robin_lb_policy {
* Note that this function does *not* update p->last_ready_subchannel_index.
* The caller must do that if it returns a pick. */
static size_t get_next_ready_subchannel_index_locked(
- const round_robin_lb_policy *p) {
+ const round_robin_lb_policy* p) {
GPR_ASSERT(p->subchannel_list != NULL);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
"last_ready_subchannel_index=%lu",
- (void *)p, (unsigned long)p->subchannel_list->num_subchannels,
+ (void*)p, (unsigned long)p->subchannel_list->num_subchannels,
(unsigned long)p->last_ready_subchannel_index);
}
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
@@ -116,8 +116,8 @@ static size_t get_next_ready_subchannel_index_locked(
GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%s",
- (void *)p, (void *)p->subchannel_list->subchannels[index].subchannel,
- (void *)p->subchannel_list, (unsigned long)index,
+ (void*)p, (void*)p->subchannel_list->subchannels[index].subchannel,
+ (void*)p->subchannel_list, (unsigned long)index,
grpc_connectivity_state_name(
p->subchannel_list->subchannels[index].curr_connectivity_state));
}
@@ -127,40 +127,39 @@ static size_t get_next_ready_subchannel_index_locked(
gpr_log(GPR_DEBUG,
"[RR %p] found next ready subchannel (%p) at index %lu of "
"subchannel_list %p",
- (void *)p,
- (void *)p->subchannel_list->subchannels[index].subchannel,
- (unsigned long)index, (void *)p->subchannel_list);
+ (void*)p,
+ (void*)p->subchannel_list->subchannels[index].subchannel,
+ (unsigned long)index, (void*)p->subchannel_list);
}
return index;
}
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void *)p);
+ gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void*)p);
}
return p->subchannel_list->num_subchannels;
}
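The modular scan above is worth isolating: starting one slot past last_ready_subchannel_index, every subchannel is probed exactly once, and num_subchannels doubles as the not-found sentinel. A standalone sketch of the same arithmetic:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Probe each slot once, wrapping around, and return the first READY
     * index; num is returned as an out-of-range sentinel when none is. */
    static size_t next_ready_index(const bool* ready, size_t num,
                                   size_t last_ready_index) {
      for (size_t i = 0; i < num; ++i) {
        const size_t index = (i + last_ready_index + 1) % num;
        if (ready[index]) return index;
      }
      return num;
    }

    int main(void) {
      bool ready[4] = {true, false, true, false};
      size_t idx = next_ready_index(ready, 4, 0); /* -> 2, skipping index 1 */
      printf("next ready index: %zu\n", idx);
      return 0;
    }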
// Sets p->last_ready_subchannel_index to last_ready_index.
-static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
+static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
size_t last_ready_index) {
GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
p->last_ready_subchannel_index = last_ready_index;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(
- GPR_DEBUG,
- "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
- (void *)p, (unsigned long)last_ready_index,
- (void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
- (void *)p->subchannel_list->subchannels[last_ready_index]
- .connected_subchannel);
+ gpr_log(GPR_DEBUG,
+ "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
+ (void*)p, (unsigned long)last_ready_index,
+ (void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
+ (void*)p->subchannel_list->subchannels[last_ready_index]
+ .connected_subchannel);
}
}
-static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
- (void *)pol, (void *)pol);
+ (void*)pol, (void*)pol);
}
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
@@ -169,13 +168,13 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
-static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
- grpc_error *error) {
+static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
+ grpc_error* error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
@@ -199,20 +198,20 @@ static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
GRPC_ERROR_UNREF(error);
}
-static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
}
-static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -228,15 +227,15 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+static void rr_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+ grpc_error* error) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
@@ -253,8 +252,8 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- round_robin_lb_policy *p) {
+static void start_picking_locked(grpc_exec_ctx* exec_ctx,
+ round_robin_lb_policy* p) {
p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
@@ -264,28 +263,28 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
}
-static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
GPR_ASSERT(!p->shutdown);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void *)pol);
+ gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void*)pol);
}
if (p->subchannel_list != NULL) {
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
- grpc_lb_subchannel_data *sd =
+ grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
*target =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
@@ -297,8 +296,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %lu)",
- (void *)p, (void *)sd->subchannel, (void *)*target,
- (void *)sd->subchannel_list, (unsigned long)next_ready_index);
+ (void*)p, (void*)sd->subchannel, (void*)*target,
+ (void*)sd->subchannel_list, (unsigned long)next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -309,7 +308,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
+ pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -319,8 +318,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
-static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
- grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
+static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
+ grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
@@ -352,7 +351,7 @@ static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, grpc_error* error) {
  /* In priority order. The first rule to match terminates the search (i.e., if we
* are on rule n, all previous rules were unfulfilled).
*
@@ -374,8 +373,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
grpc_connectivity_state new_state = sd->curr_connectivity_state;
- grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
- round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
+ grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
+ round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
@@ -409,18 +408,18 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
return new_state;
}
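The counter-based aggregation can be sketched on its own. The enum below collapses the real grpc_connectivity_state set and omits the SHUTDOWN and IDLE rules for brevity; it is a model of the priority order, not the full policy:

    #include <stddef.h>
    #include <stdio.h>

    typedef enum { READY, CONNECTING, TRANSIENT_FAILURE } conn_state;

    /* Per-list counters maintained by update_state_counters_locked. */
    typedef struct {
      size_t num_ready;
      size_t num_connecting;
    } counters;

    static conn_state aggregate(const counters* c) {
      if (c->num_ready > 0) return READY;           /* 1) any READY wins     */
      if (c->num_connecting > 0) return CONNECTING; /* 2) else still trying  */
      return TRANSIENT_FAILURE;                     /* 3) else report failure */
    }

    int main(void) {
      counters c = {0, 2};
      printf("state=%d\n", aggregate(&c)); /* CONNECTING */
      return 0;
    }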
-static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
- round_robin_lb_policy *p =
- (round_robin_lb_policy *)sd->subchannel_list->policy;
+static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
+ round_robin_lb_policy* p =
+ (round_robin_lb_policy*)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
"prev_state=%s new_state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
- (void *)p, (void *)sd->subchannel, (void *)sd->subchannel_list,
+ (void*)p, (void*)sd->subchannel, (void*)sd->subchannel_list,
grpc_connectivity_state_name(sd->prev_connectivity_state),
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
@@ -487,8 +486,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
"of %p (size %lu)",
- (void *)p, (void *)p->subchannel_list, num_subchannels,
- (void *)sd->subchannel_list, num_subchannels);
+ (void*)p, (void*)p->subchannel_list, num_subchannels,
+ (void*)sd->subchannel_list, num_subchannels);
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
@@ -503,14 +502,14 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
 * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
- grpc_lb_subchannel_data *selected =
+ grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -522,8 +521,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG,
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %lu)",
- (void *)p, (void *)selected->subchannel,
- (void *)p->subchannel_list, (unsigned long)next_ready_index);
+ (void*)p, (void*)selected->subchannel,
+ (void*)p->subchannel_list, (unsigned long)next_ready_index);
}
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
@@ -535,41 +534,42 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
static grpc_connectivity_state rr_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
+ grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
}
-static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_closure* closure) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
- grpc_lb_subchannel_data *selected =
+ grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
- grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Round Robin not connected"));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
}
}
-static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)policy;
- const grpc_arg *arg =
+static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
@@ -583,12 +583,12 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
- grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
}
- grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
rr_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
@@ -609,8 +609,8 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
- (void *)p, (void *)p->latest_pending_subchannel_list,
- (void *)subchannel_list);
+ (void*)p, (void*)p->latest_pending_subchannel_list,
+ (void*)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
@@ -649,22 +649,22 @@ static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_notify_on_state_change_locked,
rr_update_locked};
-static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
+static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
-static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
+static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
-static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
+static grpc_lb_policy* round_robin_create(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void *)p,
+ gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
(unsigned long)p->subchannel_list->num_subchannels);
}
return &p->base;
@@ -677,7 +677,7 @@ static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
static grpc_lb_policy_factory round_robin_lb_policy_factory = {
&round_robin_factory_vtable};
-static grpc_lb_policy_factory *round_robin_lb_factory_create() {
+static grpc_lb_policy_factory* round_robin_lb_factory_create() {
return &round_robin_lb_policy_factory;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index 08ea4f480b..db38ef5305 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -28,17 +28,18 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
-void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_data *sd,
- const char *reason) {
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_data* sd,
+ const char* reason) {
if (sd->subchannel != NULL) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
- gpr_log(
- GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
- " of %" PRIuPTR " (subchannel %p): unreffing subchannel",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): unreffing subchannel",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
sd->subchannel = NULL;
@@ -56,7 +57,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
}
void grpc_lb_subchannel_data_start_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -74,25 +75,26 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
}
void grpc_lb_subchannel_data_stop_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
- gpr_log(
- GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
- " (subchannel %p): stopping connectivity watch",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): stopping connectivity watch",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
sd->connectivity_notification_pending = false;
}
-grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
- const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
+ const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb) {
- grpc_lb_subchannel_list *subchannel_list =
- (grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
+ grpc_lb_subchannel_list* subchannel_list =
+ (grpc_lb_subchannel_list*)gpr_zalloc(sizeof(*subchannel_list));
if (GRPC_TRACER_ON(*tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
@@ -101,11 +103,11 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
- subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
+ subchannel_list->subchannels = (grpc_lb_subchannel_data*)gpr_zalloc(
sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
+ static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
// Create a subchannel for each address.
grpc_subchannel_args sc_args;
@@ -116,18 +118,18 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
- grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
+ grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel == NULL) {
// Subchannel could not be created.
if (GRPC_TRACER_ON(*tracer)) {
- char *address_uri =
+ char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] could not create subchannel for address uri %s, "
@@ -138,15 +140,16 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
continue;
}
if (GRPC_TRACER_ON(*tracer)) {
- char *address_uri =
+ char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
- ": Created subchannel %p for address uri %s",
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR
+ ": Created subchannel %p for address uri %s",
tracer->name, p, subchannel_list, subchannel_index, subchannel,
address_uri);
gpr_free(address_uri);
}
- grpc_lb_subchannel_data *sd =
+ grpc_lb_subchannel_data* sd =
&subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
@@ -169,15 +172,15 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
return subchannel_list;
}
-static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_list *subchannel_list) {
+static void subchannel_list_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_list* subchannel_list) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"subchannel_list_destroy");
}
@@ -185,8 +188,8 @@ static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(subchannel_list);
}
-void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
@@ -197,9 +200,9 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
}
}
-void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
@@ -214,35 +217,36 @@ void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
}
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
- grpc_lb_subchannel_list *subchannel_list, const char *reason) {
+ grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
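The ref/unref discipline above pairs one ref per pending connectivity notification with the list's initial ref; the last unref destroys the list. A single-threaded model, with a plain counter standing in for gpr_ref/gpr_unref:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int refcount; } sc_list;

    static sc_list* list_create(void) {
      sc_list* l = calloc(1, sizeof(*l));
      l->refcount = 1; /* initial ref, as with gpr_ref_init(&refcount, 1) */
      return l;
    }

    static void list_ref(sc_list* l) { ++l->refcount; }

    static void list_unref(sc_list* l) {
      const bool done = (--l->refcount == 0);
      if (done) {
        printf("destroying list\n");
        free(l);
      }
    }

    int main(void) {
      sc_list* l = list_create();
      list_ref(l);   /* taken when a connectivity watch starts */
      list_unref(l); /* released when the notification arrives */
      list_unref(l); /* drops the initial ref -> destroy       */
      return 0;
    }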
static void subchannel_data_cancel_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
- gpr_log(
- GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
- " (subchannel %p): canceling connectivity watch (%s)",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel, reason);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): canceling connectivity watch (%s)",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_list_shutdown_and_unref(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
subchannel_list->tracer->name, subchannel_list->policy,
@@ -251,7 +255,7 @@ void grpc_lb_subchannel_list_shutdown_and_unref(
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
// If there's a pending notification for this subchannel, cancel it;
// the callback is responsible for unreffing the subchannel.
// Otherwise, unref the subchannel directly.
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 9d5984260f..e18ad490e8 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -44,10 +44,10 @@ typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
typedef struct {
/** backpointer to owning subchannel list */
- grpc_lb_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list;
/** subchannel itself */
- grpc_subchannel *subchannel;
- grpc_connected_subchannel *connected_subchannel;
+ grpc_subchannel* subchannel;
+ grpc_connected_subchannel* connected_subchannel;
/** Is a connectivity notification pending? */
bool connectivity_notification_pending;
/** notification that connectivity has changed on subchannel */
@@ -63,36 +63,36 @@ typedef struct {
* \a connectivity_changed_closure. */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
- void *user_data;
+ void* user_data;
/** vtable to operate over \a user_data */
- const grpc_lb_user_data_vtable *user_data_vtable;
+ const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_subchannel_data;
/// Unrefs the subchannel contained in sd.
-void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_data *sd,
- const char *reason);
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_data* sd,
+ const char* reason);
/// Starts watching the connectivity state of the subchannel.
/// The connectivity_changed_cb callback must invoke either
/// grpc_lb_subchannel_data_stop_connectivity_watch() or again call
/// grpc_lb_subchannel_data_start_connectivity_watch().
void grpc_lb_subchannel_data_start_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
/// Stops watching the connectivity state of the subchannel.
void grpc_lb_subchannel_data_stop_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
- grpc_lb_policy *policy;
+ grpc_lb_policy* policy;
- grpc_tracer_flag *tracer;
+ grpc_tracer_flag* tracer;
/** all our subchannels */
size_t num_subchannels;
- grpc_lb_subchannel_data *subchannels;
+ grpc_lb_subchannel_data* subchannels;
/** Index into subchannels of the one we're currently checking.
* Used when connecting to subchannels serially instead of in parallel. */
@@ -120,31 +120,31 @@ struct grpc_lb_subchannel_list {
bool shutting_down;
};
-grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
- const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
+ const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb);
-void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
-void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
/// Takes and releases refs needed for a connectivity notification.
/// This includes a ref to subchannel_list and a weak ref to the LB policy.
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
- grpc_lb_subchannel_list *subchannel_list, const char *reason);
+ grpc_lb_subchannel_list* subchannel_list, const char* reason);
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
/// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
/// connectivity state notification callback will ultimately unref it.
void grpc_lb_subchannel_list_shutdown_and_unref(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
#ifdef __cplusplus
}
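The watch contract documented in this header (the connectivity callback must either stop the watch or re-arm it) keeps exactly one notification pending per subchannel at any time. A minimal model of that invariant, with the gRPC plumbing reduced to a flag:

    #include <stdbool.h>
    #include <stdio.h>

    static bool g_notification_pending = false;

    static void start_watch(void) { g_notification_pending = true; }
    static void stop_watch(void) { g_notification_pending = false; }

    /* The callback consumes the pending notification and must choose
     * exactly one of the two follow-ups, per the header's contract. */
    static void on_connectivity_changed(bool keep_watching) {
      if (keep_watching) {
        start_watch(); /* re-arm for the next state change */
      } else {
        stop_watch();  /* shutting down: drop the watch    */
      }
    }

    int main(void) {
      start_watch();
      on_connectivity_changed(true);  /* still watching */
      on_connectivity_changed(false); /* watch released */
      printf("pending=%d\n", g_notification_pending);
      return 0;
    }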
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 8790ffdda3..360a42b177 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -37,7 +37,7 @@ typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
struct grpc_lb_policy_factory {
- const grpc_lb_policy_factory_vtable *vtable;
+ const grpc_lb_policy_factory_vtable* vtable;
};
/** A resolved address alongside any LB related information associated with it.
@@ -48,91 +48,91 @@ struct grpc_lb_policy_factory {
typedef struct grpc_lb_address {
grpc_resolved_address address;
bool is_balancer;
- char *balancer_name; /* For secure naming. */
- void *user_data;
+ char* balancer_name; /* For secure naming. */
+ void* user_data;
} grpc_lb_address;
typedef struct grpc_lb_user_data_vtable {
- void *(*copy)(void *);
- void (*destroy)(grpc_exec_ctx *exec_ctx, void *);
- int (*cmp)(void *, void *);
+ void* (*copy)(void*);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, void*);
+ int (*cmp)(void*, void*);
} grpc_lb_user_data_vtable;
typedef struct grpc_lb_addresses {
size_t num_addresses;
- grpc_lb_address *addresses;
- const grpc_lb_user_data_vtable *user_data_vtable;
+ grpc_lb_address* addresses;
+ const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_addresses;
/** Returns a grpc_lb_addresses struct with enough space for
\a num_addresses addresses. The \a user_data_vtable argument may be
NULL if no user data will be added. */
-grpc_lb_addresses *grpc_lb_addresses_create(
- size_t num_addresses, const grpc_lb_user_data_vtable *user_data_vtable);
+grpc_lb_addresses* grpc_lb_addresses_create(
+ size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable);
/** Creates a copy of \a addresses. */
-grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
+grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses);
/** Sets the value of the address at index \a index of \a addresses.
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
-void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
- const void *address, size_t address_len,
- bool is_balancer, const char *balancer_name,
- void *user_data);
+void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
+ const void* address, size_t address_len,
+ bool is_balancer, const char* balancer_name,
+ void* user_data);
/** Sets the value of the address at index \a index of \a addresses from \a uri.
* Returns true upon success, false otherwise. Takes ownership of \a
* balancer_name. */
-bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses *addresses,
- size_t index, const grpc_uri *uri,
+bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses* addresses,
+ size_t index, const grpc_uri* uri,
bool is_balancer,
- const char *balancer_name,
- void *user_data);
+ const char* balancer_name,
+ void* user_data);
/** Compares \a addresses1 and \a addresses2. */
-int grpc_lb_addresses_cmp(const grpc_lb_addresses *addresses1,
- const grpc_lb_addresses *addresses2);
+int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1,
+ const grpc_lb_addresses* addresses2);
/** Destroys \a addresses. */
-void grpc_lb_addresses_destroy(grpc_exec_ctx *exec_ctx,
- grpc_lb_addresses *addresses);
+void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_lb_addresses* addresses);
/** Returns a channel arg containing \a addresses. */
grpc_arg grpc_lb_addresses_create_channel_arg(
- const grpc_lb_addresses *addresses);
+ const grpc_lb_addresses* addresses);
/** Returns the \a grpc_lb_addresses instance in \a channel_args or NULL */
-grpc_lb_addresses *grpc_lb_addresses_find_channel_arg(
- const grpc_channel_args *channel_args);
+grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
+ const grpc_channel_args* channel_args);
/** Arguments passed to LB policies. */
struct grpc_lb_policy_args {
- grpc_client_channel_factory *client_channel_factory;
- grpc_channel_args *args;
- grpc_combiner *combiner;
+ grpc_client_channel_factory* client_channel_factory;
+ grpc_channel_args* args;
+ grpc_combiner* combiner;
};
struct grpc_lb_policy_factory_vtable {
- void (*ref)(grpc_lb_policy_factory *factory);
- void (*unref)(grpc_lb_policy_factory *factory);
+ void (*ref)(grpc_lb_policy_factory* factory);
+ void (*unref)(grpc_lb_policy_factory* factory);
/** Implementation of grpc_lb_policy_factory_create_lb_policy */
- grpc_lb_policy *(*create_lb_policy)(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args);
+ grpc_lb_policy* (*create_lb_policy)(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args);
/** Name for the LB policy this factory implements */
- const char *name;
+ const char* name;
};
-void grpc_lb_policy_factory_ref(grpc_lb_policy_factory *factory);
-void grpc_lb_policy_factory_unref(grpc_lb_policy_factory *factory);
+void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory);
+void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory);
/** Create a lb_policy instance. */
-grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args);
+grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args);
#ifdef __cplusplus
}
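The grpc_lb_addresses API above follows a create/set/destroy pattern, with ownership of balancer_name transferring on set. A simplified standalone analogue (fixed-size address strings in place of grpc_resolved_address, and no user_data vtable):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      char address[64];
      bool is_balancer;
      char* balancer_name; /* owned, per "takes ownership" above */
    } lb_address;

    typedef struct {
      size_t num_addresses;
      lb_address* addresses;
    } lb_addresses;

    static lb_addresses* lb_addresses_create(size_t num) {
      lb_addresses* a = calloc(1, sizeof(*a));
      a->num_addresses = num;
      a->addresses = calloc(num, sizeof(*a->addresses));
      return a;
    }

    static void lb_addresses_set_address(lb_addresses* a, size_t index,
                                         const char* address, bool is_balancer,
                                         char* balancer_name /* owned */) {
      snprintf(a->addresses[index].address, sizeof(a->addresses[index].address),
               "%s", address);
      a->addresses[index].is_balancer = is_balancer;
      a->addresses[index].balancer_name = balancer_name;
    }

    static void lb_addresses_destroy(lb_addresses* a) {
      for (size_t i = 0; i < a->num_addresses; ++i) {
        free(a->addresses[i].balancer_name); /* release owned names */
      }
      free(a->addresses);
      free(a);
    }

    int main(void) {
      lb_addresses* a = lb_addresses_create(2);
      lb_addresses_set_address(a, 0, "10.0.0.1:443", false, NULL);
      lb_addresses_set_address(a, 1, "10.0.0.2:443", true, strdup("lb.example"));
      printf("%zu addresses\n", a->num_addresses);
      lb_addresses_destroy(a);
      return 0;
    }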
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index f2460f8304..7b0a926a1b 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
@@ -24,7 +24,7 @@
#define MAX_POLICIES 10
-static grpc_lb_policy_factory *g_all_of_the_lb_policies[MAX_POLICIES];
+static grpc_lb_policy_factory* g_all_of_the_lb_policies[MAX_POLICIES];
static int g_number_of_lb_policies = 0;
void grpc_lb_policy_registry_init(void) { g_number_of_lb_policies = 0; }
@@ -36,7 +36,7 @@ void grpc_lb_policy_registry_shutdown(void) {
}
}
-void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
+void grpc_register_lb_policy(grpc_lb_policy_factory* factory) {
int i;
for (i = 0; i < g_number_of_lb_policies; i++) {
GPR_ASSERT(0 != gpr_stricmp(factory->vtable->name,
@@ -47,7 +47,7 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory;
}
-static grpc_lb_policy_factory *lookup_factory(const char *name) {
+static grpc_lb_policy_factory* lookup_factory(const char* name) {
int i;
if (name == NULL) return NULL;
@@ -61,10 +61,10 @@ static grpc_lb_policy_factory *lookup_factory(const char *name) {
return NULL;
}
-grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
- grpc_lb_policy_args *args) {
- grpc_lb_policy_factory *factory = lookup_factory(name);
- grpc_lb_policy *lb_policy =
+grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
+ grpc_lb_policy_args* args) {
+ grpc_lb_policy_factory* factory = lookup_factory(name);
+ grpc_lb_policy* lb_policy =
grpc_lb_policy_factory_create_lb_policy(exec_ctx, factory, args);
return lb_policy;
}
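
Given such a factory, registration and creation form a simple pair. Note that the registry is a fixed array of MAX_POLICIES (10) entries and asserts at registration time that names are case-insensitively unique. A hedged usage sketch, reusing the hypothetical my_factory from the earlier sketch:

    /* Once, at plugin init time: */
    grpc_register_lb_policy(&my_factory);

    /* Later, when building a channel stack (exec_ctx and args are
       supplied by the caller): */
    grpc_lb_policy* policy =
        grpc_lb_policy_create(exec_ctx, "my_policy", args);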
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index 55154cb02a..055f751b57 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -32,14 +32,14 @@ void grpc_lb_policy_registry_init(void);
void grpc_lb_policy_registry_shutdown(void);
/** Register a LB policy factory. */
-void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
+void grpc_register_lb_policy(grpc_lb_policy_factory* factory);
/** Create a \a grpc_lb_policy instance.
*
* If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init
* will be returned. */
-grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
- grpc_lb_policy_args *args);
+grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
+ grpc_lb_policy_args* args);
#ifdef __cplusplus
}
diff --git a/src/core/ext/filters/client_channel/parse_address.cc b/src/core/ext/filters/client_channel/parse_address.cc
index 2152b5a1e9..6cf77f13bc 100644
--- a/src/core/ext/filters/client_channel/parse_address.cc
+++ b/src/core/ext/filters/client_channel/parse_address.cc
@@ -33,13 +33,13 @@
#ifdef GRPC_HAVE_UNIX_SOCKET
-bool grpc_parse_unix(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_unix(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
if (strcmp("unix", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'unix' scheme, got '%s'", uri->scheme);
return false;
}
- struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
+ struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
const size_t maxlen = sizeof(un->sun_path);
const size_t path_len = strnlen(uri->path, maxlen);
if (path_len == maxlen) return false;
@@ -51,24 +51,24 @@ bool grpc_parse_unix(const grpc_uri *uri,
#else /* GRPC_HAVE_UNIX_SOCKET */
-bool grpc_parse_unix(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_unix(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
abort();
}
#endif /* GRPC_HAVE_UNIX_SOCKET */
-bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors) {
bool success = false;
// Split host and port.
- char *host;
- char *port;
+ char* host;
+ char* port;
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in *in = (struct sockaddr_in *)addr->addr;
+ struct sockaddr_in* in = (struct sockaddr_in*)addr->addr;
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -92,32 +92,32 @@ done:
return success;
}
-bool grpc_parse_ipv4(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_ipv4(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
if (strcmp("ipv4", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'ipv4' scheme, got '%s'", uri->scheme);
return false;
}
- const char *host_port = uri->path;
+ const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
return grpc_parse_ipv4_hostport(host_port, resolved_addr,
true /* log_errors */);
}
-bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors) {
bool success = false;
// Split host and port.
- char *host;
- char *port;
+ char* host;
+ char* port;
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr->addr;
+ struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr->addr;
in6->sin6_family = AF_INET6;
// Handle the RFC6874 syntax for IPv6 zone identifiers.
- char *host_end = (char *)gpr_memrchr(host, '%', strlen(host));
+ char* host_end = (char*)gpr_memrchr(host, '%', strlen(host));
if (host_end != NULL) {
GPR_ASSERT(host_end >= host);
char host_without_scope[INET6_ADDRSTRLEN];
@@ -161,19 +161,19 @@ done:
return success;
}
-bool grpc_parse_ipv6(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_ipv6(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
if (strcmp("ipv6", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'ipv6' scheme, got '%s'", uri->scheme);
return false;
}
- const char *host_port = uri->path;
+ const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
return grpc_parse_ipv6_hostport(host_port, resolved_addr,
true /* log_errors */);
}
-bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr) {
+bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr) {
if (strcmp("unix", uri->scheme) == 0) {
return grpc_parse_unix(uri, resolved_addr);
} else if (strcmp("ipv4", uri->scheme) == 0) {
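
These parsers all funnel into plain sockaddr storage inside grpc_resolved_address. A minimal caller sketch for the bare host:port variant (the address literal is illustrative):

    grpc_resolved_address addr;
    if (grpc_parse_ipv4_hostport("127.0.0.1:50051", &addr,
                                 true /* log_errors */)) {
      /* addr.addr now holds a struct sockaddr_in and addr.len its size,
         exactly as filled in by the code above. */
    }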
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index 27d06a1cb3..b45859f9a2 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -30,23 +30,23 @@ extern "C" {
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
-bool grpc_parse_unix(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_unix(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain an
* IPv4 host:port pair. Returns true upon success. */
-bool grpc_parse_ipv4(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_ipv4(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain an
* IPv6 host:port pair. Returns true upon success. */
-bool grpc_parse_ipv6(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_ipv6(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri. Returns true upon success. */
-bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Parse bare IPv4 or IPv6 "IP:port" strings. */
-bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors);
-bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors);
#ifdef __cplusplus
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index 8401504fcf..7e84b98cb9 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -24,17 +24,17 @@ grpc_tracer_flag grpc_trace_resolver_refcount =
GRPC_TRACER_INITIALIZER(false, "resolver_refcount");
#endif
-void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable,
- grpc_combiner *combiner) {
+void grpc_resolver_init(grpc_resolver* resolver,
+ const grpc_resolver_vtable* vtable,
+ grpc_combiner* combiner) {
resolver->vtable = vtable;
resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
gpr_ref_init(&resolver->refs, 1);
}
#ifndef NDEBUG
-void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
- const char *reason) {
+void grpc_resolver_ref(grpc_resolver* resolver, const char* file, int line,
+ const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -42,14 +42,14 @@ void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
old_refs, old_refs + 1, reason);
}
#else
-void grpc_resolver_ref(grpc_resolver *resolver) {
+void grpc_resolver_ref(grpc_resolver* resolver) {
#endif
gpr_ref(&resolver->refs);
}
#ifndef NDEBUG
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- const char *file, int line, const char *reason) {
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ const char* file, int line, const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -57,27 +57,27 @@ void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
old_refs, old_refs - 1, reason);
}
#else
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
- grpc_combiner *combiner = resolver->combiner;
+ grpc_combiner* combiner = resolver->combiner;
resolver->vtable->destroy(exec_ctx, resolver);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
}
}
-void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
resolver->vtable->shutdown_locked(exec_ctx, resolver);
}
-void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
}
-void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result,
- grpc_closure *on_complete) {
+void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** result,
+ grpc_closure* on_complete) {
resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
}
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index 73fbbbbc3b..a0eb0bcfdf 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -35,49 +35,49 @@ extern grpc_tracer_flag grpc_trace_resolver_refcount;
/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
- const grpc_resolver_vtable *vtable;
+ const grpc_resolver_vtable* vtable;
gpr_refcount refs;
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
};
struct grpc_resolver_vtable {
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
- void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
+ void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
+ void (*channel_saw_error_locked)(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver);
+ void (*next_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** result, grpc_closure* on_complete);
};
#ifndef NDEBUG
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
#define GRPC_RESOLVER_UNREF(e, p, r) \
grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r))
-void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
- const char *reason);
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy,
- const char *file, int line, const char *reason);
+void grpc_resolver_ref(grpc_resolver* policy, const char* file, int line,
+ const char* reason);
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy,
+ const char* file, int line, const char* reason);
#else
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p))
-void grpc_resolver_ref(grpc_resolver *policy);
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
+void grpc_resolver_ref(grpc_resolver* policy);
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy);
#endif
-void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable,
- grpc_combiner *combiner);
+void grpc_resolver_init(grpc_resolver* resolver,
+ const grpc_resolver_vtable* vtable,
+ grpc_combiner* combiner);
-void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
+void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon.
Must be called from the combiner passed as a resolver_arg at construction
time.*/
-void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver);
/** Get the next result from the resolver. Expected to set \a *result with
new channel args and then schedule \a on_complete for execution.
@@ -87,9 +87,9 @@ void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
Must be called from the combiner passed as a resolver_arg at construction
time.*/
-void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result,
- grpc_closure *on_complete);
+void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** result,
+ grpc_closure* on_complete);
#ifdef __cplusplus
}
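
A resolver implementation supplies the four vtable callbacks and embeds grpc_resolver as its first member, exactly as the c-ares resolver below does. A skeletal, hypothetical sketch (my_* names are illustrative):

    typedef struct {
      grpc_resolver base; /* must be first */
      /* ... implementation state ... */
    } my_resolver;

    static void my_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r) {
      gpr_free(r);
    }
    static void my_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r) {}
    static void my_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
                                            grpc_resolver* r) {}
    static void my_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
                               grpc_channel_args** result,
                               grpc_closure* on_complete) {
      /* Stash result/on_complete; set *result and schedule on_complete
         once resolution produces new channel args. */
    }

    static const grpc_resolver_vtable my_resolver_vtable = {
        my_destroy, my_shutdown_locked, my_channel_saw_error_locked,
        my_next_locked};

    /* In the constructor: grpc_resolver_init(&r->base, &my_resolver_vtable,
       args->combiner); all *_locked callbacks then run under that combiner. */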
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index a1ddaee499..76f08281f7 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -53,38 +53,38 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** DNS server to use (if not system default) */
- char *dns_server;
+ char* dns_server;
/** name to resolve (usually the same as target_name) */
- char *name_to_resolve;
+ char* name_to_resolve;
/** default port to use */
- char *default_port;
+ char* default_port;
/** channel args. */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
/** whether to request the service config */
bool request_service_config;
/** pollset_set to drive the name resolution process */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/** Closures used by the combiner */
grpc_closure dns_ares_on_retry_timer_locked;
grpc_closure dns_ares_on_resolved_locked;
/** Combiner guarding the rest of the state */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
/** are we currently resolving? */
bool resolving;
/** the pending resolving request */
- grpc_ares_request *pending_request;
+ grpc_ares_request* pending_request;
/** which version of the result have we published? */
int published_version;
/** which version of the result is current? */
int resolved_version;
/** pending next completion, or NULL */
- grpc_closure *next_completion;
+ grpc_closure* next_completion;
/** target result address for next completion */
- grpc_channel_args **target_result;
+ grpc_channel_args** target_result;
/** current (fully resolved) result */
- grpc_channel_args *resolved_result;
+ grpc_channel_args* resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@@ -92,32 +92,32 @@ typedef struct {
grpc_backoff backoff_state;
/** currently resolving addresses */
- grpc_lb_addresses *lb_addresses;
+ grpc_lb_addresses* lb_addresses;
/** currently resolving service config */
- char *service_config_json;
+ char* service_config_json;
} ares_dns_resolver;
-static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r);
-static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r);
+static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r);
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r);
-static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* r);
+static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete);
static const grpc_resolver_vtable dns_ares_resolver_vtable = {
dns_ares_destroy, dns_ares_shutdown_locked,
dns_ares_channel_saw_error_locked, dns_ares_next_locked};
-static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ ares_dns_resolver* r = (ares_dns_resolver*)resolver;
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@@ -133,18 +133,18 @@ static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ ares_dns_resolver* r = (ares_dns_resolver*)resolver;
if (!r->resolving) {
grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
-static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- ares_dns_resolver *r = (ares_dns_resolver *)arg;
+static void dns_ares_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ ares_dns_resolver* r = (ares_dns_resolver*)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@@ -154,8 +154,8 @@ static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
-static bool value_in_json_array(grpc_json *array, const char *value) {
- for (grpc_json *entry = array->child; entry != NULL; entry = entry->next) {
+static bool value_in_json_array(grpc_json* array, const char* value) {
+ for (grpc_json* entry = array->child; entry != NULL; entry = entry->next) {
if (entry->type == GRPC_JSON_STRING && strcmp(entry->value, value) == 0) {
return true;
}
@@ -163,21 +163,21 @@ static bool value_in_json_array(grpc_json *array, const char *value) {
return false;
}
-static char *choose_service_config(char *service_config_choice_json) {
- grpc_json *choices_json = grpc_json_parse_string(service_config_choice_json);
+static char* choose_service_config(char* service_config_choice_json) {
+ grpc_json* choices_json = grpc_json_parse_string(service_config_choice_json);
if (choices_json == NULL || choices_json->type != GRPC_JSON_ARRAY) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
return NULL;
}
- char *service_config = NULL;
- for (grpc_json *choice = choices_json->child; choice != NULL;
+ char* service_config = NULL;
+ for (grpc_json* choice = choices_json->child; choice != NULL;
choice = choice->next) {
if (choice->type != GRPC_JSON_OBJECT) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
break;
}
- grpc_json *service_config_json = NULL;
- for (grpc_json *field = choice->child; field != NULL; field = field->next) {
+ grpc_json* service_config_json = NULL;
+ for (grpc_json* field = choice->child; field != NULL; field = field->next) {
// Check client language, if specified.
if (strcmp(field->key, "clientLanguage") == 0) {
if (field->type != GRPC_JSON_ARRAY ||
@@ -188,7 +188,7 @@ static char *choose_service_config(char *service_config_choice_json) {
}
// Check client hostname, if specified.
if (strcmp(field->key, "clientHostname") == 0) {
- char *hostname = grpc_gethostname();
+ char* hostname = grpc_gethostname();
if (hostname == NULL || field->type != GRPC_JSON_ARRAY ||
!value_in_json_array(field, hostname)) {
service_config_json = NULL;
@@ -225,22 +225,22 @@ static char *choose_service_config(char *service_config_choice_json) {
return service_config;
}
-static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- ares_dns_resolver *r = (ares_dns_resolver *)arg;
- grpc_channel_args *result = NULL;
+static void dns_ares_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ ares_dns_resolver* r = (ares_dns_resolver*)arg;
+ grpc_channel_args* result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
r->pending_request = NULL;
if (r->lb_addresses != NULL) {
- static const char *args_to_remove[2];
+ static const char* args_to_remove[2];
size_t num_args_to_remove = 0;
grpc_arg new_args[3];
size_t num_args_to_add = 0;
new_args[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses);
- grpc_service_config *service_config = NULL;
- char *service_config_string = NULL;
+ grpc_service_config* service_config = NULL;
+ char* service_config_string = NULL;
if (r->service_config_json != NULL) {
service_config_string = choose_service_config(r->service_config_json);
gpr_free(r->service_config_json);
@@ -249,15 +249,15 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
+ (char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
service_config = grpc_service_config_create(service_config_string);
if (service_config != NULL) {
- const char *lb_policy_name =
+ const char* lb_policy_name =
grpc_service_config_get_lb_policy_name(service_config);
if (lb_policy_name != NULL) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
+ (char*)GRPC_ARG_LB_POLICY_NAME, (char*)lb_policy_name);
}
}
}
@@ -269,7 +269,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(service_config_string);
grpc_lb_addresses_destroy(exec_ctx, r->lb_addresses);
} else {
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
@@ -296,12 +296,12 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
-static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
+static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
gpr_log(GPR_DEBUG, "dns_ares_next is called.");
- ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ ares_dns_resolver* r = (ares_dns_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@@ -313,8 +313,8 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r) {
+static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r) {
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = true;
@@ -327,8 +327,8 @@ static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->request_service_config ? &r->service_config_json : NULL);
}
-static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r) {
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_result = r->resolved_result == NULL
@@ -341,9 +341,9 @@ static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
gpr_log(GPR_DEBUG, "dns_ares_destroy");
- ares_dns_resolver *r = (ares_dns_resolver *)gr;
+ ares_dns_resolver* r = (ares_dns_resolver*)gr;
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@@ -355,15 +355,15 @@ static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
gpr_free(r);
}
-static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
- grpc_resolver_args *args,
- const char *default_port) {
+static grpc_resolver* dns_ares_create(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_args* args,
+ const char* default_port) {
/* Get name from args. */
- const char *path = args->uri->path;
+ const char* path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
- ares_dns_resolver *r =
- (ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
+ ares_dns_resolver* r =
+ (ares_dns_resolver*)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);
@@ -371,7 +371,7 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);
- const grpc_arg *arg = grpc_channel_args_find(
+ const grpc_arg* arg = grpc_channel_args_find(
r->channel_args, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION);
r->request_service_config = !grpc_channel_arg_get_integer(
arg, (grpc_integer_options){false, false, true});
@@ -398,19 +398,19 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
-static void dns_ares_factory_ref(grpc_resolver_factory *factory) {}
+static void dns_ares_factory_ref(grpc_resolver_factory* factory) {}
-static void dns_ares_factory_unref(grpc_resolver_factory *factory) {}
+static void dns_ares_factory_unref(grpc_resolver_factory* factory) {}
-static grpc_resolver *dns_factory_create_resolver(
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
- grpc_resolver_args *args) {
+static grpc_resolver* dns_factory_create_resolver(
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args) {
return dns_ares_create(exec_ctx, args, "https");
}
-static char *dns_ares_factory_get_default_host_name(
- grpc_resolver_factory *factory, grpc_uri *uri) {
- const char *path = uri->path;
+static char* dns_ares_factory_get_default_host_name(
+ grpc_resolver_factory* factory, grpc_uri* uri) {
+ const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
@@ -420,16 +420,16 @@ static const grpc_resolver_factory_vtable dns_ares_factory_vtable = {
dns_ares_factory_get_default_host_name, "dns"};
static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable};
-static grpc_resolver_factory *dns_ares_resolver_factory_create() {
+static grpc_resolver_factory* dns_ares_resolver_factory_create() {
return &dns_resolver_factory;
}
extern "C" void grpc_resolver_dns_ares_init(void) {
- char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
sorter and the CNAME support are added. */
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
- grpc_error *error = grpc_ares_init();
+ grpc_error* error = grpc_ares_init();
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
return;
@@ -441,7 +441,7 @@ extern "C" void grpc_resolver_dns_ares_init(void) {
}
extern "C" void grpc_resolver_dns_ares_shutdown(void) {
- char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
grpc_ares_cleanup();
}
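
Two usage notes on the file above. First, the resolver is opt-in: it is only registered when GRPC_DNS_RESOLVER=ares is set in the environment. Second, choose_service_config() walks a JSON array of choice objects and accepts a choice only if its optional clientLanguage / clientHostname arrays match the client. An illustrative payload of that shape (the serviceConfig key and the values shown are assumptions; only the two selector keys appear in the hunk above):

    [
      {
        "clientLanguage": ["c++"],
        "clientHostname": ["canary-host"],
        "serviceConfig": { "loadBalancingPolicy": "round_robin" }
      },
      { "serviceConfig": { "loadBalancingPolicy": "pick_first" } }
    ]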
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 3d4309f2fa..a5fb1f10e1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -31,31 +31,31 @@ typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
bound to its ares_channel when necessary. */
-void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver);
+void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver);
/* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
\a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
query. */
-ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver);
+ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver);
/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
created successfully. */
-grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
- grpc_pollset_set *pollset_set);
+grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
+ grpc_pollset_set* pollset_set);
/* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver
will be cancelled and their on_done callbacks will be invoked with a status
of ARES_ECANCELLED. */
-void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver);
/* Shutdown all the grpc_fds used by \a ev_driver */
-void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver);
+void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
- */
+ */
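
The header implies a simple lifecycle: create the driver against the resolver's pollset_set, bind queries to its ares_channel, start it, and destroy it when the request finishes (shutdown is for cancellation). A hedged sketch using only the functions declared above:

    grpc_ares_ev_driver* ev_driver;
    grpc_error* err =
        grpc_ares_ev_driver_create(&ev_driver, interested_parties);
    if (err == GRPC_ERROR_NONE) {
      ares_channel* channel = grpc_ares_ev_driver_get_channel(ev_driver);
      /* ... issue c-ares queries against *channel ... */
      grpc_ares_ev_driver_start(exec_ctx, ev_driver);
      /* On cancellation: grpc_ares_ev_driver_shutdown(exec_ctx, ev_driver);
         when the request completes: grpc_ares_ev_driver_destroy(ev_driver); */
    }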
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index c30cc93b6f..2bb98c1a3f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -37,7 +37,7 @@
typedef struct fd_node {
/** the owner of this fd node */
- grpc_ares_ev_driver *ev_driver;
+ grpc_ares_ev_driver* ev_driver;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@@ -45,12 +45,12 @@ typedef struct fd_node {
grpc_fd in this node becomes writable. */
grpc_closure write_closure;
/** next fd node in the list */
- struct fd_node *next;
+ struct fd_node* next;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** the grpc_fd owned by this fd node */
- grpc_fd *fd;
+ grpc_fd* fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
@@ -63,31 +63,31 @@ struct grpc_ares_ev_driver {
/** the ares_channel owned by this event driver */
ares_channel channel;
/** pollset set for driving the IO events of the channel */
- grpc_pollset_set *pollset_set;
+ grpc_pollset_set* pollset_set;
/** refcount of the event driver */
gpr_refcount refs;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** a list of grpc_fd that this event driver is currently using. */
- fd_node *fds;
+ fd_node* fds;
/** is this event driver currently working? */
bool working;
/** is this event driver being shut down */
bool shutting_down;
};
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver);
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver);
-static grpc_ares_ev_driver *grpc_ares_ev_driver_ref(
- grpc_ares_ev_driver *ev_driver) {
+static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
+ grpc_ares_ev_driver* ev_driver) {
gpr_log(GPR_DEBUG, "Ref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
gpr_ref(&ev_driver->refs);
return ev_driver;
}
-static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
+static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
gpr_log(GPR_DEBUG, "Unref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
if (gpr_unref(&ev_driver->refs)) {
gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
@@ -98,7 +98,7 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
}
}
-static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+static void fd_node_destroy(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
@@ -111,29 +111,30 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_free(fdn);
}
-static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+static void fd_node_shutdown(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
gpr_mu_lock(&fdn->mu);
fdn->shutting_down = true;
if (!fdn->readable_registered && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
- grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "c-ares fd shutdown"));
+ grpc_fd_shutdown(
+ exec_ctx, fdn->fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
-grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
- grpc_pollset_set *pollset_set) {
- *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
+grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
+ grpc_pollset_set* pollset_set) {
+ *ev_driver = (grpc_ares_ev_driver*)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
- char *err_msg;
+ char* err_msg;
gpr_asprintf(&err_msg, "Failed to init ares channel. C-ares error: %s",
ares_strerror(status));
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
gpr_free(err_msg);
gpr_free(*ev_driver);
return err;
@@ -147,7 +148,7 @@ grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
return GRPC_ERROR_NONE;
}
-void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver) {
// It's not safe to shut down remaining fds here directly, becauses
// It's not safe to shut down remaining fds here directly, because
// ares_host_callback does not provide an exec_ctx. We mark the event driver
// as being shut down. If the event driver is working,
@@ -159,14 +160,15 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
grpc_ares_ev_driver_unref(ev_driver);
}
-void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver) {
+void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver) {
gpr_mu_lock(&ev_driver->mu);
ev_driver->shutting_down = true;
- fd_node *fn = ev_driver->fds;
+ fd_node* fn = ev_driver->fds;
while (fn != NULL) {
- grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "grpc_ares_ev_driver_shutdown"));
+ grpc_fd_shutdown(
+ exec_ctx, fn->fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);
@@ -174,13 +176,13 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
// Search for fd in the fd_node list starting at *head. This is an O(n) search;
// the max possible value of n is ARES_GETSOCK_MAXNUM (16). n is typically 1-2
// in our tests.
-static fd_node *pop_fd_node(fd_node **head, int fd) {
+static fd_node* pop_fd_node(fd_node** head, int fd) {
fd_node dummy_head;
dummy_head.next = *head;
- fd_node *node = &dummy_head;
+ fd_node* node = &dummy_head;
while (node->next != NULL) {
if (grpc_fd_wrapped_fd(node->next->fd) == fd) {
- fd_node *ret = node->next;
+ fd_node* ret = node->next;
node->next = node->next->next;
*head = dummy_head.next;
return ret;
@@ -191,16 +193,16 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
}
/* Check if \a fd is still readable */
-static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
+static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver,
int fd) {
size_t bytes_available = 0;
return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}
-static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- fd_node *fdn = (fd_node *)arg;
- grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+static void on_readable_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ fd_node* fdn = (fd_node*)arg;
+ grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
@@ -232,10 +234,10 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_ares_ev_driver_unref(ev_driver);
}
-static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- fd_node *fdn = (fd_node *)arg;
- grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+static void on_writable_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ fd_node* fdn = (fd_node*)arg;
+ grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
@@ -265,15 +267,15 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_ares_ev_driver_unref(ev_driver);
}
-ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver) {
+ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver) {
return &ev_driver->channel;
}
// Get the file descriptors used by the ev_driver's ares channel, register
// driver_closure with these file descriptors.
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver) {
- fd_node *new_list = NULL;
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver) {
+ fd_node* new_list = NULL;
if (!ev_driver->shutting_down) {
ares_socket_t socks[ARES_GETSOCK_MAXNUM];
int socks_bitmask =
@@ -281,12 +283,12 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
if (ARES_GETSOCK_READABLE(socks_bitmask, i) ||
ARES_GETSOCK_WRITABLE(socks_bitmask, i)) {
- fd_node *fdn = pop_fd_node(&ev_driver->fds, socks[i]);
+ fd_node* fdn = pop_fd_node(&ev_driver->fds, socks[i]);
// Create a new fd_node if socks[i] is not in the fd_node list.
if (fdn == NULL) {
- char *fd_name;
+ char* fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
+ fdn = (fd_node*)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
@@ -331,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
// are therefore no longer in use, so they can be shut down and removed from
// the list.
while (ev_driver->fds != NULL) {
- fd_node *cur = ev_driver->fds;
+ fd_node* cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
fd_node_shutdown(exec_ctx, cur);
}
@@ -343,8 +345,8 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver) {
+void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver) {
gpr_mu_lock(&ev_driver->mu);
if (!ev_driver->working) {
ev_driver->working = true;
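
pop_fd_node() above relies on a stack-allocated dummy head so the loop can unlink any node, including the current head, without a special case. The same pattern in isolation, with a hypothetical node type:

    typedef struct node {
      int key;
      struct node* next;
    } node;

    static node* pop_node(node** head, int key) {
      node dummy_head; /* sits in front of the real head */
      dummy_head.next = *head;
      node* prev = &dummy_head;
      while (prev->next != NULL) {
        if (prev->next->key == key) {
          node* ret = prev->next;
          prev->next = ret->next;  /* unlink, head case included */
          *head = dummy_head.next; /* head may have changed */
          return ret;
        }
        prev = prev->next;
      }
      return NULL;
    }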
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 04379975e1..9408b9d81d 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -51,13 +51,13 @@ struct grpc_ares_request {
struct ares_addr_port_node dns_server_addr;
/** following members are set in grpc_resolve_address_ares_impl */
/** closure to call when the request completes */
- grpc_closure *on_done;
+ grpc_closure* on_done;
/** the pointer to receive the resolved addresses */
- grpc_lb_addresses **lb_addrs_out;
+ grpc_lb_addresses** lb_addrs_out;
/** the pointer to receive the service config in JSON */
- char **service_config_json_out;
+ char** service_config_json_out;
/** the event driver used by this request */
- grpc_ares_ev_driver *ev_driver;
+ grpc_ares_ev_driver* ev_driver;
/** number of ongoing queries */
gpr_refcount pending_queries;
@@ -66,15 +66,15 @@ struct grpc_ares_request {
/** is there at least one successful query, set in on_done_cb */
bool success;
/** the errors explaining the request failure, set in on_done_cb */
- grpc_error *error;
+ grpc_error* error;
};
typedef struct grpc_ares_hostbyname_request {
/** following members are set in create_hostbyname_request */
/** the top-level request instance */
- grpc_ares_request *parent_request;
+ grpc_ares_request* parent_request;
/** host to resolve, parsed from the name to resolve */
- char *host;
+ char* host;
/** port to fill in sockaddr_in, parsed from the name to resolve */
uint16_t port;
/** is it a grpclb address */
@@ -83,7 +83,7 @@ typedef struct grpc_ares_hostbyname_request {
static void do_basic_init(void) { gpr_mu_init(&g_init_mu); }
-static uint16_t strhtons(const char *port) {
+static uint16_t strhtons(const char* port) {
if (strcmp(port, "http") == 0) {
return htons(80);
} else if (strcmp(port, "https") == 0) {
@@ -92,12 +92,12 @@ static uint16_t strhtons(const char *port) {
return htons((unsigned short)atoi(port));
}
-static void grpc_ares_request_ref(grpc_ares_request *r) {
+static void grpc_ares_request_ref(grpc_ares_request* r) {
gpr_ref(&r->pending_queries);
}
-static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
- grpc_ares_request *r) {
+static void grpc_ares_request_unref(grpc_exec_ctx* exec_ctx,
+ grpc_ares_request* r) {
/* If there are no pending queries, invoke the on_done callback and destroy the
   request */
if (gpr_unref(&r->pending_queries)) {
@@ -120,10 +120,10 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
}
}
-static grpc_ares_hostbyname_request *create_hostbyname_request(
- grpc_ares_request *parent_request, char *host, uint16_t port,
+static grpc_ares_hostbyname_request* create_hostbyname_request(
+ grpc_ares_request* parent_request, char* host, uint16_t port,
bool is_balancer) {
- grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc(
+ grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)gpr_zalloc(
sizeof(grpc_ares_hostbyname_request));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
@@ -133,23 +133,23 @@ static grpc_ares_hostbyname_request *create_hostbyname_request(
return hr;
}
-static void destroy_hostbyname_request(grpc_exec_ctx *exec_ctx,
- grpc_ares_hostbyname_request *hr) {
+static void destroy_hostbyname_request(grpc_exec_ctx* exec_ctx,
+ grpc_ares_hostbyname_request* hr) {
grpc_ares_request_unref(exec_ctx, hr->parent_request);
gpr_free(hr->host);
gpr_free(hr);
}
-static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
- struct hostent *hostent) {
- grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)arg;
- grpc_ares_request *r = hr->parent_request;
+static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
+ struct hostent* hostent) {
+ grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)arg;
+ grpc_ares_request* r = hr->parent_request;
gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
r->success = true;
- grpc_lb_addresses **lb_addresses = r->lb_addrs_out;
+ grpc_lb_addresses** lb_addresses = r->lb_addrs_out;
if (*lb_addresses == NULL) {
*lb_addresses = grpc_lb_addresses_create(0, NULL);
}
@@ -158,7 +158,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
+ (*lb_addresses)->addresses = (grpc_lb_address*)gpr_realloc(
(*lb_addresses)->addresses,
sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
@@ -208,10 +208,10 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
}
}
} else if (!r->success) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@@ -223,26 +223,26 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
destroy_hostbyname_request(NULL, hr);
}
-static void on_srv_query_done_cb(void *arg, int status, int timeouts,
- unsigned char *abuf, int alen) {
- grpc_ares_request *r = (grpc_ares_request *)arg;
+static void on_srv_query_done_cb(void* arg, int status, int timeouts,
+ unsigned char* abuf, int alen) {
+ grpc_ares_request* r = (grpc_ares_request*)arg;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
if (status == ARES_SUCCESS) {
gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS");
- struct ares_srv_reply *reply;
+ struct ares_srv_reply* reply;
const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
if (parse_status == ARES_SUCCESS) {
- ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
- for (struct ares_srv_reply *srv_it = reply; srv_it != NULL;
+ ares_channel* channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ for (struct ares_srv_reply* srv_it = reply; srv_it != NULL;
srv_it = srv_it->next) {
if (grpc_ipv6_loopback_available()) {
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ grpc_ares_hostbyname_request* hr = create_hostbyname_request(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6,
on_hostbyname_done_cb, hr);
}
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ grpc_ares_hostbyname_request* hr = create_hostbyname_request(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
hr);
@@ -253,10 +253,10 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
ares_free_data(reply);
}
} else if (!r->success) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@@ -270,15 +270,15 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
static const char g_service_config_attribute_prefix[] = "grpc_config=";
-static void on_txt_done_cb(void *arg, int status, int timeouts,
- unsigned char *buf, int len) {
+static void on_txt_done_cb(void* arg, int status, int timeouts,
+ unsigned char* buf, int len) {
gpr_log(GPR_DEBUG, "on_txt_done_cb");
- char *error_msg;
- grpc_ares_request *r = (grpc_ares_request *)arg;
+ char* error_msg;
+ grpc_ares_request* r = (grpc_ares_request*)arg;
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
- struct ares_txt_ext *result = NULL;
- struct ares_txt_ext *reply = NULL;
- grpc_error *error = GRPC_ERROR_NONE;
+ struct ares_txt_ext* result = NULL;
+ struct ares_txt_ext* reply = NULL;
+ grpc_error* error = GRPC_ERROR_NONE;
gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
status = ares_parse_txt_reply_ext(buf, len, &reply);
@@ -294,12 +294,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out = (char*)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
- *r->service_config_json_out = (char *)gpr_realloc(
+ *r->service_config_json_out = (char*)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@@ -326,15 +326,15 @@ done:
grpc_ares_request_unref(NULL, r);
}
-static grpc_ares_request *grpc_dns_lookup_ares_impl(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) {
- grpc_error *error = GRPC_ERROR_NONE;
- grpc_ares_hostbyname_request *hr = NULL;
- grpc_ares_request *r = NULL;
- ares_channel *channel = NULL;
+static grpc_ares_request* grpc_dns_lookup_ares_impl(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_ares_hostbyname_request* hr = NULL;
+ grpc_ares_request* r = NULL;
+ ares_channel* channel = NULL;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@@ -342,8 +342,8 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
} */
/* parse name, splitting it into host and port parts */
- char *host;
- char *port;
+ char* host;
+ char* port;
gpr_split_host_port(name, &host, &port);
if (host == NULL) {
error = grpc_error_set_str(
@@ -360,11 +360,11 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
port = gpr_strdup(default_port);
}
- grpc_ares_ev_driver *ev_driver;
+ grpc_ares_ev_driver* ev_driver;
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
+ r = (grpc_ares_request*)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -380,7 +380,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_resolved_address addr;
if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) {
r->dns_server_addr.family = AF_INET;
- struct sockaddr_in *in = (struct sockaddr_in *)addr.addr;
+ struct sockaddr_in* in = (struct sockaddr_in*)addr.addr;
memcpy(&r->dns_server_addr.addr.addr4, &in->sin_addr,
sizeof(struct in_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -388,7 +388,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
} else if (grpc_parse_ipv6_hostport(dns_server, &addr,
false /* log_errors */)) {
r->dns_server_addr.family = AF_INET6;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr.addr;
+ struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr.addr;
memcpy(&r->dns_server_addr.addr.addr6, &in6->sin6_addr,
sizeof(struct in6_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -402,7 +402,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
}
int status = ares_set_servers_ports(*channel, &r->dns_server_addr);
if (status != ARES_SUCCESS) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
@@ -423,7 +423,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
if (check_grpclb) {
/* Query the SRV record */
grpc_ares_request_ref(r);
- char *service_name;
+ char* service_name;
gpr_asprintf(&service_name, "_grpclb._tcp.%s", host);
ares_query(*channel, service_name, ns_c_in, ns_t_srv, on_srv_query_done_cb,
r);
@@ -447,29 +447,29 @@ error_cleanup:
return NULL;
}
-grpc_ares_request *(*grpc_dns_lookup_ares)(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) = grpc_dns_lookup_ares_impl;
+grpc_ares_request* (*grpc_dns_lookup_ares)(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) = grpc_dns_lookup_ares_impl;
-void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {
+void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {
if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) {
grpc_ares_ev_driver_shutdown(exec_ctx, r->ev_driver);
}
}
-grpc_error *grpc_ares_init(void) {
+grpc_error* grpc_ares_init(void) {
gpr_once_init(&g_basic_init, do_basic_init);
gpr_mu_lock(&g_init_mu);
int status = ares_library_init(ARES_LIB_INIT_ALL);
gpr_mu_unlock(&g_init_mu);
if (status != ARES_SUCCESS) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "ares_library_init failed: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return error;
}
@@ -488,28 +488,28 @@ void grpc_ares_cleanup(void) {
typedef struct grpc_resolve_address_ares_request {
/** the pointer to receive the resolved addresses */
- grpc_resolved_addresses **addrs_out;
+ grpc_resolved_addresses** addrs_out;
/** currently resolving lb addresses */
- grpc_lb_addresses *lb_addrs;
+ grpc_lb_addresses* lb_addrs;
/** closure to call when the resolve_address_ares request completes */
- grpc_closure *on_resolve_address_done;
+ grpc_closure* on_resolve_address_done;
/** a closure wrapping on_dns_lookup_done_cb, which should be invoked when the
grpc_dns_lookup_ares operation is done. */
grpc_closure on_dns_lookup_done;
} grpc_resolve_address_ares_request;
-static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_resolve_address_ares_request *r =
- (grpc_resolve_address_ares_request *)arg;
- grpc_resolved_addresses **resolved_addresses = r->addrs_out;
+static void on_dns_lookup_done_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_resolve_address_ares_request* r =
+ (grpc_resolve_address_ares_request*)arg;
+ grpc_resolved_addresses** resolved_addresses = r->addrs_out;
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
*resolved_addresses =
- (grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
+ (grpc_resolved_addresses*)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
+ (*resolved_addresses)->addrs = (grpc_resolved_address*)gpr_zalloc(
sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
@@ -523,14 +523,14 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(r);
}
-static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
- const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
- grpc_resolve_address_ares_request *r =
- (grpc_resolve_address_ares_request *)gpr_zalloc(
+static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
+ const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {
+ grpc_resolve_address_ares_request* r =
+ (grpc_resolve_address_ares_request*)gpr_zalloc(
sizeof(grpc_resolve_address_ares_request));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
@@ -543,8 +543,8 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
}
void (*grpc_resolve_address_ares)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
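/* A note on the pattern in this file: grpc_dns_lookup_ares and
   grpc_resolve_address_ares are exported as mutable function pointers
   initialized to the real *_impl functions, so tests can swap in a mock;
   grpc_cancel_ares_request even checks for the default impl before touching
   the ev_driver. A minimal standalone sketch of the same idiom (all names
   below are invented for illustration): */
#include <stdio.h>

static int real_lookup_impl(const char* name) {
  printf("real lookup of %s\n", name);
  return 0;
}

/* Overridable entry point, defaulting to the real implementation. */
static int (*demo_lookup)(const char* name) = real_lookup_impl;

static int mock_lookup_impl(const char* name) {
  printf("mock lookup of %s\n", name);
  return 0;
}

int main(void) {
  demo_lookup("example.com");     /* dispatches to the real impl */
  demo_lookup = mock_lookup_impl; /* a test injects its mock */
  demo_lookup("example.com");     /* now dispatches to the mock */
  return 0;
}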
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 38fbea9aac..6882b7b1d1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -36,12 +36,12 @@ typedef struct grpc_ares_request grpc_ares_request;
must be called at least once before this function. \a on_done may be
called directly in this function without being scheduled with \a exec_ctx,
so it must not try to acquire locks that are being held by the caller. */
-extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
- const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addresses);
+extern void (*grpc_resolve_address_ares)(grpc_exec_ctx* exec_ctx,
+ const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses);
/* Asynchronously resolve \a name. It will try to resolve grpclb SRV records in
addition to the normal address records. For normal address records, it uses
@@ -50,19 +50,19 @@ extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
function. \a on_done may be called directly in this function without being
scheduled with \a exec_ctx, so it must not try to acquire locks that are
being held by the caller. */
-extern grpc_ares_request *(*grpc_dns_lookup_ares)(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addresses, bool check_grpclb,
- char **service_config_json);
+extern grpc_ares_request* (*grpc_dns_lookup_ares)(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addresses, bool check_grpclb,
+ char** service_config_json);
/* Cancel the pending grpc_ares_request \a request */
-void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx,
- grpc_ares_request *request);
+void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx,
+ grpc_ares_request* request);
/* Initialize gRPC ares wrapper. Must be called at least once before
grpc_resolve_address_ares(). */
-grpc_error *grpc_ares_init(void);
+grpc_error* grpc_ares_init(void);
/* Uninitializes the gRPC ares wrapper. If there was more than one previous call to
grpc_ares_init(), this function uninitializes the gRPC ares wrapper only if
@@ -74,4 +74,4 @@ void grpc_ares_cleanup(void);
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
- */
+ */
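/* The contract documented in this header -- \a on_done may be called directly,
   without being scheduled with \a exec_ctx, so it must not try to acquire
   locks held by the caller -- deserves a concrete sketch. Everything below is
   a hypothetical standalone illustration, not gRPC API: */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t g_mu = PTHREAD_MUTEX_INITIALIZER;

static void on_done(void* arg) {
  (void)arg;
  pthread_mutex_lock(&g_mu); /* would self-deadlock if the caller held g_mu */
  printf("resolution finished\n");
  pthread_mutex_unlock(&g_mu);
}

/* Stands in for grpc_resolve_address_ares: may invoke the callback inline. */
static void resolve(void (*cb)(void*), void* arg) { cb(arg); }

int main(void) {
  pthread_mutex_lock(&g_mu);
  /* ...mutate caller state under the lock... */
  pthread_mutex_unlock(&g_mu); /* release BEFORE starting the lookup */
  resolve(on_done, NULL);
  return 0;
}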
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index f2587c4520..a68a7c47fb 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -25,36 +25,36 @@ struct grpc_ares_request {
char val;
};
-static grpc_ares_request *grpc_dns_lookup_ares_impl(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) {
+static grpc_ares_request* grpc_dns_lookup_ares_impl(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) {
return NULL;
}
-grpc_ares_request *(*grpc_dns_lookup_ares)(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) = grpc_dns_lookup_ares_impl;
+grpc_ares_request* (*grpc_dns_lookup_ares)(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) = grpc_dns_lookup_ares_impl;
-void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {}
+void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {}
-grpc_error *grpc_ares_init(void) { return GRPC_ERROR_NONE; }
+grpc_error* grpc_ares_init(void) { return GRPC_ERROR_NONE; }
void grpc_ares_cleanup(void) {}
-static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
- const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {}
+static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
+ const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {}
void (*grpc_resolve_address_ares)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
#endif /* GRPC_ARES != 1 || defined(GRPC_UV) */
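/* The fallback file above supplies no-op definitions for the exact symbols
   the real c-ares wrapper exports, guarded by the inverse preprocessor
   condition, so callers link against the same names either way. A compact
   standalone sketch of the pattern (macro and names invented): */
#include <stdio.h>

#if defined(HAVE_FANCY_RESOLVER)
static int fancy_resolve(const char* name) {
  printf("really resolving %s\n", name);
  return 0;
}
#else
/* Stub with the same signature; compiled when the feature is absent. */
static int fancy_resolve(const char* name) {
  (void)name;
  return -1;
}
#endif

int main(void) { return fancy_resolve("example.com") == 0 ? 0 : 1; }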
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 62aead5517..a57ab66118 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -45,13 +45,13 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** name to resolve */
- char *name_to_resolve;
+ char* name_to_resolve;
/** default port to use */
- char *default_port;
+ char* default_port;
/** channel args. */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
/** pollset_set to drive the name resolution process */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/** are we currently resolving? */
bool resolving;
@@ -60,11 +60,11 @@ typedef struct {
/** which version of the result is current? */
int resolved_version;
/** pending next completion, or NULL */
- grpc_closure *next_completion;
+ grpc_closure* next_completion;
/** target result address for next completion */
- grpc_channel_args **target_result;
+ grpc_channel_args** target_result;
/** current (fully resolved) result */
- grpc_channel_args *resolved_result;
+ grpc_channel_args* resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@@ -73,30 +73,30 @@ typedef struct {
grpc_backoff backoff_state;
/** currently resolving addresses */
- grpc_resolved_addresses *addresses;
+ grpc_resolved_addresses* addresses;
} dns_resolver;
-static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r);
-static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r);
+static void dns_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r);
+static void dns_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r);
-static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void dns_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void dns_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* r);
+static void dns_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
dns_next_locked};
-static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- dns_resolver *r = (dns_resolver *)resolver;
+static void dns_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ dns_resolver* r = (dns_resolver*)resolver;
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@@ -109,19 +109,19 @@ static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- dns_resolver *r = (dns_resolver *)resolver;
+static void dns_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ dns_resolver* r = (dns_resolver*)resolver;
if (!r->resolving) {
grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
}
-static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
- dns_resolver *r = (dns_resolver *)resolver;
+static void dns_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ dns_resolver* r = (dns_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@@ -133,9 +133,9 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
}
}
-static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- dns_resolver *r = (dns_resolver *)arg;
+static void dns_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ dns_resolver* r = (dns_resolver*)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
@@ -147,17 +147,17 @@ static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
-static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- dns_resolver *r = (dns_resolver *)arg;
- grpc_channel_args *result = NULL;
+static void dns_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ dns_resolver* r = (dns_resolver*)arg;
+ grpc_channel_args* result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
GRPC_ERROR_REF(error);
error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(r->name_to_resolve));
if (r->addresses != NULL) {
- grpc_lb_addresses *addresses = grpc_lb_addresses_create(
+ grpc_lb_addresses* addresses = grpc_lb_addresses_create(
r->addresses->naddrs, NULL /* user_data_vtable */);
for (size_t i = 0; i < r->addresses->naddrs; ++i) {
grpc_lb_addresses_set_address(
@@ -198,8 +198,8 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
-static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r) {
+static void dns_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r) {
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = true;
@@ -211,8 +211,8 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
&r->addresses);
}
-static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r) {
+static void dns_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_result = r->resolved_result == NULL
@@ -224,8 +224,8 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
- dns_resolver *r = (dns_resolver *)gr;
+static void dns_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+ dns_resolver* r = (dns_resolver*)gr;
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@@ -236,18 +236,18 @@ static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
gpr_free(r);
}
-static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
- grpc_resolver_args *args,
- const char *default_port) {
+static grpc_resolver* dns_create(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_args* args,
+ const char* default_port) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based dns uri's not supported");
return NULL;
}
// Get name from args.
- char *path = args->uri->path;
+ char* path = args->uri->path;
if (path[0] == '/') ++path;
// Create resolver.
- dns_resolver *r = (dns_resolver *)gpr_zalloc(sizeof(dns_resolver));
+ dns_resolver* r = (dns_resolver*)gpr_zalloc(sizeof(dns_resolver));
grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
@@ -269,19 +269,19 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
-static void dns_factory_ref(grpc_resolver_factory *factory) {}
+static void dns_factory_ref(grpc_resolver_factory* factory) {}
-static void dns_factory_unref(grpc_resolver_factory *factory) {}
+static void dns_factory_unref(grpc_resolver_factory* factory) {}
-static grpc_resolver *dns_factory_create_resolver(
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
- grpc_resolver_args *args) {
+static grpc_resolver* dns_factory_create_resolver(
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args) {
return dns_create(exec_ctx, args, "https");
}
-static char *dns_factory_get_default_host_name(grpc_resolver_factory *factory,
- grpc_uri *uri) {
- const char *path = uri->path;
+static char* dns_factory_get_default_host_name(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
+ const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
@@ -291,17 +291,17 @@ static const grpc_resolver_factory_vtable dns_factory_vtable = {
dns_factory_get_default_host_name, "dns"};
static grpc_resolver_factory dns_resolver_factory = {&dns_factory_vtable};
-static grpc_resolver_factory *dns_resolver_factory_create() {
+static grpc_resolver_factory* dns_resolver_factory_create() {
return &dns_resolver_factory;
}
extern "C" void grpc_resolver_dns_native_init(void) {
- char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
grpc_register_resolver_type(dns_resolver_factory_create());
} else {
- grpc_resolver_factory *existing_factory =
+ grpc_resolver_factory* existing_factory =
grpc_resolver_factory_lookup("dns");
if (existing_factory == NULL) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
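/* grpc_resolver_dns_native_init above registers the native resolver either
   when GRPC_DNS_RESOLVER is set to "native" (gpr_stricmp, so the comparison
   is case-insensitive) or as a fallback when nothing else has claimed the
   "dns" scheme. A standalone sketch of that selection logic: */
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

static int dns_factory_already_registered = 0; /* stand-in for the lookup */

int main(void) {
  const char* choice = getenv("GRPC_DNS_RESOLVER");
  if (choice != NULL && strcasecmp(choice, "native") == 0) {
    printf("using native dns resolver (explicitly requested)\n");
  } else if (!dns_factory_already_registered) {
    printf("using native dns resolver (no other dns factory)\n");
  } else {
    printf("deferring to the previously registered dns factory\n");
  }
  return 0;
}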
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index 95c3bafed8..3f341fa8ed 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -65,4 +65,4 @@ void grpc_fake_resolver_response_generator_unref(
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index dda9542325..9fc8dffea3 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -41,36 +41,36 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** the addresses that we've 'resolved' */
- grpc_lb_addresses *addresses;
+ grpc_lb_addresses* addresses;
/** channel args */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
/** have we published? */
bool published;
/** pending next completion, or NULL */
- grpc_closure *next_completion;
+ grpc_closure* next_completion;
/** target result address for next completion */
- grpc_channel_args **target_result;
+ grpc_channel_args** target_result;
} sockaddr_resolver;
-static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void sockaddr_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- sockaddr_resolver *r);
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ sockaddr_resolver* r);
-static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void sockaddr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* r);
+static void sockaddr_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
sockaddr_destroy, sockaddr_shutdown_locked,
sockaddr_channel_saw_error_locked, sockaddr_next_locked};
-static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+static void sockaddr_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ sockaddr_resolver* r = (sockaddr_resolver*)resolver;
if (r->next_completion != NULL) {
*r->target_result = NULL;
GRPC_CLOSURE_SCHED(
@@ -80,26 +80,26 @@ static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ sockaddr_resolver* r = (sockaddr_resolver*)resolver;
r->published = false;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
}
-static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
- sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+static void sockaddr_next_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ sockaddr_resolver* r = (sockaddr_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
}
-static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- sockaddr_resolver *r) {
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ sockaddr_resolver* r) {
if (r->next_completion != NULL && !r->published) {
r->published = true;
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
@@ -110,42 +110,42 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
- sockaddr_resolver *r = (sockaddr_resolver *)gr;
+static void sockaddr_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+ sockaddr_resolver* r = (sockaddr_resolver*)gr;
grpc_lb_addresses_destroy(exec_ctx, r->addresses);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
}
-static char *ip_get_default_authority(grpc_uri *uri) {
- const char *path = uri->path;
+static char* ip_get_default_authority(grpc_uri* uri) {
+ const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
-static char *ipv4_get_default_authority(grpc_resolver_factory *factory,
- grpc_uri *uri) {
+static char* ipv4_get_default_authority(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
return ip_get_default_authority(uri);
}
-static char *ipv6_get_default_authority(grpc_resolver_factory *factory,
- grpc_uri *uri) {
+static char* ipv6_get_default_authority(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
return ip_get_default_authority(uri);
}
#ifdef GRPC_HAVE_UNIX_SOCKET
-char *unix_get_default_authority(grpc_resolver_factory *factory,
- grpc_uri *uri) {
+char* unix_get_default_authority(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
return gpr_strdup("localhost");
}
#endif
-static void do_nothing(void *ignored) {}
+static void do_nothing(void* ignored) {}
-static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
- grpc_resolver_args *args,
- bool parse(const grpc_uri *uri,
- grpc_resolved_address *dst)) {
+static grpc_resolver* sockaddr_create(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_args* args,
+ bool parse(const grpc_uri* uri,
+ grpc_resolved_address* dst)) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
args->uri->scheme);
@@ -157,12 +157,12 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer path_parts;
grpc_slice_buffer_init(&path_parts);
grpc_slice_split(path_slice, ",", &path_parts);
- grpc_lb_addresses *addresses =
+ grpc_lb_addresses* addresses =
grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_uri ith_uri = *args->uri;
- char *part_str = grpc_slice_to_c_string(path_parts.slices[i]);
+ char* part_str = grpc_slice_to_c_string(path_parts.slices[i]);
ith_uri.path = part_str;
if (!parse(&ith_uri, &addresses->addresses[i].address)) {
errors_found = true; /* GPR_TRUE */
@@ -177,8 +177,8 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
/* Instantiate resolver. */
- sockaddr_resolver *r =
- (sockaddr_resolver *)gpr_zalloc(sizeof(sockaddr_resolver));
+ sockaddr_resolver* r =
+ (sockaddr_resolver*)gpr_zalloc(sizeof(sockaddr_resolver));
r->addresses = addresses;
r->channel_args = grpc_channel_args_copy(args->args);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
@@ -189,14 +189,14 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
-static void sockaddr_factory_ref(grpc_resolver_factory *factory) {}
+static void sockaddr_factory_ref(grpc_resolver_factory* factory) {}
-static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
+static void sockaddr_factory_unref(grpc_resolver_factory* factory) {}
#define DECL_FACTORY(name) \
- static grpc_resolver *name##_factory_create_resolver( \
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, \
- grpc_resolver_args *args) { \
+ static grpc_resolver* name##_factory_create_resolver( \
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory, \
+ grpc_resolver_args* args) { \
return sockaddr_create(exec_ctx, args, grpc_parse_##name); \
} \
static const grpc_resolver_factory_vtable name##_factory_vtable = { \
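/* DECL_FACTORY above uses token pasting to stamp out one near-identical
   create_resolver function (and vtable) per scheme, each bound to its
   grpc_parse_<name> parser. A standalone sketch of the same trick, with
   invented names: */
#include <stdio.h>

static int parse_ipv4(const char* s) { printf("ipv4: %s\n", s); return 1; }
static int parse_ipv6(const char* s) { printf("ipv6: %s\n", s); return 1; }

#define DECL_ENTRY(name)                         \
  static int name##_create(const char* target) { \
    return parse_##name(target);                 \
  }

DECL_ENTRY(ipv4) /* defines ipv4_create -> parse_ipv4 */
DECL_ENTRY(ipv6) /* defines ipv6_create -> parse_ipv6 */

int main(void) {
  ipv4_create("10.0.0.1:443");
  ipv6_create("[::1]:443");
  return 0;
}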
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index c8b2c58db3..62555a4f01 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -32,44 +32,44 @@ typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
struct grpc_resolver_factory {
- const grpc_resolver_factory_vtable *vtable;
+ const grpc_resolver_factory_vtable* vtable;
};
typedef struct grpc_resolver_args {
- grpc_uri *uri;
- const grpc_channel_args *args;
- grpc_pollset_set *pollset_set;
- grpc_combiner *combiner;
+ grpc_uri* uri;
+ const grpc_channel_args* args;
+ grpc_pollset_set* pollset_set;
+ grpc_combiner* combiner;
} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
- void (*ref)(grpc_resolver_factory *factory);
- void (*unref)(grpc_resolver_factory *factory);
+ void (*ref)(grpc_resolver_factory* factory);
+ void (*unref)(grpc_resolver_factory* factory);
/** Implementation of grpc_resolver_factory_create_resolver */
- grpc_resolver *(*create_resolver)(grpc_exec_ctx *exec_ctx,
- grpc_resolver_factory *factory,
- grpc_resolver_args *args);
+ grpc_resolver* (*create_resolver)(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_factory* factory,
+ grpc_resolver_args* args);
/** Implementation of grpc_resolver_factory_get_default_authority */
- char *(*get_default_authority)(grpc_resolver_factory *factory, grpc_uri *uri);
+ char* (*get_default_authority)(grpc_resolver_factory* factory, grpc_uri* uri);
/** URI scheme that this factory implements */
- const char *scheme;
+ const char* scheme;
};
-void grpc_resolver_factory_ref(grpc_resolver_factory *resolver);
-void grpc_resolver_factory_unref(grpc_resolver_factory *resolver);
+void grpc_resolver_factory_ref(grpc_resolver_factory* resolver);
+void grpc_resolver_factory_unref(grpc_resolver_factory* resolver);
/** Create a resolver instance for a name */
-grpc_resolver *grpc_resolver_factory_create_resolver(
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
- grpc_resolver_args *args);
+grpc_resolver* grpc_resolver_factory_create_resolver(
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args);
/** Return a (freshly allocated with gpr_malloc) string representing
the default authority to use for this scheme. */
-char *grpc_resolver_factory_get_default_authority(
- grpc_resolver_factory *factory, grpc_uri *uri);
+char* grpc_resolver_factory_get_default_authority(
+ grpc_resolver_factory* factory, grpc_uri* uri);
#ifdef __cplusplus
}
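/* grpc_resolver_factory above is a classic C vtable object: the struct's
   only member points at a const table of function pointers plus the scheme
   string, and the free functions dispatch through it. A minimal standalone
   sketch of the same shape (types invented): */
#include <stdio.h>

typedef struct factory factory;
typedef struct {
  void (*ref)(factory* f);
  void (*unref)(factory* f);
  const char* scheme;
} factory_vtable;
struct factory {
  const factory_vtable* vtable;
};

static void noop_ref(factory* f) { (void)f; }
static void noop_unref(factory* f) { (void)f; }

static const factory_vtable demo_vtable = {noop_ref, noop_unref, "demo"};
static factory demo_factory = {&demo_vtable};

int main(void) {
  demo_factory.vtable->ref(&demo_factory); /* dispatch through the table */
  printf("scheme: %s\n", demo_factory.vtable->scheme);
  demo_factory.vtable->unref(&demo_factory);
  return 0;
}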
diff --git a/src/core/ext/filters/client_channel/resolver_registry.cc b/src/core/ext/filters/client_channel/resolver_registry.cc
index 1a0fb0bc3c..9e45887f35 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.cc
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
@@ -27,7 +27,7 @@
#define MAX_RESOLVERS 10
#define DEFAULT_RESOLVER_PREFIX_MAX_LENGTH 32
-static grpc_resolver_factory *g_all_of_the_resolvers[MAX_RESOLVERS];
+static grpc_resolver_factory* g_all_of_the_resolvers[MAX_RESOLVERS];
static int g_number_of_resolvers = 0;
static char g_default_resolver_prefix[DEFAULT_RESOLVER_PREFIX_MAX_LENGTH] =
@@ -49,7 +49,7 @@ void grpc_resolver_registry_shutdown(void) {
}
void grpc_resolver_registry_set_default_prefix(
- const char *default_resolver_prefix) {
+ const char* default_resolver_prefix) {
const size_t len = strlen(default_resolver_prefix);
GPR_ASSERT(len < DEFAULT_RESOLVER_PREFIX_MAX_LENGTH &&
"default resolver prefix too long");
@@ -59,7 +59,7 @@ void grpc_resolver_registry_set_default_prefix(
strcpy(g_default_resolver_prefix, default_resolver_prefix);
}
-void grpc_register_resolver_type(grpc_resolver_factory *factory) {
+void grpc_register_resolver_type(grpc_resolver_factory* factory) {
int i;
for (i = 0; i < g_number_of_resolvers; i++) {
GPR_ASSERT(0 != strcmp(factory->vtable->scheme,
@@ -70,7 +70,7 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory) {
g_all_of_the_resolvers[g_number_of_resolvers++] = factory;
}
-static grpc_resolver_factory *lookup_factory(const char *name) {
+static grpc_resolver_factory* lookup_factory(const char* name) {
int i;
for (i = 0; i < g_number_of_resolvers; i++) {
@@ -81,22 +81,22 @@ static grpc_resolver_factory *lookup_factory(const char *name) {
return NULL;
}
-grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name) {
- grpc_resolver_factory *f = lookup_factory(name);
+grpc_resolver_factory* grpc_resolver_factory_lookup(const char* name) {
+ grpc_resolver_factory* f = lookup_factory(name);
if (f) grpc_resolver_factory_ref(f);
return f;
}
-static grpc_resolver_factory *lookup_factory_by_uri(grpc_uri *uri) {
+static grpc_resolver_factory* lookup_factory_by_uri(grpc_uri* uri) {
if (!uri) return NULL;
return lookup_factory(uri->scheme);
}
-static grpc_resolver_factory *resolve_factory(grpc_exec_ctx *exec_ctx,
- const char *target,
- grpc_uri **uri,
- char **canonical_target) {
- grpc_resolver_factory *factory = NULL;
+static grpc_resolver_factory* resolve_factory(grpc_exec_ctx* exec_ctx,
+ const char* target,
+ grpc_uri** uri,
+ char** canonical_target) {
+ grpc_resolver_factory* factory = NULL;
GPR_ASSERT(uri != NULL);
*uri = grpc_uri_parse(exec_ctx, target, 1);
@@ -116,15 +116,15 @@ static grpc_resolver_factory *resolve_factory(grpc_exec_ctx *exec_ctx,
return factory;
}
-grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *args,
- grpc_pollset_set *pollset_set,
- grpc_combiner *combiner) {
- grpc_uri *uri = NULL;
- char *canonical_target = NULL;
- grpc_resolver_factory *factory =
+grpc_resolver* grpc_resolver_create(grpc_exec_ctx* exec_ctx, const char* target,
+ const grpc_channel_args* args,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner) {
+ grpc_uri* uri = NULL;
+ char* canonical_target = NULL;
+ grpc_resolver_factory* factory =
resolve_factory(exec_ctx, target, &uri, &canonical_target);
- grpc_resolver *resolver;
+ grpc_resolver* resolver;
grpc_resolver_args resolver_args;
memset(&resolver_args, 0, sizeof(resolver_args));
resolver_args.uri = uri;
@@ -138,21 +138,21 @@ grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
return resolver;
}
-char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target) {
- grpc_uri *uri = NULL;
- char *canonical_target = NULL;
- grpc_resolver_factory *factory =
+char* grpc_get_default_authority(grpc_exec_ctx* exec_ctx, const char* target) {
+ grpc_uri* uri = NULL;
+ char* canonical_target = NULL;
+ grpc_resolver_factory* factory =
resolve_factory(exec_ctx, target, &uri, &canonical_target);
- char *authority = grpc_resolver_factory_get_default_authority(factory, uri);
+ char* authority = grpc_resolver_factory_get_default_authority(factory, uri);
grpc_uri_destroy(uri);
gpr_free(canonical_target);
return authority;
}
-char *grpc_resolver_factory_add_default_prefix_if_needed(
- grpc_exec_ctx *exec_ctx, const char *target) {
- grpc_uri *uri = NULL;
- char *canonical_target = NULL;
+char* grpc_resolver_factory_add_default_prefix_if_needed(
+ grpc_exec_ctx* exec_ctx, const char* target) {
+ grpc_uri* uri = NULL;
+ char* canonical_target = NULL;
resolve_factory(exec_ctx, target, &uri, &canonical_target);
grpc_uri_destroy(uri);
return canonical_target == NULL ? gpr_strdup(target) : canonical_target;
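/* grpc_resolver_factory_add_default_prefix_if_needed above returns the
   canonicalized target when resolve_factory had to prepend the default
   prefix, and a plain copy of the input otherwise. A standalone sketch of
   the observable behavior, assuming a "dns:///" default prefix and an
   illustrative scheme list: */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char* g_schemes[] = {"dns", "ipv4", "ipv6", "unix"};

static int scheme_is_registered(const char* target) {
  const char* colon = strchr(target, ':');
  if (colon == NULL) return 0;
  for (size_t i = 0; i < sizeof(g_schemes) / sizeof(g_schemes[0]); i++) {
    size_t n = strlen(g_schemes[i]);
    if ((size_t)(colon - target) == n && strncmp(target, g_schemes[i], n) == 0)
      return 1;
  }
  return 0;
}

static char* add_default_prefix_if_needed(const char* target) {
  if (scheme_is_registered(target)) return strdup(target);
  char* out = (char*)malloc(strlen("dns:///") + strlen(target) + 1);
  strcpy(out, "dns:///");
  strcat(out, target);
  return out;
}

int main(void) {
  char* a = add_default_prefix_if_needed("localhost:50051");
  char* b = add_default_prefix_if_needed("ipv4:10.0.0.1:443");
  printf("%s\n%s\n", a, b); /* dns:///localhost:50051, then unchanged */
  free(a);
  free(b);
  return 0;
}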
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 06d0b99a35..01a2d0b18b 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -30,14 +30,14 @@ void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
/** Set the default URI prefix to \a default_prefix. */
-void grpc_resolver_registry_set_default_prefix(const char *default_prefix);
+void grpc_resolver_registry_set_default_prefix(const char* default_prefix);
/** Register a resolver type.
URIs of \a scheme will be resolved with the given resolver.
If \a priority is greater than zero, then the resolver will be eligible
to resolve names that are passed in with no scheme. Higher priority
resolvers will be tried before lower priority resolvers. */
-void grpc_register_resolver_type(grpc_resolver_factory *factory);
+void grpc_register_resolver_type(grpc_resolver_factory* factory);
/** Create a resolver given \a target.
First tries to parse \a target as a URI. If this succeeds, tries
@@ -52,23 +52,23 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
(typically the set of arguments passed in from the client API).
\a pollset_set is used to drive IO in the name resolution process; it
should not be NULL. */
-grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *args,
- grpc_pollset_set *pollset_set,
- grpc_combiner *combiner);
+grpc_resolver* grpc_resolver_create(grpc_exec_ctx* exec_ctx, const char* target,
+ const grpc_channel_args* args,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
-grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name);
+grpc_resolver_factory* grpc_resolver_factory_lookup(const char* name);
/** Given a target, return a (freshly allocated with gpr_malloc) string
representing the default authority to pass from a client. */
-char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target);
+char* grpc_get_default_authority(grpc_exec_ctx* exec_ctx, const char* target);
/** Returns a newly allocated string containing \a target, adding the
default prefix if needed. */
-char *grpc_resolver_factory_add_default_prefix_if_needed(
- grpc_exec_ctx *exec_ctx, const char *target);
+char* grpc_resolver_factory_add_default_prefix_if_needed(
+ grpc_exec_ctx* exec_ctx, const char* target);
#ifdef __cplusplus
}
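/* The registry behind this header (resolver_registry.cc above) is a
   fixed-capacity array with assert-on-duplicate registration and
   linear-scan lookup, which is plenty for a handful of schemes. A
   standalone sketch of that shape: */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 10
static const char* g_entries[MAX_ENTRIES];
static int g_count = 0;

static void register_scheme(const char* scheme) {
  for (int i = 0; i < g_count; i++) {
    assert(strcmp(scheme, g_entries[i]) != 0); /* schemes must be unique */
  }
  assert(g_count < MAX_ENTRIES);
  g_entries[g_count++] = scheme;
}

static const char* lookup_scheme(const char* scheme) {
  for (int i = 0; i < g_count; i++) {
    if (strcmp(scheme, g_entries[i]) == 0) return g_entries[i];
  }
  return NULL; /* mirrors lookup_factory returning NULL on a miss */
}

int main(void) {
  register_scheme("dns");
  register_scheme("ipv4");
  printf("dns:  %s\n", lookup_scheme("dns") ? "found" : "missing");
  printf("unix: %s\n", lookup_scheme("unix") ? "found" : "missing");
  return 0;
}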
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index b954e1b879..427df743d6 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -52,27 +52,27 @@
#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
-#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
- ((grpc_connected_subchannel *)(gpr_atm_##barrier##_load( \
+#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
+ ((grpc_connected_subchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
typedef struct {
grpc_closure closure;
- grpc_subchannel *subchannel;
+ grpc_subchannel* subchannel;
grpc_connectivity_state connectivity_state;
} state_watcher;
typedef struct external_state_watcher {
- grpc_subchannel *subchannel;
- grpc_pollset_set *pollset_set;
- grpc_closure *notify;
+ grpc_subchannel* subchannel;
+ grpc_pollset_set* pollset_set;
+ grpc_closure* notify;
grpc_closure closure;
- struct external_state_watcher *next;
- struct external_state_watcher *prev;
+ struct external_state_watcher* next;
+ struct external_state_watcher* prev;
} external_state_watcher;
struct grpc_subchannel {
- grpc_connector *connector;
+ grpc_connector* connector;
/** refcount
- lower INTERNAL_REF_BITS bits are for internal references:
@@ -82,12 +82,12 @@ struct grpc_subchannel {
gpr_atm ref_pair;
/** non-transport related channel filters */
- const grpc_channel_filter **filters;
+ const grpc_channel_filter** filters;
size_t num_filters;
/** channel arguments */
- grpc_channel_args *args;
+ grpc_channel_args* args;
- grpc_subchannel_key *key;
+ grpc_subchannel_key* key;
/** set during connection */
grpc_connect_out_args connecting_result;
@@ -100,7 +100,7 @@ struct grpc_subchannel {
/** pollset_set tracking who's interested in a connection
being setup */
- grpc_pollset_set *pollset_set;
+ grpc_pollset_set* pollset_set;
/** active connection, or null; of type grpc_connected_subchannel */
gpr_atm connected_subchannel;
@@ -130,22 +130,22 @@ struct grpc_subchannel {
};
struct grpc_subchannel_call {
- grpc_connected_subchannel *connection;
- grpc_closure *schedule_closure_after_destroy;
+ grpc_connected_subchannel* connection;
+ grpc_closure* schedule_closure_after_destroy;
};
-#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
-#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)(con))
+#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack*)((call) + 1))
+#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack*)(con))
#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
- (((grpc_subchannel_call *)(callstack)) - 1)
+ (((grpc_subchannel_call*)(callstack)) - 1)
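/* SUBCHANNEL_CALL_TO_CALL_STACK / CALLSTACK_TO_SUBCHANNEL_CALL above rely on
   the call stack being laid out immediately after the grpc_subchannel_call
   header in a single allocation (see the gpr_arena_alloc of
   sizeof(grpc_subchannel_call) + call_stack_size later in this file), so
   "+ 1" / "- 1" pointer arithmetic converts between the two. A standalone
   sketch with invented types: */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int refs;
} header; /* stands in for grpc_subchannel_call */
typedef struct {
  char data[32];
} payload; /* stands in for grpc_call_stack */

#define HEADER_TO_PAYLOAD(h) ((payload*)((h) + 1))
#define PAYLOAD_TO_HEADER(p) (((header*)(p)) - 1)

int main(void) {
  header* h = (header*)malloc(sizeof(header) + sizeof(payload));
  h->refs = 1;
  payload* p = HEADER_TO_PAYLOAD(h); /* first byte past the header */
  printf("round trip ok: %d\n", PAYLOAD_TO_HEADER(p) == h);
  free(h);
  return 0;
}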
-static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
- grpc_error *error);
+static void subchannel_connected(grpc_exec_ctx* exec_ctx, void* subchannel,
+ grpc_error* error);
#ifndef NDEBUG
#define REF_REASON reason
#define REF_MUTATE_EXTRA_ARGS \
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char* purpose
#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
#else
#define REF_REASON ""
@@ -157,21 +157,21 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
* connection implementation
*/
-static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg;
+static void connection_destroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_connected_subchannel* c = (grpc_connected_subchannel*)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
-grpc_connected_subchannel *grpc_connected_subchannel_ref(
- grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_connected_subchannel* grpc_connected_subchannel_ref(
+ grpc_connected_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
return c;
}
-void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *c
+void grpc_connected_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c),
REF_REASON);
@@ -181,10 +181,10 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
* grpc_subchannel implementation
*/
-static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_subchannel *c = (grpc_subchannel *)arg;
- gpr_free((void *)c->filters);
+static void subchannel_destroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_subchannel* c = (grpc_subchannel*)arg;
+ gpr_free((void*)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
@@ -194,7 +194,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
-static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
+static gpr_atm ref_mutate(grpc_subchannel* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
@@ -208,8 +208,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
return old_val;
}
-grpc_subchannel *grpc_subchannel_ref(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel* grpc_subchannel_ref(
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
0 REF_MUTATE_PURPOSE("STRONG_REF"));
@@ -217,16 +217,16 @@ grpc_subchannel *grpc_subchannel_ref(
return c;
}
-grpc_subchannel *grpc_subchannel_weak_ref(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel* grpc_subchannel_weak_ref(
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
GPR_ASSERT(old_refs != 0);
return c;
}
-grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
if (!c) return NULL;
for (;;) {
gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair);
@@ -241,8 +241,8 @@ grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
}
}
-static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
- grpc_connected_subchannel *con;
+static void disconnect(grpc_exec_ctx* exec_ctx, grpc_subchannel* c) {
+ grpc_connected_subchannel* con;
grpc_subchannel_index_unregister(exec_ctx, c->key, c);
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected);
@@ -258,8 +258,8 @@ static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
gpr_mu_unlock(&c->mu);
}
-void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+void grpc_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
// add a weak ref and subtract a strong ref (atomically)
old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
@@ -270,56 +270,57 @@ void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "strong-unref");
}
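/* The ref_pair manipulated above packs two counters into one atomic word:
   strong refs in the high bits, internal/weak refs in the low
   INTERNAL_REF_BITS bits, so "(gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS)"
   drops a strong ref and takes a weak ref in a single fetch-and-add. A
   standalone sketch with plain (non-atomic) arithmetic; the bit width is an
   assumption for illustration: */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INTERNAL_REF_BITS 16 /* illustrative; the real constant may differ */
#define STRONG_ONE ((uint64_t)1 << INTERNAL_REF_BITS)

static void show(const char* when, uint64_t ref_pair) {
  printf("%s: strong=%" PRIu64 " weak=%" PRIu64 "\n", when,
         ref_pair >> INTERNAL_REF_BITS, ref_pair & (STRONG_ONE - 1));
}

int main(void) {
  uint64_t ref_pair = STRONG_ONE; /* creation: 1 strong, 0 weak */
  show("created", ref_pair);
  ref_pair += (uint64_t)1 - STRONG_ONE; /* strong unref: -1 strong, +1 weak */
  show("strong unref", ref_pair);
  uint64_t old_refs = ref_pair;
  ref_pair -= 1; /* weak unref */
  if (old_refs == 1) printf("last ref gone -> subchannel_destroy\n");
  return 0;
}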
-void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c
+void grpc_subchannel_weak_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
}
-grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
- grpc_connector *connector,
- const grpc_subchannel_args *args) {
- grpc_subchannel_key *key = grpc_subchannel_key_create(args);
- grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
+grpc_subchannel* grpc_subchannel_create(grpc_exec_ctx* exec_ctx,
+ grpc_connector* connector,
+ const grpc_subchannel_args* args) {
+ grpc_subchannel_key* key = grpc_subchannel_key_create(args);
+ grpc_subchannel* c = grpc_subchannel_index_find(exec_ctx, key);
if (c) {
grpc_subchannel_key_destroy(exec_ctx, key);
return c;
}
GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx);
- c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
+ c = (grpc_subchannel*)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = (const grpc_channel_filter **)gpr_malloc(
- sizeof(grpc_channel_filter *) * c->num_filters);
- memcpy((void *)c->filters, args->filters,
- sizeof(grpc_channel_filter *) * c->num_filters);
+ c->filters = (const grpc_channel_filter**)gpr_malloc(
+ sizeof(grpc_channel_filter*) * c->num_filters);
+ memcpy((void*)c->filters, args->filters,
+ sizeof(grpc_channel_filter*) * c->num_filters);
} else {
c->filters = NULL;
}
c->pollset_set = grpc_pollset_set_create();
- grpc_resolved_address *addr =
- (grpc_resolved_address *)gpr_malloc(sizeof(*addr));
+ grpc_resolved_address* addr =
+ (grpc_resolved_address*)gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
- grpc_resolved_address *new_address = NULL;
- grpc_channel_args *new_args = NULL;
+ grpc_resolved_address* new_address = NULL;
+ grpc_channel_args* new_args = NULL;
if (grpc_proxy_mappers_map_address(exec_ctx, addr, args->args, &new_address,
&new_args)) {
GPR_ASSERT(new_address != NULL);
gpr_free(addr);
addr = new_address;
}
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
+ static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
gpr_free(addr);
c->args = grpc_channel_args_copy_and_add_and_remove(
@@ -375,8 +376,8 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return grpc_subchannel_index_register(exec_ctx, key, c);
}
-static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c) {
+static void continue_connect_locked(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c) {
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
@@ -390,8 +391,8 @@ static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
&c->connected);
}
-grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
- grpc_error **error) {
+grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c,
+ grpc_error** error) {
grpc_connectivity_state state;
gpr_mu_lock(&c->mu);
state = grpc_connectivity_state_get(&c->state_tracker, error);
@@ -399,10 +400,10 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
return state;
}
-static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- external_state_watcher *w = (external_state_watcher *)arg;
- grpc_closure *follow_up = w->notify;
+static void on_external_state_watcher_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ external_state_watcher* w = (external_state_watcher*)arg;
+ grpc_closure* follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
w->pollset_set);
@@ -416,8 +417,8 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
-static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_subchannel *c = (grpc_subchannel *)arg;
+static void on_alarm(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_subchannel* c = (grpc_subchannel*)arg;
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -438,8 +439,8 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_UNREF(error);
}
-static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c) {
+static void maybe_start_connecting_locked(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c) {
if (c->disconnected) {
/* Don't try to connect if we're already disconnected */
return;
@@ -484,10 +485,10 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
}
void grpc_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *notify) {
- external_state_watcher *w;
+ grpc_exec_ctx* exec_ctx, grpc_subchannel* c,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* notify) {
+ external_state_watcher* w;
if (state == NULL) {
gpr_mu_lock(&c->mu);
@@ -500,7 +501,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = (external_state_watcher *)gpr_malloc(sizeof(*w));
+ w = (external_state_watcher*)gpr_malloc(sizeof(*w));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -523,18 +524,18 @@ void grpc_subchannel_notify_on_state_change(
}
void grpc_connected_subchannel_process_transport_op(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_transport_op *op) {
- grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
- grpc_channel_element *top_elem = grpc_channel_stack_element(channel_stack, 0);
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+ grpc_transport_op* op) {
+ grpc_channel_stack* channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
+ grpc_channel_element* top_elem = grpc_channel_stack_element(channel_stack, 0);
top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
}
-static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
- grpc_error *error) {
- state_watcher *sw = (state_watcher *)p;
- grpc_subchannel *c = sw->subchannel;
- gpr_mu *mu = &c->mu;
+static void subchannel_on_child_state_changed(grpc_exec_ctx* exec_ctx, void* p,
+ grpc_error* error) {
+ state_watcher* sw = (state_watcher*)p;
+ grpc_subchannel* c = sw->subchannel;
+ gpr_mu* mu = &c->mu;
gpr_mu_lock(mu);
@@ -559,13 +560,13 @@ static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
gpr_free(sw);
}
-static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *con,
- grpc_pollset_set *interested_parties,
- grpc_connectivity_state *state,
- grpc_closure *closure) {
- grpc_transport_op *op = grpc_make_transport_op(NULL);
- grpc_channel_element *elem;
+static void connected_subchannel_state_op(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* con,
+ grpc_pollset_set* interested_parties,
+ grpc_connectivity_state* state,
+ grpc_closure* closure) {
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
+ grpc_channel_element* elem;
op->connectivity_state = state;
op->on_connectivity_state_change = closure;
op->bind_pollset_set = interested_parties;
@@ -574,31 +575,31 @@ static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx,
}
void grpc_connected_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *closure) {
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* closure) {
connected_subchannel_state_op(exec_ctx, con, interested_parties, state,
closure);
}
-void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *con,
- grpc_closure *closure) {
- grpc_transport_op *op = grpc_make_transport_op(NULL);
- grpc_channel_element *elem;
+void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* con,
+ grpc_closure* closure) {
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
+ grpc_channel_element* elem;
op->send_ping = closure;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
elem->filter->start_transport_op(exec_ctx, elem, op);
}
-static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c) {
- grpc_connected_subchannel *con;
- grpc_channel_stack *stk;
- state_watcher *sw_subchannel;
+static bool publish_transport_locked(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c) {
+ grpc_connected_subchannel* con;
+ grpc_channel_stack* stk;
+ state_watcher* sw_subchannel;
/* construct channel stack */
- grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
grpc_channel_stack_builder_set_channel_arguments(
exec_ctx, builder, c->connecting_result.channel_args);
grpc_channel_stack_builder_set_transport(builder,
@@ -609,8 +610,8 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder_destroy(exec_ctx, builder);
return false;
}
- grpc_error *error = grpc_channel_stack_builder_finish(
- exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con);
+ grpc_error* error = grpc_channel_stack_builder_finish(
+ exec_ctx, builder, 0, 1, connection_destroy, NULL, (void**)&con);
if (error != GRPC_ERROR_NONE) {
grpc_transport_destroy(exec_ctx, c->connecting_result.transport);
gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
@@ -622,7 +623,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel = (state_watcher*)gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
@@ -657,10 +658,10 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
return true;
}
-static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_subchannel *c = (grpc_subchannel *)arg;
- grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
+static void subchannel_connected(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
gpr_mu_lock(&c->mu);
@@ -678,7 +679,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
- const char *errmsg = grpc_error_string(error);
+ const char* errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
maybe_start_connecting_locked(exec_ctx, c);
@@ -693,65 +694,65 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
* grpc_subchannel_call implementation
*/
-static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
- grpc_error *error) {
- grpc_subchannel_call *c = (grpc_subchannel_call *)call;
+static void subchannel_call_destroy(grpc_exec_ctx* exec_ctx, void* call,
+ grpc_error* error) {
+ grpc_subchannel_call* c = (grpc_subchannel_call*)call;
GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
- grpc_connected_subchannel *connection = c->connection;
+ grpc_connected_subchannel* connection = c->connection;
grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL,
c->schedule_closure_after_destroy);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call");
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
-void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call *call,
- grpc_closure *closure) {
+void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
+ grpc_closure* closure) {
GPR_ASSERT(call->schedule_closure_after_destroy == NULL);
GPR_ASSERT(closure != NULL);
call->schedule_closure_after_destroy = closure;
}
void grpc_subchannel_call_ref(
- grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *c
+void grpc_subchannel_call_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call,
- grpc_transport_stream_op_batch *batch) {
+void grpc_subchannel_call_process_op(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* call,
+ grpc_transport_stream_op_batch* batch) {
GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
- grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
- grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
+ grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
+ grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
}
-grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
- grpc_subchannel *c) {
+grpc_connected_subchannel* grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel* c) {
return GET_CONNECTED_SUBCHANNEL(c, acq);
}
-const grpc_subchannel_key *grpc_subchannel_get_key(
- const grpc_subchannel *subchannel) {
+const grpc_subchannel_key* grpc_subchannel_get_key(
+ const grpc_subchannel* subchannel) {
return subchannel->key;
}
-grpc_error *grpc_connected_subchannel_create_call(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- const grpc_connected_subchannel_call_args *args,
- grpc_subchannel_call **call) {
- grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = (grpc_subchannel_call *)gpr_arena_alloc(
+grpc_error* grpc_connected_subchannel_create_call(
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+ const grpc_connected_subchannel_call_args* args,
+ grpc_subchannel_call** call) {
+ grpc_channel_stack* chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
+ *call = (grpc_subchannel_call*)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
- grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
+ grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
const grpc_call_element_args call_args = {
callstk, /* call_stack */
@@ -763,10 +764,10 @@ grpc_error *grpc_connected_subchannel_create_call(
args->arena, /* arena */
args->call_combiner /* call_combiner */
};
- grpc_error *error = grpc_call_stack_init(
+ grpc_error* error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
- const char *error_string = grpc_error_string(error);
+ const char* error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
return error;
}
@@ -774,39 +775,39 @@ grpc_error *grpc_connected_subchannel_create_call(
return GRPC_ERROR_NONE;
}
-grpc_call_stack *grpc_subchannel_call_get_call_stack(
- grpc_subchannel_call *subchannel_call) {
+grpc_call_stack* grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call* subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
}
-static void grpc_uri_to_sockaddr(grpc_exec_ctx *exec_ctx, const char *uri_str,
- grpc_resolved_address *addr) {
- grpc_uri *uri = grpc_uri_parse(exec_ctx, uri_str, 0 /* suppress_errors */);
+static void grpc_uri_to_sockaddr(grpc_exec_ctx* exec_ctx, const char* uri_str,
+ grpc_resolved_address* addr) {
+ grpc_uri* uri = grpc_uri_parse(exec_ctx, uri_str, 0 /* suppress_errors */);
GPR_ASSERT(uri != NULL);
if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr));
grpc_uri_destroy(uri);
}
-void grpc_get_subchannel_address_arg(grpc_exec_ctx *exec_ctx,
- const grpc_channel_args *args,
- grpc_resolved_address *addr) {
- const char *addr_uri_str = grpc_get_subchannel_address_uri_arg(args);
+void grpc_get_subchannel_address_arg(grpc_exec_ctx* exec_ctx,
+ const grpc_channel_args* args,
+ grpc_resolved_address* addr) {
+ const char* addr_uri_str = grpc_get_subchannel_address_uri_arg(args);
memset(addr, 0, sizeof(*addr));
if (*addr_uri_str != '\0') {
grpc_uri_to_sockaddr(exec_ctx, addr_uri_str, addr);
}
}
-const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
- const grpc_arg *addr_arg =
+const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args) {
+ const grpc_arg* addr_arg =
grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS);
GPR_ASSERT(addr_arg != NULL); // Should have been set by LB policy.
GPR_ASSERT(addr_arg->type == GRPC_ARG_STRING);
return addr_arg->value.string;
}
-grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
+grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr) {
return grpc_channel_arg_string_create(
- (char *)GRPC_ARG_SUBCHANNEL_ADDRESS,
+ (char*)GRPC_ARG_SUBCHANNEL_ADDRESS,
addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
}
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 1cd73f3ff4..970f182ff0 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -79,118 +79,118 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
#endif
-grpc_subchannel *grpc_subchannel_ref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel
+grpc_subchannel* grpc_subchannel_ref(
+ grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel *grpc_subchannel_weak_ref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel
+grpc_subchannel* grpc_subchannel_weak_ref(
+ grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_weak_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_connected_subchannel *grpc_connected_subchannel_ref(
- grpc_connected_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *channel
+grpc_connected_subchannel* grpc_connected_subchannel_ref(
+ grpc_connected_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(
- grpc_subchannel_call *call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call
+ grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_call_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a subchannel call */
typedef struct {
- grpc_polling_entity *pollent;
+ grpc_polling_entity* pollent;
grpc_slice path;
gpr_timespec start_time;
grpc_millis deadline;
- gpr_arena *arena;
- grpc_call_context_element *context;
- grpc_call_combiner *call_combiner;
+ gpr_arena* arena;
+ grpc_call_context_element* context;
+ grpc_call_combiner* call_combiner;
} grpc_connected_subchannel_call_args;
-grpc_error *grpc_connected_subchannel_create_call(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- const grpc_connected_subchannel_call_args *args,
- grpc_subchannel_call **subchannel_call);
+grpc_error* grpc_connected_subchannel_create_call(
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* connected_subchannel,
+ const grpc_connected_subchannel_call_args* args,
+ grpc_subchannel_call** subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel,
- grpc_transport_op *op);
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* subchannel,
+ grpc_transport_op* op);
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
- grpc_subchannel *channel, grpc_error **error);
+ grpc_subchannel* channel, grpc_error** error);
/** Calls notify when the connectivity state of a channel becomes different
from *state. Updates *state with the new state of the channel. */
void grpc_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *notify);
+ grpc_exec_ctx* exec_ctx, grpc_subchannel* channel,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* notify);
void grpc_connected_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *notify);
-void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *channel,
- grpc_closure *notify);
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* channel,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* notify);
+void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* channel,
+ grpc_closure* notify);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */
-grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
- grpc_subchannel *subchannel);
+grpc_connected_subchannel* grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel* subchannel);
/** return the subchannel index key for \a subchannel */
-const grpc_subchannel_key *grpc_subchannel_get_key(
- const grpc_subchannel *subchannel);
+const grpc_subchannel_key* grpc_subchannel_get_key(
+ const grpc_subchannel* subchannel);
/** continue processing a transport op */
-void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *subchannel_call,
- grpc_transport_stream_op_batch *op);
+void grpc_subchannel_call_process_op(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* subchannel_call,
+ grpc_transport_stream_op_batch* op);
/** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure(
- grpc_subchannel_call *subchannel_call, grpc_closure *closure);
+ grpc_subchannel_call* subchannel_call, grpc_closure* closure);
-grpc_call_stack *grpc_subchannel_call_get_call_stack(
- grpc_subchannel_call *subchannel_call);
+grpc_call_stack* grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call* subchannel_call);
struct grpc_subchannel_args {
/* When updating this struct, also update subchannel_index.c */
/** Channel filters for this channel - wrapped factories will likely
want to mutate this */
- const grpc_channel_filter **filters;
+ const grpc_channel_filter** filters;
/** The number of filters in the above array */
size_t filter_count;
/** Channel arguments to be supplied to the newly created channel */
- const grpc_channel_args *args;
+ const grpc_channel_args* args;
};
/** create a subchannel given a connector */
-grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
- grpc_connector *connector,
- const grpc_subchannel_args *args);
+grpc_subchannel* grpc_subchannel_create(grpc_exec_ctx* exec_ctx,
+ grpc_connector* connector,
+ const grpc_subchannel_args* args);
/// Sets \a addr from \a args.
-void grpc_get_subchannel_address_arg(grpc_exec_ctx *exec_ctx,
- const grpc_channel_args *args,
- grpc_resolved_address *addr);
+void grpc_get_subchannel_address_arg(grpc_exec_ctx* exec_ctx,
+ const grpc_channel_args* args,
+ grpc_resolved_address* addr);
/// Returns the URI string for the address to connect to.
-const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args);
+const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args);
/// Returns a new channel arg encoding the subchannel address as a string.
/// Caller is responsible for freeing the string.
-grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr);
+grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr);
#ifdef __cplusplus
}
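The grpc_connected_subchannel_call_args struct above is the whole contract for starting a call on a connected subchannel. A hypothetical caller (path is a placeholder, GRPC_MILLIS_INF_FUTURE means no deadline) might fill it like this:

static grpc_error* example_start_call(grpc_exec_ctx* exec_ctx,
                                      grpc_connected_subchannel* con,
                                      grpc_polling_entity* pollent,
                                      gpr_arena* arena,
                                      grpc_call_context_element* context,
                                      grpc_call_combiner* call_combiner,
                                      grpc_subchannel_call** call) {
  grpc_connected_subchannel_call_args args;
  args.pollent = pollent;
  args.path = grpc_slice_from_static_string("/pkg.Service/Method"); /* placeholder */
  args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
  args.deadline = GRPC_MILLIS_INF_FUTURE;
  args.arena = arena;
  args.context = context;
  args.call_combiner = call_combiner;
  return grpc_connected_subchannel_create_call(exec_ctx, con, &args, call);
}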
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 1f466ec0b8..0c4213cf77 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -42,15 +42,15 @@ struct grpc_subchannel_key {
static bool g_force_creation = false;
-static grpc_subchannel_key *create_key(
- const grpc_subchannel_args *args,
- grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
- grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k));
+static grpc_subchannel_key* create_key(
+ const grpc_subchannel_args* args,
+ grpc_channel_args* (*copy_channel_args)(const grpc_channel_args* args)) {
+ grpc_subchannel_key* k = (grpc_subchannel_key*)gpr_malloc(sizeof(*k));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters = (const grpc_channel_filter **)gpr_malloc(
+ k->args.filters = (const grpc_channel_filter**)gpr_malloc(
sizeof(*k->args.filters) * k->args.filter_count);
- memcpy((grpc_channel_filter *)k->args.filters, args->filters,
+ memcpy((grpc_channel_filter*)k->args.filters, args->filters,
sizeof(*k->args.filters) * k->args.filter_count);
} else {
k->args.filters = NULL;
@@ -59,17 +59,17 @@ static grpc_subchannel_key *create_key(
return k;
}
-grpc_subchannel_key *grpc_subchannel_key_create(
- const grpc_subchannel_args *args) {
+grpc_subchannel_key* grpc_subchannel_key_create(
+ const grpc_subchannel_args* args) {
return create_key(args, grpc_channel_args_normalize);
}
-static grpc_subchannel_key *subchannel_key_copy(grpc_subchannel_key *k) {
+static grpc_subchannel_key* subchannel_key_copy(grpc_subchannel_key* k) {
return create_key(&k->args, grpc_channel_args_copy);
}
-int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
- const grpc_subchannel_key *b) {
+int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
+ const grpc_subchannel_key* b) {
if (g_force_creation) return false;
int c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
@@ -81,35 +81,34 @@ int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
return grpc_channel_args_compare(a->args.args, b->args.args);
}
-void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *k) {
- gpr_free((grpc_channel_args *)k->args.filters);
- grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)k->args.args);
+void grpc_subchannel_key_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* k) {
+ gpr_free((grpc_channel_args*)k->args.filters);
+ grpc_channel_args_destroy(exec_ctx, (grpc_channel_args*)k->args.args);
gpr_free(k);
}
-static void sck_avl_destroy(void *p, void *user_data) {
- grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key *)p);
+static void sck_avl_destroy(void* p, void* user_data) {
+ grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
+ grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key*)p);
}
-static void *sck_avl_copy(void *p, void *unused) {
- return subchannel_key_copy((grpc_subchannel_key *)p);
+static void* sck_avl_copy(void* p, void* unused) {
+ return subchannel_key_copy((grpc_subchannel_key*)p);
}
-static long sck_avl_compare(void *a, void *b, void *unused) {
- return grpc_subchannel_key_compare((grpc_subchannel_key *)a,
- (grpc_subchannel_key *)b);
+static long sck_avl_compare(void* a, void* b, void* unused) {
+ return grpc_subchannel_key_compare((grpc_subchannel_key*)a,
+ (grpc_subchannel_key*)b);
}
-static void scv_avl_destroy(void *p, void *user_data) {
- grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
- "subchannel_index");
+static void scv_avl_destroy(void* p, void* user_data) {
+ grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel*)p, "subchannel_index");
}
-static void *scv_avl_copy(void *p, void *unused) {
- GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel *)p, "subchannel_index");
+static void* scv_avl_copy(void* p, void* unused) {
+ GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel*)p, "subchannel_index");
return p;
}
@@ -145,25 +144,25 @@ void grpc_subchannel_index_unref(void) {
void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); }
-grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key) {
+grpc_subchannel* grpc_subchannel_index_find(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key) {
// Lock, and take a reference to the subchannel index.
// We don't need to do the search under a lock as avl's are immutable.
gpr_mu_lock(&g_mu);
gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx);
gpr_mu_unlock(&g_mu);
- grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
+ grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
+ (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;
}
-grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed) {
- grpc_subchannel *c = NULL;
+grpc_subchannel* grpc_subchannel_index_register(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed) {
+ grpc_subchannel* c = NULL;
bool need_to_unref_constructed;
while (c == NULL) {
@@ -176,7 +175,7 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
+ c = (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx);
if (c != NULL) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -211,9 +210,9 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
return c;
}
-void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed) {
+void grpc_subchannel_index_unregister(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed) {
bool done = false;
while (!done) {
// Compare and swap loop:
@@ -224,7 +223,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
+ grpc_subchannel* c = (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx);
if (c != constructed) {
gpr_avl_unref(index, exec_ctx);
break;
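Both the register and unregister paths above run the same optimistic compare-and-swap loop over the immutable AVL: snapshot the tree under the lock, build a new version outside it, and install it only if the root is still unchanged. Condensed as a sketch (g_mu and g_subchannel_index are the file-level globals referenced in these hunks; removal is shown for brevity):

static void example_cas_remove(grpc_exec_ctx* exec_ctx,
                               grpc_subchannel_key* key) {
  bool done = false;
  while (!done) {
    gpr_mu_lock(&g_mu);
    gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx); /* snapshot */
    gpr_mu_unlock(&g_mu);
    /* Mutate a private copy outside the lock; avl's are immutable. */
    gpr_avl updated = gpr_avl_remove(gpr_avl_ref(index, exec_ctx), key, exec_ctx);
    gpr_mu_lock(&g_mu);
    if (index.root == g_subchannel_index.root) { /* nobody raced us: install */
      GPR_SWAP(gpr_avl, updated, g_subchannel_index);
      done = true;
    }
    gpr_mu_unlock(&g_mu);
    gpr_avl_unref(updated, exec_ctx);
    gpr_avl_unref(index, exec_ctx);
  }
}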
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index 05c3878379..47f9c7bb1e 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -29,34 +29,34 @@ extern "C" {
shared amongst channels */
/** Create a key that can be used to uniquely identify a subchannel */
-grpc_subchannel_key *grpc_subchannel_key_create(
- const grpc_subchannel_args *args);
+grpc_subchannel_key* grpc_subchannel_key_create(
+ const grpc_subchannel_args* args);
/** Destroy a subchannel key */
-void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key);
+void grpc_subchannel_key_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key);
/** Given a subchannel key, find the subchannel registered for it.
Returns NULL if no such channel exists.
Thread-safe. */
-grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key);
+grpc_subchannel* grpc_subchannel_index_find(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key);
/** Register a subchannel against a key.
Takes ownership of \a constructed.
Returns the registered subchannel. This may be different from
\a constructed in the case of a registration race. */
-grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed);
+grpc_subchannel* grpc_subchannel_index_register(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed);
/** Remove \a constructed as the registered subchannel for \a key. */
-void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed);
+void grpc_subchannel_index_unregister(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed);
-int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
- const grpc_subchannel_key *b);
+int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
+ const grpc_subchannel_key* b);
/** Initialize the subchannel index (global) */
void grpc_subchannel_index_init(void);
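Put together, a creation path would consult the index before installing a freshly constructed subchannel. The helper below is a hypothetical illustration, leaning on the ref-counting macros from subchannel.h:

static grpc_subchannel* example_get_or_register(grpc_exec_ctx* exec_ctx,
                                                grpc_subchannel_key* key,
                                                grpc_subchannel* constructed) {
  grpc_subchannel* c = grpc_subchannel_index_find(exec_ctx, key);
  if (c != NULL) {
    /* An equivalent subchannel already exists; drop the one we built. */
    GRPC_SUBCHANNEL_UNREF(exec_ctx, constructed, "example_duplicate");
    return c;
  }
  /* Ownership of `constructed` passes to the index; a registration race may
     hand back a different, already-registered subchannel. */
  return grpc_subchannel_index_register(exec_ctx, key, constructed);
}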
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index fb4fb8e694..917e65342b 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -34,9 +34,9 @@
/** a size_t default value... maps to all 1's */
#define NOT_SET (~(size_t)0)
-static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
+static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
bool suppress_errors) {
- char *line_prefix;
+ char* line_prefix;
size_t pfx_len;
if (!suppress_errors) {
@@ -45,7 +45,7 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = (char *)gpr_malloc(pfx_len + 1);
+ line_prefix = (char*)gpr_malloc(pfx_len + 1);
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -56,13 +56,13 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
}
/** Returns a copy of percent decoded \a src[begin, end) */
-static char *decode_and_copy_component(grpc_exec_ctx *exec_ctx, const char *src,
+static char* decode_and_copy_component(grpc_exec_ctx* exec_ctx, const char* src,
size_t begin, size_t end) {
grpc_slice component =
grpc_slice_from_copied_buffer(src + begin, end - begin);
grpc_slice decoded_component =
grpc_permissive_percent_decode_slice(component);
- char *out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
+ char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
grpc_slice_unref_internal(exec_ctx, component);
grpc_slice_unref_internal(exec_ctx, decoded_component);
return out;
@@ -76,7 +76,7 @@ static bool valid_hex(char c) {
/** Returns how many chars to advance if \a uri_text[i] begins a valid \a pchar
* production. If \a uri_text[i] introduces an invalid \a pchar (such as percent
* sign not followed by two hex digits), NOT_SET is returned. */
-static size_t parse_pchar(const char *uri_text, size_t i) {
+static size_t parse_pchar(const char* uri_text, size_t i) {
/* pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
* unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
* pct-encoded = "%" HEXDIG HEXDIG
@@ -118,7 +118,7 @@ static size_t parse_pchar(const char *uri_text, size_t i) {
}
/* *( pchar / "?" / "/" ) */
-static int parse_fragment_or_query(const char *uri_text, size_t *i) {
+static int parse_fragment_or_query(const char* uri_text, size_t* i) {
char c;
while ((c = uri_text[*i]) != 0) {
const size_t advance = parse_pchar(uri_text, *i); /* pchar */
@@ -143,9 +143,9 @@ static int parse_fragment_or_query(const char *uri_text, size_t *i) {
return 1;
}
-static void parse_query_parts(grpc_uri *uri) {
- static const char *QUERY_PARTS_SEPARATOR = "&";
- static const char *QUERY_PARTS_VALUE_SEPARATOR = "=";
+static void parse_query_parts(grpc_uri* uri) {
+ static const char* QUERY_PARTS_SEPARATOR = "&";
+ static const char* QUERY_PARTS_VALUE_SEPARATOR = "=";
GPR_ASSERT(uri->query != NULL);
if (uri->query[0] == '\0') {
uri->query_parts = NULL;
@@ -157,11 +157,11 @@ static void parse_query_parts(grpc_uri *uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
uri->query_parts_values =
- (char **)gpr_malloc(uri->num_query_parts * sizeof(char **));
+ (char**)gpr_malloc(uri->num_query_parts * sizeof(char**));
for (size_t i = 0; i < uri->num_query_parts; i++) {
- char **query_param_parts;
+ char** query_param_parts;
size_t num_query_param_parts;
- char *full = uri->query_parts[i];
+ char* full = uri->query_parts[i];
gpr_string_split(full, QUERY_PARTS_VALUE_SEPARATOR, &query_param_parts,
&num_query_param_parts);
GPR_ASSERT(num_query_param_parts > 0);
@@ -182,9 +182,9 @@ static void parse_query_parts(grpc_uri *uri) {
}
}
-grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
+grpc_uri* grpc_uri_parse(grpc_exec_ctx* exec_ctx, const char* uri_text,
bool suppress_errors) {
- grpc_uri *uri;
+ grpc_uri* uri;
size_t scheme_begin = 0;
size_t scheme_end = NOT_SET;
size_t authority_begin = NOT_SET;
@@ -270,7 +270,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
fragment_end = i;
}
- uri = (grpc_uri *)gpr_zalloc(sizeof(*uri));
+ uri = (grpc_uri*)gpr_zalloc(sizeof(*uri));
uri->scheme =
decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
uri->authority = decode_and_copy_component(exec_ctx, uri_text,
@@ -286,7 +286,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
return uri;
}
-const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key) {
+const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key) {
GPR_ASSERT(key != NULL);
if (key[0] == '\0') return NULL;
@@ -298,7 +298,7 @@ const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key) {
return NULL;
}
-void grpc_uri_destroy(grpc_uri *uri) {
+void grpc_uri_destroy(grpc_uri* uri) {
if (!uri) return;
gpr_free(uri->scheme);
gpr_free(uri->authority);
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
index e78da5928b..cd877ade8d 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/ext/filters/client_channel/uri_parser.h
@@ -27,29 +27,29 @@ extern "C" {
#endif
typedef struct {
- char *scheme;
- char *authority;
- char *path;
- char *query;
+ char* scheme;
+ char* authority;
+ char* path;
+ char* query;
/** Query substrings separated by '&' */
- char **query_parts;
+ char** query_parts;
/** Number of elements in \a query_parts and \a query_parts_values */
size_t num_query_parts;
/** Split each query part by '='. NULL if not present. */
- char **query_parts_values;
- char *fragment;
+ char** query_parts_values;
+ char* fragment;
} grpc_uri;
/** parse a uri, return NULL on failure */
-grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
+grpc_uri* grpc_uri_parse(grpc_exec_ctx* exec_ctx, const char* uri_text,
bool suppress_errors);
/** return the part of a query string after the '=' in "?key=xxx&...", or NULL
* if key is not present */
-const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key);
+const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key);
/** destroy a uri */
-void grpc_uri_destroy(grpc_uri *uri);
+void grpc_uri_destroy(grpc_uri* uri);
#ifdef __cplusplus
}
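A small hypothetical caller of this API: parse a target string of the kind the sockaddr resolvers consume, read one query argument, and clean up.

static void example_parse_target(grpc_exec_ctx* exec_ctx) {
  grpc_uri* uri =
      grpc_uri_parse(exec_ctx, "ipv4:127.0.0.1:1234?balancer=1",
                     false /* suppress_errors: log parse failures */);
  if (uri == NULL) return; /* bad_uri() already logged the failing position */
  const char* balancer = grpc_uri_get_query_arg(uri, "balancer");
  gpr_log(GPR_INFO, "scheme=%s path=%s balancer=%s", uri->scheme, uri->path,
          balancer != NULL ? balancer : "(unset)");
  grpc_uri_destroy(uri);
}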
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index 6208089f2e..590bd22b1e 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -36,7 +36,7 @@
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -45,20 +45,20 @@ typedef struct call_data {
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
// State for handling recv_initial_metadata ops.
- grpc_metadata_batch *recv_initial_metadata;
- grpc_closure *original_recv_initial_metadata_ready;
+ grpc_metadata_batch* recv_initial_metadata;
+ grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
// State for handling recv_trailing_metadata ops.
- grpc_metadata_batch *recv_trailing_metadata;
- grpc_closure *original_recv_trailing_metadata_on_complete;
+ grpc_metadata_batch* recv_trailing_metadata;
+ grpc_closure* original_recv_trailing_metadata_on_complete;
grpc_closure recv_trailing_metadata_on_complete;
// State for handling send_message ops.
- grpc_transport_stream_op_batch *send_message_batch;
+ grpc_transport_stream_op_batch* send_message_batch;
size_t send_message_bytes_read;
grpc_byte_stream_cache send_message_cache;
grpc_caching_byte_stream send_message_caching_stream;
grpc_closure on_send_message_next_done;
- grpc_closure *original_send_message_on_complete;
+ grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
} call_data;
@@ -68,18 +68,18 @@ typedef struct channel_data {
size_t max_payload_size_for_get;
} channel_data;
-static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_metadata_batch *b) {
+static grpc_error* client_filter_incoming_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_metadata_batch* b) {
if (b->idx.named.status != NULL) {
if (grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) {
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.status);
} else {
- char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
+ char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
GPR_DUMP_ASCII);
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Received http2 header with status: %s", val);
- grpc_error *e = grpc_error_set_str(
+ grpc_error* e = grpc_error_set_str(
grpc_error_set_int(
grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -125,7 +125,7 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
- char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
+ char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
GPR_DUMP_ASCII);
gpr_log(GPR_INFO, "Unexpected content-type '%s'", val);
gpr_free(val);
@@ -137,10 +137,10 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
- void *user_data, grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
+ void* user_data, grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -151,11 +151,11 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
error);
}
-static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void recv_trailing_metadata_on_complete(grpc_exec_ctx* exec_ctx,
+ void* user_data,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_trailing_metadata);
@@ -166,10 +166,10 @@ static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
error);
}
-static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void send_message_on_complete(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
grpc_byte_stream_cache_destroy(exec_ctx, &calld->send_message_cache);
GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete,
GRPC_ERROR_REF(error));
@@ -177,10 +177,10 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
// Pulls a slice from the send_message byte stream, updating
// calld->send_message_bytes_read.
-static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
- call_data *calld) {
+static grpc_error* pull_slice_from_send_message(grpc_exec_ctx* exec_ctx,
+ call_data* calld) {
grpc_slice incoming_slice;
- grpc_error *error = grpc_byte_stream_pull(
+ grpc_error* error = grpc_byte_stream_pull(
exec_ctx, &calld->send_message_caching_stream.base, &incoming_slice);
if (error == GRPC_ERROR_NONE) {
calld->send_message_bytes_read += GRPC_SLICE_LENGTH(incoming_slice);
@@ -194,12 +194,12 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// calld->send_message_caching_stream.base.length, then we have completed
// reading from the byte stream; otherwise, an async read has been dispatched
// and on_send_message_next_done() will be invoked when it is complete.
-static grpc_error *read_all_available_send_message_data(grpc_exec_ctx *exec_ctx,
- call_data *calld) {
+static grpc_error* read_all_available_send_message_data(grpc_exec_ctx* exec_ctx,
+ call_data* calld) {
while (grpc_byte_stream_next(exec_ctx,
&calld->send_message_caching_stream.base,
~(size_t)0, &calld->on_send_message_next_done)) {
- grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
+ grpc_error* error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) return error;
if (calld->send_message_bytes_read ==
calld->send_message_caching_stream.base.length) {
@@ -210,10 +210,10 @@ static grpc_error *read_all_available_send_message_data(grpc_exec_ctx *exec_ctx,
}
// Async callback for grpc_byte_stream_next().
-static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_send_message_next_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error, calld->call_combiner);
@@ -233,8 +233,8 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
}
-static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
- char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1);
+static char* slice_buffer_to_string(grpc_slice_buffer* slice_buffer) {
+ char* payload_bytes = (char*)gpr_malloc(slice_buffer->length + 1);
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@@ -248,10 +248,10 @@ static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
// Modifies the path entry in the batch's send_initial_metadata to
// append the base64-encoded query for a GET request.
-static grpc_error *update_path_for_get(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* update_path_for_get(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice path_slice =
GRPC_MDVALUE(batch->payload->send_initial_metadata.send_initial_metadata
->idx.named.path->md);
@@ -264,33 +264,33 @@ static grpc_error *update_path_for_get(grpc_exec_ctx *exec_ctx,
false /* multi_line */);
grpc_slice path_with_query_slice = GRPC_SLICE_MALLOC(estimated_len);
/* memcopy individual pieces into this slice */
- char *write_ptr = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
- char *original_path = (char *)GRPC_SLICE_START_PTR(path_slice);
+ char* write_ptr = (char*)GRPC_SLICE_START_PTR(path_with_query_slice);
+ char* original_path = (char*)GRPC_SLICE_START_PTR(path_slice);
memcpy(write_ptr, original_path, GRPC_SLICE_LENGTH(path_slice));
write_ptr += GRPC_SLICE_LENGTH(path_slice);
*write_ptr++ = '?';
- char *payload_bytes =
+ char* payload_bytes =
slice_buffer_to_string(&calld->send_message_cache.cache_buffer);
- grpc_base64_encode_core((char *)write_ptr, payload_bytes,
+ grpc_base64_encode_core((char*)write_ptr, payload_bytes,
batch->payload->send_message.send_message->length,
true /* url_safe */, false /* multi_line */);
gpr_free(payload_bytes);
/* remove trailing unused memory and add trailing 0 to terminate string */
- char *t = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
+ char* t = (char*)GRPC_SLICE_START_PTR(path_with_query_slice);
/* safe to use strlen since base64_encode will always add '\0' */
path_with_query_slice =
grpc_slice_sub_no_ref(path_with_query_slice, 0, strlen(t));
/* substitute previous path with the new path+query */
grpc_mdelem mdelem_path_and_query =
grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, path_with_query_slice);
- grpc_metadata_batch *b =
+ grpc_metadata_batch* b =
batch->payload->send_initial_metadata.send_initial_metadata;
return grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
mdelem_path_and_query);
}
-static void remove_if_present(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
+static void remove_if_present(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
grpc_metadata_batch_callouts_index idx) {
if (batch->idx.array[idx] != NULL) {
grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.array[idx]);
@@ -298,10 +298,10 @@ static void remove_if_present(grpc_exec_ctx *exec_ctx,
}
static void hc_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *channeld = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* channeld = (channel_data*)elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
if (batch->recv_initial_metadata) {
@@ -322,7 +322,7 @@ static void hc_start_transport_stream_op_batch(
batch->on_complete = &calld->recv_trailing_metadata_on_complete;
}
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
bool batch_will_be_handled_asynchronously = false;
if (batch->send_initial_metadata) {
// Decide which HTTP VERB to use. We use GET if the request is marked
@@ -422,10 +422,10 @@ done:
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
@@ -441,11 +441,11 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {}
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {}
-static grpc_mdelem scheme_from_args(const grpc_channel_args *args) {
+static grpc_mdelem scheme_from_args(const grpc_channel_args* args) {
unsigned i;
size_t j;
grpc_mdelem valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
@@ -466,7 +466,7 @@ static grpc_mdelem scheme_from_args(const grpc_channel_args *args) {
return GRPC_MDELEM_SCHEME_HTTP;
}
-static size_t max_payload_size_from_args(const grpc_channel_args *args) {
+static size_t max_payload_size_from_args(const grpc_channel_args* args) {
if (args != NULL) {
for (size_t i = 0; i < args->num_args; ++i) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET)) {
@@ -482,12 +482,12 @@ static size_t max_payload_size_from_args(const grpc_channel_args *args) {
return kMaxPayloadSizeForGet;
}
-static grpc_slice user_agent_from_args(const grpc_channel_args *args,
- const char *transport_name) {
+static grpc_slice user_agent_from_args(const grpc_channel_args* args,
+ const char* transport_name) {
gpr_strvec v;
size_t i;
int is_first = 1;
- char *tmp;
+ char* tmp;
grpc_slice result;
gpr_strvec_init(&v);
@@ -533,10 +533,10 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args,
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
@@ -550,9 +550,9 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
}
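max_payload_size_from_args() above reads GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET, so callers can lower the GET-eligibility cap when creating a channel. A sketch, again assuming grpc_channel_args_copy_and_add from channel_args.h:

static grpc_channel_args* example_cap_get_payload(void) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = (char*)GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET;
  arg.value.integer = 1024; /* only payloads up to 1 KiB may use the GET path */
  return grpc_channel_args_copy_and_add(NULL, &arg, 1);
}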
diff --git a/src/core/ext/filters/http/http_filters_plugin.cc b/src/core/ext/filters/http/http_filters_plugin.cc
index 8f5b856317..064e66e323 100644
--- a/src/core/ext/filters/http/http_filters_plugin.cc
+++ b/src/core/ext/filters/http/http_filters_plugin.cc
@@ -27,25 +27,25 @@
#include "src/core/lib/transport/transport_impl.h"
typedef struct {
- const grpc_channel_filter *filter;
- const char *control_channel_arg;
+ const grpc_channel_filter* filter;
+ const char* control_channel_arg;
} optional_filter;
static optional_filter compress_filter = {
&grpc_message_compress_filter, GRPC_ARG_ENABLE_PER_MESSAGE_COMPRESSION};
static bool is_building_http_like_transport(
- grpc_channel_stack_builder *builder) {
- grpc_transport *t = grpc_channel_stack_builder_get_transport(builder);
+ grpc_channel_stack_builder* builder) {
+ grpc_transport* t = grpc_channel_stack_builder_get_transport(builder);
return t != NULL && strstr(t->vtable->name, "http");
}
-static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
+static bool maybe_add_optional_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg) {
if (!is_building_http_like_transport(builder)) return true;
- optional_filter *filtarg = (optional_filter *)arg;
- const grpc_channel_args *channel_args =
+ optional_filter* filtarg = (optional_filter*)arg;
+ const grpc_channel_args* channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(
grpc_channel_args_find(channel_args, filtarg->control_channel_arg),
@@ -55,12 +55,12 @@ static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
: true;
}
-static bool maybe_add_required_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
+static bool maybe_add_required_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg) {
return is_building_http_like_transport(builder)
? grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL)
+ builder, (const grpc_channel_filter*)arg, NULL, NULL)
: true;
}
@@ -77,13 +77,13 @@ extern "C" void grpc_http_filters_init(void) {
maybe_add_optional_filter, &compress_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- maybe_add_required_filter, (void *)&grpc_http_client_filter);
+ maybe_add_required_filter, (void*)&grpc_http_client_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- maybe_add_required_filter, (void *)&grpc_http_client_filter);
+ maybe_add_required_filter, (void*)&grpc_http_client_filter);
grpc_channel_init_register_stage(
GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- maybe_add_required_filter, (void *)&grpc_http_server_filter);
+ maybe_add_required_filter, (void*)&grpc_http_server_filter);
}
extern "C" void grpc_http_filters_shutdown(void) {}
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index f785e1355d..949ff917d6 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -45,7 +45,7 @@ typedef enum {
} initial_metadata_state;
typedef struct call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
@@ -54,12 +54,12 @@ typedef struct call_data {
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
initial_metadata_state send_initial_metadata_state;
- grpc_error *cancel_error;
+ grpc_error* cancel_error;
grpc_closure start_send_message_batch_in_call_combiner;
- grpc_transport_stream_op_batch *send_message_batch;
+ grpc_transport_stream_op_batch* send_message_batch;
grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream;
- grpc_closure *original_send_message_on_complete;
+ grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
} call_data;
@@ -80,10 +80,10 @@ typedef struct channel_data {
uint32_t supported_stream_compression_algorithms;
} channel_data;
-static bool skip_compression(grpc_call_element *elem, uint32_t flags,
+static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *channeld = (channel_data *)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* channeld = (channel_data*)elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true;
@@ -99,15 +99,15 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags,
}
/** Filter initial metadata */
-static grpc_error *process_send_initial_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata,
- bool *has_compression_algorithm) GRPC_MUST_USE_RESULT;
-static grpc_error *process_send_initial_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *channeld = (channel_data *)elem->channel_data;
+static grpc_error* process_send_initial_metadata(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_metadata_batch* initial_metadata,
+ bool* has_compression_algorithm) GRPC_MUST_USE_RESULT;
+static grpc_error* process_send_initial_metadata(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_metadata_batch* initial_metadata, bool* has_compression_algorithm) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* channeld = (channel_data*)elem->channel_data;
*has_compression_algorithm = false;
grpc_stream_compression_algorithm stream_compression_algorithm =
GRPC_STREAM_COMPRESS_NONE;
@@ -117,7 +117,7 @@ static grpc_error *process_send_initial_metadata(
initial_metadata->idx.named.grpc_internal_stream_encoding_request->md;
if (!grpc_stream_compression_algorithm_parse(
GRPC_MDVALUE(md), &stream_compression_algorithm)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid stream compression algorithm: '%s' (unknown). Ignoring.",
val);
@@ -126,7 +126,7 @@ static grpc_error *process_send_initial_metadata(
}
if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset,
stream_compression_algorithm)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(
GPR_ERROR,
"Invalid stream compression algorithm: '%s' (previously disabled). "
@@ -152,7 +152,7 @@ static grpc_error *process_send_initial_metadata(
initial_metadata->idx.named.grpc_internal_encoding_request->md;
if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
&calld->compression_algorithm)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
gpr_free(val);
@@ -177,7 +177,7 @@ static grpc_error *process_send_initial_metadata(
*has_compression_algorithm = true;
}
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
/* hint compression algorithm */
if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) {
error = grpc_metadata_batch_add_tail(
@@ -211,30 +211,30 @@ static grpc_error *process_send_initial_metadata(
return error;
}
-static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void send_message_on_complete(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &calld->slices);
GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete,
GRPC_ERROR_REF(error));
}
-static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+static void send_message_batch_continue(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
// Note: The call to grpc_call_next_op() results in yielding the
// call combiner, so we need to clear calld->send_message_batch
// before we do that.
- grpc_transport_stream_op_batch *send_message_batch =
+ grpc_transport_stream_op_batch* send_message_batch =
calld->send_message_batch;
calld->send_message_batch = NULL;
grpc_call_next_op(exec_ctx, elem, send_message_batch);
}
-static void finish_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+static void finish_send_message(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
// Compress the data if appropriate.
grpc_slice_buffer tmp;
grpc_slice_buffer_init(&tmp);
@@ -244,21 +244,22 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
&calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- const char *algo_name;
+ const char* algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
- gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
- " bytes (%.2f%% savings)",
+ gpr_log(GPR_DEBUG,
+ "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
+ " bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}
grpc_slice_buffer_swap(&calld->slices, &tmp);
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- const char *algo_name;
+ const char* algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG,
@@ -282,10 +283,10 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
send_message_batch_continue(exec_ctx, elem);
}
-static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (calld->send_message_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
@@ -295,10 +296,10 @@ static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
-static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
- call_data *calld) {
+static grpc_error* pull_slice_from_send_message(grpc_exec_ctx* exec_ctx,
+ call_data* calld) {
grpc_slice incoming_slice;
- grpc_error *error = grpc_byte_stream_pull(
+ grpc_error* error = grpc_byte_stream_pull(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
&incoming_slice);
if (error == GRPC_ERROR_NONE) {
@@ -311,13 +312,13 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
-static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+static void continue_reading_send_message(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) {
- grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
+ grpc_error* error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
@@ -333,10 +334,10 @@ static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
}
// Async callback for grpc_byte_stream_next().
-static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_send_message_next_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
@@ -357,10 +358,10 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *unused) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void start_send_message_batch(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* unused) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (skip_compression(
elem,
calld->send_message_batch->payload->send_message.send_message->flags,
@@ -372,9 +373,9 @@ static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
}
static void compress_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
// Handle cancel_stream.
if (batch->cancel_stream) {
@@ -405,7 +406,7 @@ static void compress_start_transport_stream_op_batch(
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm;
- grpc_error *error = process_send_initial_metadata(
+ grpc_error* error = process_send_initial_metadata(
exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm);
@@ -453,10 +454,10 @@ done:
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->call_combiner = args->call_combiner;
calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
@@ -470,19 +471,19 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
GRPC_ERROR_UNREF(calld->cancel_error);
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *channeld = (channel_data *)elem->channel_data;
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* channeld = (channel_data*)elem->channel_data;
/* Configuration for message compression */
channeld->enabled_algorithms_bitset =
@@ -530,8 +531,8 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
const grpc_channel_filter grpc_message_compress_filter = {
compress_start_transport_stream_op_batch,
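
Nearly every hunk in this change is the same mechanical edit: pointer
declarations move from "type *name" to "type* name". A .clang-format
fragment that would produce the new alignment might look like the sketch
below; this is an assumption for illustration, not the repository's
verbatim configuration.

    # Assumed .clang-format fragment (illustrative only).
    BasedOnStyle: Google
    DerivePointerAlignment: false
    PointerAlignment: Left

Applying such a style tree-wide is typically done with
clang-format -i -style=file over the affected files, which also explains
why the diff reflows argument lists and comment continuations.
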
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.h b/src/core/ext/filters/http/message_compress/message_compress_filter.h
index 92771d9858..79a2815655 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.h
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.h
@@ -56,4 +56,4 @@ extern const grpc_channel_filter grpc_message_compress_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H \
- */
+ */
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 03958136b4..5cfe5acced 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -32,7 +32,7 @@
#define EXPECTED_CONTENT_TYPE_LENGTH (sizeof(EXPECTED_CONTENT_TYPE) - 1)
typedef struct call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@@ -42,15 +42,15 @@ typedef struct call_data {
/* flag to ensure payload_bin is delivered only once */
bool payload_bin_delivered;
- grpc_metadata_batch *recv_initial_metadata;
- uint32_t *recv_initial_metadata_flags;
+ grpc_metadata_batch* recv_initial_metadata;
+ uint32_t* recv_initial_metadata_flags;
/** Closure to call when finished with the hs_on_recv hook */
- grpc_closure *on_done_recv;
+ grpc_closure* on_done_recv;
  /** Closure to call when we retrieve the message read from the path URI
   */
- grpc_closure *recv_message_ready;
- grpc_closure *on_complete;
- grpc_byte_stream **pp_recv_message;
+ grpc_closure* recv_message_ready;
+ grpc_closure* on_complete;
+ grpc_byte_stream** pp_recv_message;
grpc_slice_buffer read_slice_buffer;
grpc_slice_buffer_stream read_stream;
@@ -62,11 +62,13 @@ typedef struct call_data {
grpc_closure hs_recv_message_ready;
} call_data;
-typedef struct channel_data { uint8_t unused; } channel_data;
+typedef struct channel_data {
+ uint8_t unused;
+} channel_data;
-static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_metadata_batch *b) {
+static grpc_error* server_filter_outgoing_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_metadata_batch* b) {
if (b->idx.named.grpc_message != NULL) {
grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
GRPC_MDVALUE(b->idx.named.grpc_message->md),
@@ -82,8 +84,8 @@ static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void add_error(const char *error_name, grpc_error **cumulative,
- grpc_error *new_err) {
+static void add_error(const char* error_name, grpc_error** cumulative,
+ grpc_error* new_err) {
if (new_err == GRPC_ERROR_NONE) return;
if (*cumulative == GRPC_ERROR_NONE) {
*cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name);
@@ -91,12 +93,12 @@ static void add_error(const char *error_name, grpc_error **cumulative,
*cumulative = grpc_error_add_child(*cumulative, new_err);
}
-static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_metadata_batch *b) {
- call_data *calld = (call_data *)elem->call_data;
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *error_name = "Failed processing incoming headers";
+static grpc_error* server_filter_incoming_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_metadata_batch* b) {
+ call_data* calld = (call_data*)elem->call_data;
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* error_name = "Failed processing incoming headers";
if (b->idx.named.method != NULL) {
if (grpc_mdelem_eq(b->idx.named.method->md, GRPC_MDELEM_METHOD_POST)) {
@@ -183,7 +185,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
         see it without a proxy, so log for now. */
- char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
+ char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
GPR_DUMP_ASCII);
gpr_log(GPR_INFO, "Unexpected content-type '%s'", val);
gpr_free(val);
@@ -203,7 +205,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
   * query parameter, which is the base64-encoded request payload. */
const char k_query_separator = '?';
grpc_slice path_slice = GRPC_MDVALUE(b->idx.named.path->md);
- uint8_t *path_ptr = (uint8_t *)GRPC_SLICE_START_PTR(path_slice);
+ uint8_t* path_ptr = (uint8_t*)GRPC_SLICE_START_PTR(path_slice);
size_t path_length = GRPC_SLICE_LENGTH(path_slice);
/* offset of the character '?' */
size_t offset = 0;
@@ -226,7 +228,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_add(
&calld->read_slice_buffer,
grpc_base64_decode_with_len(
- exec_ctx, (const char *)GRPC_SLICE_START_PTR(query_slice),
+ exec_ctx, (const char*)GRPC_SLICE_START_PTR(query_slice),
GRPC_SLICE_LENGTH(query_slice), k_url_safe));
grpc_slice_buffer_stream_init(&calld->read_stream,
&calld->read_slice_buffer, 0);
@@ -238,15 +240,15 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
}
if (b->idx.named.host != NULL && b->idx.named.authority == NULL) {
- grpc_linked_mdelem *el = b->idx.named.host;
+ grpc_linked_mdelem* el = b->idx.named.host;
grpc_mdelem md = GRPC_MDELEM_REF(el->md);
grpc_metadata_batch_remove(exec_ctx, b, el);
- add_error(
- error_name, &error,
- grpc_metadata_batch_add_head(
- exec_ctx, b, el, grpc_mdelem_from_slices(
- exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
+ add_error(error_name, &error,
+ grpc_metadata_batch_add_head(
+ exec_ctx, b, el,
+ grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
GRPC_MDELEM_UNREF(exec_ctx, md);
}
@@ -261,10 +263,10 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
return error;
}
-static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void hs_on_recv(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -274,15 +276,15 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv, err);
}
-static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void hs_on_complete(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
- : (grpc_byte_stream *)&calld->read_stream;
+ : (grpc_byte_stream*)&calld->read_stream;
// Re-enter call combiner for recv_message_ready, since the surface
// code will release the call combiner for each callback it receives.
GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
@@ -294,10 +296,10 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_CLOSURE_RUN(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
}
-static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void hs_recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (calld->seen_path_with_query) {
// Do nothing. This is probably a GET request, and payload will be
// returned in hs_on_complete callback.
@@ -310,15 +312,15 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
}
}
-static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+static grpc_error* hs_mutate_op(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
/* grab pointers to our data from the call element */
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
if (op->send_initial_metadata) {
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *error_name = "Failed sending initial metadata";
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* error_name = "Failed sending initial metadata";
add_error(
error_name, &error,
grpc_metadata_batch_add_head(
@@ -364,7 +366,7 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
}
if (op->send_trailing_metadata) {
- grpc_error *error = server_filter_outgoing_metadata(
+ grpc_error* error = server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata);
if (error != GRPC_ERROR_NONE) return error;
@@ -374,11 +376,11 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
}
static void hs_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
- grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
+ grpc_error* error = hs_mutate_op(exec_ctx, elem, op);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
calld->call_combiner);
@@ -389,11 +391,11 @@ static void hs_start_transport_stream_op_batch(
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
/* grab pointers to our data from the call element */
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
/* initialize members */
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
@@ -407,24 +409,24 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_stream_op_batch,
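
The http_server filter above uses a recurring gRPC-core idiom: it saves the
closure the surface supplied for an op (on_done_recv, recv_message_ready,
on_complete), substitutes its own closure, and forwards to the saved one
once its work is done. A minimal sketch of that wrap-and-forward pattern,
using only APIs that appear in this diff; names ending in _example are
hypothetical:

    /* Assumes the usual core headers, e.g.
       src/core/lib/channel/channel_stack.h. */
    typedef struct example_call_data {
      grpc_closure* original_ready; /* closure saved from the surface */
      grpc_closure wrapped_ready;   /* our hook; runs first */
    } example_call_data;

    static void example_on_ready(grpc_exec_ctx* exec_ctx, void* user_data,
                                 grpc_error* err) {
      grpc_call_element* elem = (grpc_call_element*)user_data;
      example_call_data* calld = (example_call_data*)elem->call_data;
      /* ... inspect or rewrite the just-received metadata here ... */
      /* Closure callbacks do not own err, so take a ref when forwarding. */
      GRPC_CLOSURE_RUN(exec_ctx, calld->original_ready, GRPC_ERROR_REF(err));
    }

    /* In the batch-mutation step, swap our closure in before sending the op
       down the stack. wrapped_ready is initialized in init_call_elem with
       GRPC_CLOSURE_INIT(&calld->wrapped_ready, example_on_ready, elem,
       grpc_schedule_on_exec_ctx), mirroring the filter above. */
    static void example_hook(grpc_call_element* elem,
                             grpc_transport_stream_op_batch* op) {
      example_call_data* calld = (example_call_data*)elem->call_data;
      if (op->recv_initial_metadata) {
        calld->original_ready =
            op->payload->recv_initial_metadata.recv_initial_metadata_ready;
        op->payload->recv_initial_metadata.recv_initial_metadata_ready =
            &calld->wrapped_ready;
      }
    }
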
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index ca8a3b2a13..77b086c324 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -43,21 +43,21 @@ typedef struct call_data {
/* stores the recv_initial_metadata op's ready closure, which we wrap with our
* own (on_initial_md_ready) in order to capture the incoming initial metadata
* */
- grpc_closure *ops_recv_initial_metadata_ready;
+ grpc_closure* ops_recv_initial_metadata_ready;
/* to get notified of the availability of the incoming initial metadata. */
grpc_closure on_initial_md_ready;
- grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata;
} call_data;
typedef struct channel_data {
intptr_t id; /**< an id unique to the channel */
} channel_data;
-static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void on_initial_md_ready(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != NULL) {
@@ -85,10 +85,10 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -108,10 +108,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@@ -136,12 +136,12 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = (channel_data *)elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@@ -158,8 +158,8 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
grpc_load_reporting_call_data lr_call_data = {
@@ -173,11 +173,11 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
*/
}
-static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
- void *user_data,
+static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx* exec_ctx,
+ void* user_data,
grpc_mdelem md) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@@ -186,10 +186,10 @@ static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
}
static void lr_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
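
lr_trailing_md_filter above is a metadata-batch filter callback: for each
element it returns either GRPC_FILTERED_REMOVE() to drop the mdelem or a
pass-through result to keep it. Below is a sketch of such a callback plus
how it is applied; grpc_metadata_batch_filter and GRPC_FILTERED_MDELEM are
assumed from src/core/lib/transport/metadata_batch.h and are not shown in
this diff:

    static grpc_filtered_mdelem strip_cost_example(grpc_exec_ctx* exec_ctx,
                                                   void* user_data,
                                                   grpc_mdelem md) {
      if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
        return GRPC_FILTERED_REMOVE(); /* drop this element from the batch */
      }
      return GRPC_FILTERED_MDELEM(md); /* keep it unchanged */
    }

    /* Sketch of applying the callback to a trailing-metadata batch. */
    static void apply_filter_example(grpc_exec_ctx* exec_ctx,
                                     grpc_call_element* elem,
                                     grpc_metadata_batch* batch) {
      grpc_error* err = grpc_metadata_batch_filter(
          exec_ctx, batch, strip_cost_example, elem,
          "example trailing metadata filtering error");
      GRPC_ERROR_UNREF(err); /* real code would propagate the error */
    }
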
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.h b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
index 94d19cc249..356f8b8e66 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
@@ -33,4 +33,4 @@ extern const grpc_channel_filter grpc_server_load_reporting_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H \
- */
+ */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
index 223fb3ee8b..b26cbe3579 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
@@ -32,17 +32,17 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
-static bool is_load_reporting_enabled(const grpc_channel_args *a) {
+static bool is_load_reporting_enabled(const grpc_channel_args* a) {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(a, GRPC_ARG_ENABLE_LOAD_REPORTING), false);
}
static bool maybe_add_server_load_reporting_filter(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
- const grpc_channel_args *args =
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- const grpc_channel_filter *filter = (const grpc_channel_filter *)arg;
- grpc_channel_stack_builder_iterator *it =
+ const grpc_channel_filter* filter = (const grpc_channel_filter*)arg;
+ grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_iterator_find(builder, filter->name);
const bool already_has_load_reporting_filter =
!grpc_channel_stack_builder_iterator_is_end(it);
@@ -55,7 +55,7 @@ static bool maybe_add_server_load_reporting_filter(
}
grpc_arg grpc_load_reporting_enable_arg() {
- return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING,
+ return grpc_channel_arg_integer_create((char*)GRPC_ARG_ENABLE_LOAD_REPORTING,
1);
}
@@ -64,7 +64,7 @@ grpc_arg grpc_load_reporting_enable_arg() {
extern "C" void grpc_server_load_reporting_plugin_init(void) {
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_server_load_reporting_filter,
- (void *)&grpc_server_load_reporting_filter);
+ (void*)&grpc_server_load_reporting_filter);
}
extern "C" void grpc_server_load_reporting_plugin_shutdown() {}
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
index 65e254eb53..a6448ce97e 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
@@ -49,11 +49,11 @@ typedef struct grpc_load_reporting_call_data {
/** Only valid when \a source is \a GRPC_LR_POINT_CALL_DESTRUCTION, that is,
* once the call has completed */
- const grpc_call_final_info *final_info;
+ const grpc_call_final_info* final_info;
- const char *initial_md_string; /**< value string for LR's initial md key */
- const char *trailing_md_string; /**< value string for LR's trailing md key */
- const char *method_name; /**< Corresponds to :path header */
+ const char* initial_md_string; /**< value string for LR's initial md key */
+ const char* trailing_md_string; /**< value string for LR's trailing md key */
+ const char* method_name; /**< Corresponds to :path header */
} grpc_load_reporting_call_data;
/** Return a \a grpc_arg enabling load reporting */
@@ -64,4 +64,4 @@ grpc_arg grpc_load_reporting_enable_arg();
#endif
#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H \
- */
+ */
diff --git a/src/core/ext/filters/workarounds/workaround_utils.cc b/src/core/ext/filters/workarounds/workaround_utils.cc
index e600fbee67..9db42fbeee 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.cc
+++ b/src/core/ext/filters/workarounds/workaround_utils.cc
@@ -21,26 +21,26 @@
user_agent_parser ua_parser[GRPC_MAX_WORKAROUND_ID];
-static void destroy_user_agent_md(void *user_agent_md) {
+static void destroy_user_agent_md(void* user_agent_md) {
gpr_free(user_agent_md);
}
-grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md) {
- grpc_workaround_user_agent_md *user_agent_md =
- (grpc_workaround_user_agent_md *)grpc_mdelem_get_user_data(
+grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md) {
+ grpc_workaround_user_agent_md* user_agent_md =
+ (grpc_workaround_user_agent_md*)grpc_mdelem_get_user_data(
md, destroy_user_agent_md);
if (NULL != user_agent_md) {
return user_agent_md;
}
- user_agent_md = (grpc_workaround_user_agent_md *)gpr_malloc(
+ user_agent_md = (grpc_workaround_user_agent_md*)gpr_malloc(
sizeof(grpc_workaround_user_agent_md));
  for (int i = 0; i < GRPC_MAX_WORKAROUND_ID; i++) {
    /* Default to inactive so entries with no registered parser are not left
       uninitialized after gpr_malloc. */
    user_agent_md->workaround_active[i] =
        ua_parser[i] ? ua_parser[i](md) : false;
  }
- grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void *)user_agent_md);
+ grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void*)user_agent_md);
return user_agent_md;
}
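
grpc_parse_user_agent above caches its result on the mdelem itself via the
user-data slot, so the user-agent string is parsed once per interned
metadata element rather than once per call. The same compute-once idiom in
isolation; example_info and its contents are hypothetical:

    typedef struct {
      bool flag;
    } example_info;

    static void destroy_example_info(void* p) { gpr_free(p); }

    static example_info* get_example_info(grpc_mdelem md) {
      example_info* info =
          (example_info*)grpc_mdelem_get_user_data(md, destroy_example_info);
      if (info != NULL) return info; /* cache hit: computed earlier */
      info = (example_info*)gpr_malloc(sizeof(*info));
      info->flag = false; /* ... real computation would go here ... */
      grpc_mdelem_set_user_data(md, destroy_example_info, (void*)info);
      return info;
    }
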
diff --git a/src/core/ext/filters/workarounds/workaround_utils.h b/src/core/ext/filters/workarounds/workaround_utils.h
index 3913cae6b2..a954ad4001 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.h
+++ b/src/core/ext/filters/workarounds/workaround_utils.h
@@ -32,7 +32,7 @@ typedef struct grpc_workaround_user_agent_md {
bool workaround_active[GRPC_MAX_WORKAROUND_ID];
} grpc_workaround_user_agent_md;
-grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md);
+grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md);
typedef bool (*user_agent_parser)(grpc_mdelem);
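
A user_agent_parser (the typedef above) inspects the user-agent mdelem and
returns true when its workaround should be active. A sketch of one, using
grpc_dump_slice and gpr_free as seen earlier in this diff; the user-agent
substring and the parser itself are hypothetical:

    #include <string.h>

    static bool example_ua_parser(grpc_mdelem md) {
      char* ua = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_ASCII);
      bool needs_workaround = strstr(ua, "old-client/1.0") != NULL;
      gpr_free(ua);
      return needs_workaround;
    }
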