author     Muxi Yan <mxyan@google.com>    2017-10-06 18:47:09 -0700
committer  Muxi Yan <mxyan@google.com>    2017-10-06 18:47:09 -0700
commit     03fc857a6f3ad1ee0a6f46cd19b5c96e56c6fd3b (patch)
tree       38e03428954c3abdf2c88cafa9ffbd205789976b /src/core/ext/filters/client_channel
parent     63602748415fb836afd7a9685fbe5e85bf5ebfed (diff)
parent     03bd5daf5cd36373152cb6b06a5f259db1331e87 (diff)
Merge remote-tracking branch 'upstream/master' into fix-stream-compression-config-interface
Diffstat (limited to 'src/core/ext/filters/client_channel')
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc (renamed from src/core/ext/filters/client_channel/channel_connectivity.c)  6
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc (renamed from src/core/ext/filters/client_channel/client_channel.c)  52
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.h  10
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.cc (renamed from src/core/ext/filters/client_channel/client_channel_factory.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.h  10
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.cc (renamed from src/core/ext/filters/client_channel/client_channel_plugin.c)  4
-rw-r--r--  src/core/ext/filters/client_channel/connector.cc (renamed from src/core/ext/filters/client_channel/connector.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/connector.h  12
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.cc (renamed from src/core/ext/filters/client_channel/http_connect_handshaker.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.h  10
-rw-r--r--  src/core/ext/filters/client_channel/http_proxy.cc (renamed from src/core/ext/filters/client_channel/http_proxy.c)  28
-rw-r--r--  src/core/ext/filters/client_channel/http_proxy.h  10
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc (renamed from src/core/ext/filters/client_channel/lb_policy.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h  8
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h  8
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c)  481
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h  10
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h  8
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h  8
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c)  11
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h  3
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (renamed from src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c)  4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (renamed from src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c)  4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.cc (renamed from src/core/ext/filters/client_channel/lb_policy_factory.c)  2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.h  12
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_registry.cc (renamed from src/core/ext/filters/client_channel/lb_policy_registry.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_registry.h  10
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.cc (renamed from src/core/ext/filters/client_channel/parse_address.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.h  10
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper.cc (renamed from src/core/ext/filters/client_channel/proxy_mapper.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper.h  10
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.cc (renamed from src/core/ext/filters/client_channel/proxy_mapper_registry.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.h  10
-rw-r--r--  src/core/ext/filters/client_channel/resolver.cc (renamed from src/core/ext/filters/client_channel/resolver.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/resolver.h  8
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c)  38
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h  8
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h  8
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c)  41
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc (renamed from src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c)  4
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h  8
-rw-r--r--  src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc (renamed from src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c)  4
-rw-r--r--  src/core/ext/filters/client_channel/resolver_factory.cc (renamed from src/core/ext/filters/client_channel/resolver_factory.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/resolver_factory.h  10
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.cc (renamed from src/core/ext/filters/client_channel/resolver_registry.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.h  10
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.cc (renamed from src/core/ext/filters/client_channel/retry_throttle.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.h  10
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc (renamed from src/core/ext/filters/client_channel/subchannel.c)  57
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h  10
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.cc (renamed from src/core/ext/filters/client_channel/subchannel_index.c)  11
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.h  10
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.cc (renamed from src/core/ext/filters/client_channel/uri_parser.c)  0
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.h  10
61 files changed, 644 insertions, 344 deletions
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 3844b98021..31a8fc39ce 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/surface/channel.h"
+#include <inttypes.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -186,8 +188,8 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
- gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC),
- &wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timespec_to_millis_round_up(wa->deadline),
+ &wa->w->on_timeout);
gpr_free(wa);
}
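
The hunk above is one instance of the timer-API migration that runs through this commit: deadlines move from gpr_timespec to grpc_millis, and grpc_timer_init no longer takes a separate current-time argument. A plain-C sketch of the round-up conversion, assuming only the arithmetic matters here (the real grpc_timespec_to_millis_round_up also handles clock-type conversion and saturation):

#include <stdint.h>

/* Illustrative only: round a {seconds, nanoseconds} value up to whole
 * milliseconds -- rounding up so a converted deadline can never fire
 * earlier than the original timespec. */
static int64_t timespec_to_millis_round_up(int64_t tv_sec, int32_t tv_nsec) {
  return tv_sec * 1000 + (tv_nsec + 999999) / 1000000;
}
/* e.g. {1 s, 500000000 ns} -> 1500 ms; {0 s, 1 ns} -> 1 ms */
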
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.cc
index 016199b1f4..22c2bc8880 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -16,8 +16,11 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/client_channel.h"
+#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@@ -68,7 +71,7 @@ typedef enum {
typedef struct {
gpr_refcount refs;
- gpr_timespec timeout;
+ grpc_millis timeout;
wait_for_ready_value wait_for_ready;
} method_parameters;
@@ -98,17 +101,18 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
-static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
+static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
char *buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
char *decimal_point = strchr(buf, '.');
+ int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
- timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
- if (timeout->tv_nsec == -1) {
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
gpr_free(buf);
return false;
}
@@ -127,24 +131,25 @@ static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
gpr_free(buf);
return false;
}
- timeout->tv_nsec *= multiplier;
+ nanos *= multiplier;
}
- timeout->tv_sec = gpr_parse_nonnegative_int(buf);
+ int seconds = gpr_parse_nonnegative_int(buf);
gpr_free(buf);
- if (timeout->tv_sec == -1) return false;
+ if (seconds == -1) return false;
+ *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
return true;
}
static void *method_parameters_create_from_json(const grpc_json *json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
- gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
+ grpc_millis timeout = 0;
for (grpc_json *field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
} else if (strcmp(field->key, "timeout") == 0) {
- if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate.
+ if (timeout > 0) return NULL; // Duplicate.
if (!parse_timeout(field, &timeout)) return NULL;
}
}
@@ -823,7 +828,7 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
grpc_call_stack *owning_call;
grpc_call_combiner *call_combiner;
@@ -976,11 +981,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
// If the deadline from the service config is shorter than the one
// from the client API, reset the deadline timer.
if (chand->deadline_checking_enabled &&
- gpr_time_cmp(calld->method_params->timeout,
- gpr_time_0(GPR_TIMESPAN)) != 0) {
- const gpr_timespec per_method_deadline =
- gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->method_params->timeout != 0) {
+ const grpc_millis per_method_deadline =
+ grpc_timespec_to_millis_round_up(calld->call_start_time) +
+ calld->method_params->timeout;
+ if (per_method_deadline < calld->deadline) {
calld->deadline = per_method_deadline;
grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
@@ -995,13 +1000,14 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
- .pollent = calld->pollent,
- .path = calld->path,
- .start_time = calld->call_start_time,
- .deadline = calld->deadline,
- .arena = calld->arena,
- .context = calld->subchannel_call_context,
- .call_combiner = calld->call_combiner};
+ calld->pollent, // pollent
+ calld->path, // path
+ calld->call_start_time, // start_time
+ calld->deadline, // deadline
+ calld->arena, // arena
+ calld->subchannel_call_context, // context
+ calld->call_combiner // call_combiner
+ };
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args,
&calld->subchannel_call);
@@ -1418,7 +1424,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
- calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ calld->deadline = args->deadline;
calld->arena = args->arena;
calld->owning_call = args->call_stack;
calld->call_combiner = args->call_combiner;
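
The client_channel.cc hunks above switch the service-config timeout from gpr_timespec (tv_sec/tv_nsec) to a single grpc_millis value, computed as seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS. A self-contained plain-C sketch of that parse-and-convert step (parse_timeout_millis is a hypothetical helper written for illustration, not the gRPC function):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Parse a service-config timeout string such as "1.5s" into milliseconds,
 * mirroring the seconds * 1000 + nanos / 1000000 arithmetic used above. */
static bool parse_timeout_millis(const char *value, int64_t *millis) {
  size_t len = strlen(value);
  if (len < 2 || value[len - 1] != 's') return false; /* must end in 's' */
  char *buf = (char *)malloc(len);
  memcpy(buf, value, len - 1);
  buf[len - 1] = '\0'; /* drop trailing 's' */
  int64_t nanos = 0;
  char *dot = strchr(buf, '.');
  if (dot != NULL) {
    *dot = '\0';
    size_t digits = strlen(dot + 1);
    if (digits == 0 || digits > 9) { free(buf); return false; }
    nanos = atoll(dot + 1);
    while (digits++ < 9) nanos *= 10; /* scale the fraction to nanoseconds */
  }
  int64_t seconds = atoll(buf);
  free(buf);
  *millis = seconds * 1000 + nanos / 1000000;
  return true; /* "1.5s" -> 1500, "0.25s" -> 250, "30s" -> 30000 */
}
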
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index c99f0092e9..b32f378c6c 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -28,6 +28,10 @@ extern grpc_tracer_flag grpc_client_channel_trace;
// Channel arg key for server URI string.
#define GRPC_ARG_SERVER_URI "grpc.server_uri"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A client channel is a channel that begins disconnected, and can connect
to some endpoint on demand. If that endpoint disconnects, it will be
connected to again later.
@@ -52,4 +56,8 @@ void grpc_client_channel_watch_connectivity_state(
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
\ No newline at end of file
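
client_channel.h above shows the pattern this commit applies to every header in the directory: the declarations are wrapped in an extern "C" block so that, after the .c -> .cc renames, the C++ translation units still export unmangled C symbols. A minimal standalone header illustrating the same guards (the names are hypothetical, not gRPC APIs):

/* example_filter.h -- hypothetical header showing the linkage-guard pattern. */
#ifndef EXAMPLE_FILTER_H
#define EXAMPLE_FILTER_H

#ifdef __cplusplus
extern "C" {
#endif

/* These declarations keep C linkage (no C++ name mangling), so they can be
 * defined in a .cc file and still be referenced from C code. */
void example_filter_init(void);
void example_filter_shutdown(void);

#ifdef __cplusplus
}
#endif

#endif /* EXAMPLE_FILTER_H */
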
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.c b/src/core/ext/filters/client_channel/client_channel_factory.cc
index 57eac8f875..57eac8f875 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.c
+++ b/src/core/ext/filters/client_channel/client_channel_factory.cc
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h
index ce6266c769..bad8b97042 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.h
+++ b/src/core/ext/filters/client_channel/client_channel_factory.h
@@ -27,6 +27,10 @@
// Channel arg key for client channel factory.
#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
typedef struct grpc_client_channel_factory_vtable
grpc_client_channel_factory_vtable;
@@ -74,4 +78,8 @@ grpc_channel *grpc_client_channel_factory_create_channel(
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory *factory);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
\ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.c b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index 1f71c5a7f9..4431d11519 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.c
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -65,7 +65,7 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
return true;
}
-void grpc_client_channel_init(void) {
+extern "C" void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
grpc_resolver_registry_init();
grpc_retry_throttle_map_init();
@@ -84,7 +84,7 @@ void grpc_client_channel_init(void) {
#endif
}
-void grpc_client_channel_shutdown(void) {
+extern "C" void grpc_client_channel_shutdown(void) {
grpc_subchannel_index_shutdown();
grpc_channel_init_shutdown();
grpc_proxy_mapper_registry_shutdown();
diff --git a/src/core/ext/filters/client_channel/connector.c b/src/core/ext/filters/client_channel/connector.cc
index c258468e58..c258468e58 100644
--- a/src/core/ext/filters/client_channel/connector.c
+++ b/src/core/ext/filters/client_channel/connector.cc
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index 7f3d4a1cc0..b91c93e446 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_connector grpc_connector;
typedef struct grpc_connector_vtable grpc_connector_vtable;
@@ -34,7 +38,7 @@ typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
/** deadline for connection */
- gpr_timespec deadline;
+ grpc_millis deadline;
/** channel arguments (to be passed to transport) */
const grpc_channel_args *channel_args;
} grpc_connect_in_args;
@@ -70,4 +74,8 @@ void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
grpc_error *why);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */
\ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.c b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index 418bb41ef6..418bb41ef6 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.h b/src/core/ext/filters/client_channel/http_connect_handshaker.h
index 928a23dc93..5042c61bec 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.h
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.h
@@ -28,7 +28,15 @@
/// seperated by colons.
#define GRPC_ARG_HTTP_CONNECT_HEADERS "grpc.http_connect_headers"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Registers handshaker factory.
void grpc_http_connect_register_handshaker_factory();
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
\ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.cc
index c507a2750e..a16b44d3dc 100644
--- a/src/core/ext/filters/client_channel/http_proxy.c
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* user_cred = NULL;
*name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
if (*name_to_resolve == NULL) return false;
+ char* no_proxy_str = NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
if (uri == NULL || uri->path[0] == '\0') {
@@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
"'http_proxy' environment variable set, but cannot "
"parse server URI '%s' -- not using proxy",
server_uri);
- if (uri != NULL) {
- gpr_free(user_cred);
- grpc_uri_destroy(uri);
- }
- return false;
+ goto no_use_proxy;
}
if (strcmp(uri->scheme, "unix") == 0) {
gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
server_uri);
- gpr_free(user_cred);
- grpc_uri_destroy(uri);
- return false;
+ goto no_use_proxy;
}
- char* no_proxy_str = gpr_getenv("no_proxy");
+ no_proxy_str = gpr_getenv("no_proxy");
if (no_proxy_str != NULL) {
static const char* NO_PROXY_SEPARATOR = ",";
bool use_proxy = true;
@@ -147,12 +142,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
gpr_free(no_proxy_hosts);
gpr_free(server_host);
gpr_free(server_port);
- if (!use_proxy) {
- grpc_uri_destroy(uri);
- gpr_free(*name_to_resolve);
- *name_to_resolve = NULL;
- return false;
- }
+ if (!use_proxy) goto no_use_proxy;
}
}
grpc_arg args_to_add[2];
@@ -173,9 +163,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
} else {
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
}
- gpr_free(user_cred);
grpc_uri_destroy(uri);
+ gpr_free(user_cred);
return true;
+no_use_proxy:
+ if (uri != NULL) grpc_uri_destroy(uri);
+ gpr_free(*name_to_resolve);
+ *name_to_resolve = NULL;
+ gpr_free(user_cred);
+ return false;
}
static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
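
The http_proxy.cc hunks above collapse the duplicated gpr_free/grpc_uri_destroy early returns into a single no_use_proxy: cleanup label. A generic plain-C sketch of that single-exit cleanup idiom (the function and resources here are made up for illustration, not gRPC code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Every early-exit path jumps to one label that releases whatever has been
 * allocated so far, instead of repeating the frees at each return. */
static bool load_first_line(const char *path, char **out) {
  char *buf = NULL;
  *out = NULL;
  FILE *f = fopen(path, "r");
  if (f == NULL) goto fail;                  /* nothing allocated yet */
  buf = (char *)malloc(256);
  if (buf == NULL) goto fail;                /* only the file is open */
  if (fgets(buf, 256, f) == NULL) goto fail; /* both buffer and file to clean */
  *out = buf;
  fclose(f);
  return true;
fail:
  free(buf);                                 /* free(NULL) is a no-op */
  if (f != NULL) fclose(f);
  return false;
}
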
diff --git a/src/core/ext/filters/client_channel/http_proxy.h b/src/core/ext/filters/client_channel/http_proxy.h
index 34694931d0..65d52334af 100644
--- a/src/core/ext/filters/client_channel/http_proxy.h
+++ b/src/core/ext/filters/client_channel/http_proxy.h
@@ -19,6 +19,14 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_register_http_proxy_mapper();
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */
\ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/lb_policy.c b/src/core/ext/filters/client_channel/lb_policy.cc
index 8e6673d737..8e6673d737 100644
--- a/src/core/ext/filters/client_channel/lb_policy.c
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 645d51e138..010299c2f4 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** A load balancing policy: specified by a vtable and a struct (which
is expected to be extended to contain some parameters) */
typedef struct grpc_lb_policy grpc_lb_policy;
@@ -204,4 +208,8 @@ void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
const grpc_lb_policy_args *lb_policy_args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 7ad322902b..7ad322902b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
index 51e30b20b8..c6a0d69c3f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -21,7 +21,15 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_client_load_reporting_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
*/
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 85ef7894ea..53fa0fff04 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -80,6 +80,7 @@
headers. Therefore, sockaddr.h must always be included first */
#include "src/core/lib/iomgr/sockaddr.h"
+#include <inttypes.h>
#include <limits.h>
#include <string.h>
@@ -102,6 +103,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -111,7 +113,6 @@
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
@@ -123,6 +124,7 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
+#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@@ -299,6 +301,10 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
+ /** timeout in milliseconds for before using fallback backend addresses.
+ * 0 means not using fallback. */
+ int lb_fallback_timeout_ms;
+
/** for communicating with the LB server */
grpc_channel *lb_channel;
@@ -325,6 +331,9 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
+ /** stores the backend addresses from the resolver */
+ grpc_lb_addresses *fallback_backend_addresses;
+
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@@ -345,6 +354,9 @@ typedef struct glb_lb_policy {
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
+ /** is \a lb_fallback_timer active? */
+ bool fallback_timer_active;
+
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@@ -354,9 +366,6 @@ typedef struct glb_lb_policy {
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
- /* Finished sending initial request. */
- grpc_closure lb_on_sent_initial_request;
-
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
@@ -367,6 +376,9 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
+ /* LB fallback timer callback. */
+ grpc_closure lb_on_fallback;
+
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@@ -385,19 +397,21 @@ typedef struct glb_lb_policy {
grpc_slice lb_call_status_details;
/** LB call retry backoff state */
- gpr_backoff lb_call_backoff_state;
+ grpc_backoff lb_call_backoff_state;
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
- bool initial_request_sent;
+ /** LB fallback timer */
+ grpc_timer lb_fallback_timer;
+
bool seen_initial_response;
/* Stats for client-side load reporting. Should be unreffed and
* recreated whenever lb_call is replaced. */
grpc_grpclb_client_stats *client_stats;
/* Interval and timer for next client load report. */
- gpr_timespec client_stats_report_interval;
+ grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
bool client_load_report_timer_pending;
bool last_client_load_report_counters_were_zero;
@@ -443,11 +457,11 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
static void *lb_token_copy(void *token) {
return token == NULL
? NULL
- : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
+ : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
if (token != NULL) {
- GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
+ GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
}
}
static int lb_token_cmp(void *token1, void *token2) {
@@ -536,6 +550,32 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
+/* Returns the backend addresses extracted from the given addresses */
+static grpc_lb_addresses *extract_backend_addresses_locked(
+ grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
+ /* first pass: count the number of backend addresses */
+ size_t num_backends = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) {
+ ++num_backends;
+ }
+ }
+ /* second pass: actually populate the addresses and (empty) LB tokens */
+ grpc_lb_addresses *backend_addresses =
+ grpc_lb_addresses_create(num_backends, &lb_token_vtable);
+ size_t num_copied = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) continue;
+ const grpc_resolved_address *addr = &addresses->addresses[i].address;
+ grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
+ addr->len, false /* is_balancer */,
+ NULL /* balancer_name */,
+ (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+ ++num_copied;
+ }
+ return backend_addresses;
+}
+
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@@ -603,35 +643,38 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
- // Look at the index into the serverlist to see if we should drop this call.
- grpc_grpclb_server *server =
- glb_policy->serverlist->servers[glb_policy->serverlist_index++];
- if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
- glb_policy->serverlist_index = 0; // Wrap-around.
- }
- if (server->drop) {
- // Not using the RR policy, so unref it.
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
- (intptr_t)wc_arg->rr_policy);
+ // Check for drops if we are not using fallback backend addresses.
+ if (glb_policy->serverlist != NULL) {
+ // Look at the index into the serverlist to see if we should drop this call.
+ grpc_grpclb_server *server =
+ glb_policy->serverlist->servers[glb_policy->serverlist_index++];
+ if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
+ glb_policy->serverlist_index = 0; // Wrap-around.
}
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
- // Update client load reporting stats to indicate the number of
- // dropped calls. Note that we have to do this here instead of in
- // the client_load_reporting filter, because we do not create a
- // subchannel call (and therefore no client_load_reporting filter)
- // for dropped calls.
- grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
- wc_arg->client_stats);
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
- if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ if (server->drop) {
+ // Not using the RR policy, so unref it.
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+ // Update client load reporting stats to indicate the number of
+ // dropped calls. Note that we have to do this here instead of in
+ // the client_load_reporting filter, because we do not create a
+ // subchannel call (and therefore no client_load_reporting filter)
+ // for dropped calls.
+ grpc_grpclb_client_stats_add_call_dropped_locked(
+ server->load_balance_token, wc_arg->client_stats);
+ grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ if (force_async) {
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ gpr_free(wc_arg->free_when_done);
+ return false;
+ }
gpr_free(wc_arg->free_when_done);
- return false;
+ return true;
}
- gpr_free(wc_arg->free_when_done);
- return true;
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@@ -669,8 +712,18 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- grpc_lb_addresses *addresses =
- process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+ grpc_lb_addresses *addresses;
+ if (glb_policy->serverlist != NULL) {
+ GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
+ addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+ } else {
+ // If rr_handover_locked() is invoked when we haven't received any
+ // serverlist from the balancer, we use the fallback backends returned by
+ // the resolver. Note that the fallback backend list may be empty, in which
+ // case the new round_robin policy will keep the requested picks pending.
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
+ }
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
@@ -776,8 +829,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- GPR_ASSERT(glb_policy->serverlist != NULL &&
- glb_policy->serverlist->num_servers > 0);
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@@ -926,6 +977,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
+ if (glb_policy->fallback_backend_addresses != NULL) {
+ grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+ }
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
@@ -956,6 +1010,10 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
glb_policy->retry_timer_active = false;
}
+ if (glb_policy->fallback_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+ glb_policy->fallback_timer_active = false;
+ }
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
@@ -1067,12 +1125,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
+ /* start a timer to fall back */
+ if (glb_policy->lb_fallback_timeout_ms > 0 &&
+ glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
+ grpc_millis deadline =
+ grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
+ glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ glb_policy->fallback_timer_active = true;
+ grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
+ &glb_policy->lb_on_fallback);
+ }
+
glb_policy->started_picking = true;
- gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
}
@@ -1173,20 +1247,69 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &glb_policy->state_tracker, current, notify);
}
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+ glb_policy->retry_timer_active = false;
+ if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->lb_call == NULL);
+ query_for_backends_locked(exec_ctx, glb_policy);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+}
+
+static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
+ if (glb_policy->started_picking && glb_policy->updating_lb_call) {
+ if (glb_policy->retry_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+ }
+ if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
+ glb_policy->updating_lb_call = false;
+ } else if (!glb_policy->shutting_down) {
+ /* if we aren't shutting down, restart the LB client call after some time */
+ grpc_millis next_try =
+ grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
+ (void *)glb_policy);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
+ timeout);
+ } else {
+ gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
+ }
+ }
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
+ lb_call_on_retry_timer_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ glb_policy->retry_timer_active = true;
+ grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+ &glb_policy->lb_on_call_retry);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_server_status_received_locked");
+}
+
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- const gpr_timespec next_client_load_report_time =
- gpr_time_add(now, glb_policy->client_stats_report_interval);
+ const grpc_millis next_client_load_report_time =
+ grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
- &glb_policy->client_load_report_closure, now);
+ &glb_policy->client_load_report_closure);
}
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1203,21 +1326,6 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
-static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = glb_policy->client_load_report_payload;
- GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
- client_load_report_done_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, &op, 1,
- &glb_policy->client_load_report_closure);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
@@ -1237,6 +1345,9 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
+ if (glb_policy->lb_call == NULL) {
+ maybe_restart_lb_call(exec_ctx, glb_policy);
+ }
return;
}
// Construct message payload.
@@ -1260,17 +1371,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- // If we've already sent the initial request, then we can go ahead and
- // sent the load report. Otherwise, we need to wait until the initial
- // request has been sent to send this
- // (see lb_on_sent_initial_request_locked() below).
- if (glb_policy->initial_request_sent) {
- do_send_client_load_report_locked(exec_ctx, glb_policy);
+ // Send load report message.
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_SEND_MESSAGE;
+ op.data.send_message.send_message = glb_policy->client_load_report_payload;
+ GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
+ client_load_report_done_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ grpc_call_error call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, &op, 1,
+ &glb_policy->client_load_report_closure);
+ if (call_error != GRPC_CALL_OK) {
+ gpr_log(GPR_ERROR, "call_error=%d", call_error);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
}
}
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1286,12 +1403,10 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
- gpr_timespec deadline =
+ grpc_millis deadline =
glb_policy->lb_call_timeout_ms == 0
- ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
- : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
- GPR_TIMESPAN));
+ ? GRPC_MILLIS_INF_FUTURE
+ : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@@ -1315,9 +1430,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
- lb_on_sent_initial_request_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1325,14 +1437,13 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
lb_on_response_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- gpr_backoff_init(&glb_policy->lb_call_backoff_state,
- GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_GRPCLB_RECONNECT_JITTER,
- GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&glb_policy->lb_call_backoff_state,
+ GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_GRPCLB_RECONNECT_JITTER,
+ GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- glb_policy->initial_request_sent = false;
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
}
@@ -1349,7 +1460,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
- if (!glb_policy->client_load_report_timer_pending) {
+ if (glb_policy->client_load_report_timer_pending) {
grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
}
}
@@ -1373,7 +1484,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_error call_error;
- grpc_op ops[4];
+ grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1394,13 +1505,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->flags = 0;
op->reserved = NULL;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_on_sent_initial_request);
+ call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
+ ops, (size_t)(op - ops), NULL);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@@ -1437,19 +1543,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
- glb_policy->initial_request_sent = true;
- // If we attempted to send a client load report before the initial
- // request was sent, send the load report now.
- if (glb_policy->client_load_report_payload != NULL) {
- do_send_client_load_report_locked(exec_ctx, glb_policy);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
- "lb_on_sent_initial_request_locked");
-}
-
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@@ -1457,7 +1550,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
if (glb_policy->lb_response_payload != NULL) {
- gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
@@ -1471,16 +1564,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
if (response->has_client_stats_report_interval) {
- glb_policy->client_stats_report_interval =
- gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
- grpc_grpclb_duration_to_timespec(
- &response->client_stats_report_interval));
+ glb_policy->client_stats_report_interval = GPR_MAX(
+ GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
+ &response->client_stats_report_interval));
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
- "client load reporting interval = %" PRId64 ".%09d sec",
- glb_policy->client_stats_report_interval.tv_sec,
- glb_policy->client_stats_report_interval.tv_nsec);
+ "client load reporting interval = %" PRIdPTR " milliseconds",
+ glb_policy->client_stats_report_interval);
}
/* take a weak ref (won't prevent calling of \a glb_shutdown() if the
* strong ref count goes to zero) to be unref'd in
@@ -1525,6 +1616,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+ } else {
+ /* or dispose of the fallback */
+ grpc_lb_addresses_destroy(exec_ctx,
+ glb_policy->fallback_backend_addresses);
+ glb_policy->fallback_backend_addresses = NULL;
+ if (glb_policy->fallback_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+ glb_policy->fallback_timer_active = false;
+ }
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@@ -1535,9 +1635,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO,
- "Received empty server list. Picks will stay pending until "
- "a response with > 0 servers is received");
+ gpr_log(GPR_INFO, "Received empty server list, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@@ -1572,19 +1670,25 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
- glb_policy->retry_timer_active = false;
- if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
- (void *)glb_policy);
+ glb_policy->fallback_timer_active = false;
+ /* If we receive a serverlist after the timer fires but before this callback
+ * actually runs, don't fall back. */
+ if (glb_policy->serverlist == NULL) {
+ if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO,
+ "Falling back to use backends from resolver (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ rr_handover_locked(exec_ctx, glb_policy);
}
- GPR_ASSERT(glb_policy->lb_call == NULL);
- query_for_backends_locked(exec_ctx, glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
@@ -1603,66 +1707,30 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
/* We need to perform cleanups no matter what. */
lb_call_destroy_locked(exec_ctx, glb_policy);
- if (glb_policy->started_picking && glb_policy->updating_lb_call) {
- if (glb_policy->retry_timer_active) {
- grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
- }
- if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
- glb_policy->updating_lb_call = false;
- } else if (!glb_policy->shutting_down) {
- /* if we aren't shutting down, restart the LB client call after some time */
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try =
- gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
- (void *)glb_policy);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG,
- "... retry_timer_active in %" PRId64 ".%09d seconds.",
- timeout.tv_sec, timeout.tv_nsec);
- } else {
- gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
- }
- }
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
- lb_call_on_retry_timer_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->retry_timer_active = true;
- grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
- &glb_policy->lb_on_call_retry, now);
+ // If the load report timer is still pending, we wait for it to be
+ // called before restarting the call. Otherwise, we restart the call
+ // here.
+ if (!glb_policy->client_load_report_timer_pending) {
+ maybe_restart_lb_call(exec_ctx, glb_policy);
+ }
+}
+
+static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy,
+ const grpc_lb_addresses *addresses) {
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+ glb_policy->fallback_backend_addresses =
+ extract_backend_addresses_locked(exec_ctx, addresses);
+ if (glb_policy->lb_fallback_timeout_ms > 0 &&
+ !glb_policy->fallback_timer_active) {
+ rr_handover_locked(exec_ctx, glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
- "lb_on_server_status_received_locked");
}
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
- if (glb_policy->updating_lb_channel) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO,
- "Update already in progress for grpclb %p. Deferring update.",
- (void *)glb_policy);
- }
- if (glb_policy->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx,
- glb_policy->pending_update_args->args);
- gpr_free(glb_policy->pending_update_args);
- }
- glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
- sizeof(*glb_policy->pending_update_args));
- glb_policy->pending_update_args->client_channel_factory =
- args->client_channel_factory;
- glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
- glb_policy->pending_update_args->combiner = args->combiner;
- return;
- }
-
- glb_policy->updating_lb_channel = true;
- // Propagate update to lb_channel (pick first).
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1680,13 +1748,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
"ignoring.",
(void *)glb_policy);
}
+ return;
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
+
+ if (glb_policy->serverlist == NULL) {
+ // If a non-empty serverlist hasn't been received from the balancer,
+ // propagate the update to fallback_backend_addresses.
+ fallback_update_locked(exec_ctx, glb_policy, addresses);
+ } else if (glb_policy->updating_lb_channel) {
+ // If we have recieved serverlist from the balancer, we need to defer update
+ // when there is an in-progress one.
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO,
+ "Update already in progress for grpclb %p. Deferring update.",
+ (void *)glb_policy);
+ }
+ if (glb_policy->pending_update_args != NULL) {
+ grpc_channel_args_destroy(exec_ctx,
+ glb_policy->pending_update_args->args);
+ gpr_free(glb_policy->pending_update_args);
+ }
+ glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
+ sizeof(*glb_policy->pending_update_args));
+ glb_policy->pending_update_args->client_channel_factory =
+ args->client_channel_factory;
+ glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
+ glb_policy->pending_update_args->combiner = args->combiner;
+ return;
+ }
+
+ glb_policy->updating_lb_channel = true;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
- /* Propagate updates to the LB channel through the fake resolver */
+ /* Propagate updates to the LB channel (pick first) through the fake resolver
+ */
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
@@ -1789,13 +1887,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- /* Count the number of gRPC-LB addresses. There must be at least one.
- * TODO(roth): For now, we ignore non-balancer addresses, but in the
- * future, we may change the behavior such that we fall back to using
- * the non-balancer addresses if we cannot reach any balancers. In the
- * fallback case, we should use the LB policy indicated by
- * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
- * unset, we should default to pick_first). */
+ /* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1829,7 +1921,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
glb_policy->lb_call_timeout_ms =
- grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
+ grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
+
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
+ glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
+ arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
@@ -1839,6 +1935,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+ /* Extract the backend addresses (may be empty) from the resolver for
+ * fallback. */
+ glb_policy->fallback_backend_addresses =
+ extract_backend_addresses_locked(exec_ctx, addresses);
+
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();
@@ -1900,7 +2001,7 @@ static bool maybe_add_client_load_reporting_filter(
return true;
}
-void grpc_lb_policy_grpclb_init() {
+extern "C" void grpc_lb_policy_grpclb_init() {
grpc_register_lb_policy(grpc_glb_lb_factory_create());
grpc_register_tracer(&grpc_lb_glb_trace);
#ifndef NDEBUG
@@ -1912,4 +2013,4 @@ void grpc_lb_policy_grpclb_init() {
(void *)&grpc_client_load_reporting_filter);
}
-void grpc_lb_policy_grpclb_shutdown() {}
+extern "C" void grpc_lb_policy_grpclb_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
index 63ad66c5e9..c67df609fc 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@@ -21,9 +21,17 @@
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
* subchannel to pick. */
grpc_lb_policy_factory *grpc_glb_lb_factory_create();
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */ \ No newline at end of file
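The #ifdef __cplusplus / extern "C" bracketing added here recurs in every header touched below; a standalone sketch of the pattern on a hypothetical header (names are illustrative only):

#ifndef EXAMPLE_POLICY_H
#define EXAMPLE_POLICY_H

#ifdef __cplusplus
extern "C" {
#endif

/* Declarations keep C linkage even when the implementation file is built as
 * C++ (.c renamed to .cc), so existing C callers and the plugin registry
 * still link against the same unmangled symbols. */
void example_policy_init(void);
void example_policy_shutdown(void);

#ifdef __cplusplus
}
#endif

#endif /* EXAMPLE_POLICY_H */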
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index f2967182e2..f2967182e2 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index 6120bf53f7..e8599d1f51 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -23,6 +23,10 @@
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/slice/slice_hash_table.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Create the channel used for communicating with an LB service.
* Note that an LB *service* may be comprised of several LB *servers*.
*
@@ -40,5 +44,9 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_fake_resolver_response_generator *response_generator,
const grpc_channel_args *args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
*/
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 2681b2a079..2681b2a079 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
index 903120ca7d..903120ca7d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
index c51e2a431a..b38c076f38 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
@@ -23,6 +23,10 @@
#include <grpc/impl/codegen/grpc_types.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_grpclb_client_stats grpc_grpclb_client_stats;
typedef struct {
@@ -61,5 +65,9 @@ void grpc_grpclb_client_stats_get_locked(
void grpc_grpclb_dropped_call_counts_destroy(
grpc_grpclb_dropped_call_counts* drop_entries);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
*/
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 8ef6dfc6f4..4d5fb2081c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -299,13 +299,10 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
-gpr_timespec grpc_grpclb_duration_to_timespec(
- grpc_grpclb_duration *duration_pb) {
- gpr_timespec duration;
- duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0;
- duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0;
- duration.clock_type = GPR_TIMESPAN;
- return duration;
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
+ return (grpc_millis)(
+ (duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
+ (duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
void grpc_grpclb_initial_response_destroy(
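Worked example for grpc_grpclb_duration_to_millis() above (a sketch using the nanopb field names shown in the function; GPR_MS_PER_SEC is 1000 and GPR_NS_PER_MS is 1000000):

/* A duration of 1s + 500,000,000ns maps to 1 * 1000 + 500000000 / 1000000. */
grpc_grpclb_duration d;
d.has_seconds = true;
d.seconds = 1;
d.has_nanos = true;
d.nanos = 500000000;
grpc_millis interval = grpc_grpclb_duration_to_millis(&d); /* == 1500 */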
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index c4a98492c9..56b9c096d0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -81,8 +81,7 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs);
-gpr_timespec grpc_grpclb_duration_to_timespec(
- grpc_grpclb_duration *duration_pb);
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index d20cbb8388..b07fc3b720 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -706,9 +706,9 @@ static grpc_lb_policy_factory *pick_first_lb_factory_create() {
/* Plugin registration */
-void grpc_lb_policy_pick_first_init() {
+extern "C" void grpc_lb_policy_pick_first_init() {
grpc_register_lb_policy(pick_first_lb_factory_create());
grpc_register_tracer(&grpc_lb_pick_first_trace);
}
-void grpc_lb_policy_pick_first_shutdown() {}
+extern "C" void grpc_lb_policy_pick_first_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index a3a62e9f3c..6812bb50cd 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -916,9 +916,9 @@ static grpc_lb_policy_factory *round_robin_lb_factory_create() {
/* Plugin registration */
-void grpc_lb_policy_round_robin_init() {
+extern "C" void grpc_lb_policy_round_robin_init() {
grpc_register_lb_policy(round_robin_lb_factory_create());
grpc_register_tracer(&grpc_lb_round_robin_trace);
}
-void grpc_lb_policy_round_robin_shutdown() {}
+extern "C" void grpc_lb_policy_round_robin_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.cc
index 4d1405454c..05ab43d0b6 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.c
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.cc
@@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
- void* address, size_t address_len,
+ const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 9d9fb143df..69bcba4232 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -29,6 +29,10 @@
// Channel arg key for grpc_lb_addresses.
#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
@@ -73,7 +77,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
- void *address, size_t address_len,
+ const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);
@@ -130,4 +134,8 @@ grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */ \ No newline at end of file
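The const-qualified address parameter lets callers copy entries out of an address list they only hold by const pointer, as the new fallback extraction does. A usage sketch (src, dst, i, and j are hypothetical):

/* Copy the i-th entry of `src` into slot `j` of `dst` as a backend address. */
const grpc_lb_address *entry = &src->addresses[i];
grpc_lb_addresses_set_address(dst, j, entry->address.addr, entry->address.len,
                              false /* is_balancer */,
                              NULL /* balancer_name */,
                              NULL /* user_data */);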
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.c b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index f2460f8304..f2460f8304 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.c
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index f5995687cf..0867844c37 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -22,6 +22,10 @@
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Initialize the registry and set \a default_factory as the factory to be
* returned when no name is provided in a lookup */
void grpc_lb_policy_registry_init(void);
@@ -37,4 +41,8 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
grpc_lb_policy_args *args);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/parse_address.c b/src/core/ext/filters/client_channel/parse_address.cc
index 2152b5a1e9..2152b5a1e9 100644
--- a/src/core/ext/filters/client_channel/parse_address.c
+++ b/src/core/ext/filters/client_channel/parse_address.cc
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index c90a827da5..742df380b7 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -24,6 +24,10 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
bool grpc_parse_unix(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
@@ -45,4 +49,8 @@ bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
bool log_errors);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.c b/src/core/ext/filters/client_channel/proxy_mapper.cc
index c6ea5fc680..c6ea5fc680 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.c
+++ b/src/core/ext/filters/client_channel/proxy_mapper.cc
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.h b/src/core/ext/filters/client_channel/proxy_mapper.h
index a13861ccaf..1325a9f1f6 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_proxy_mapper grpc_proxy_mapper;
typedef struct {
@@ -71,4 +75,8 @@ bool grpc_proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.c b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
index 09967eea3c..09967eea3c 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.c
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.h b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
index 99e54d1a78..2d389f1c21 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
@@ -21,6 +21,10 @@
#include "src/core/ext/filters/client_channel/proxy_mapper.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_proxy_mapper_registry_init();
void grpc_proxy_mapper_registry_shutdown();
@@ -41,4 +45,8 @@ bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx,
grpc_resolved_address** new_address,
grpc_channel_args** new_args);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/resolver.c b/src/core/ext/filters/client_channel/resolver.cc
index 8401504fcf..8401504fcf 100644
--- a/src/core/ext/filters/client_channel/resolver.c
+++ b/src/core/ext/filters/client_channel/resolver.cc
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index ae9c8f66fe..73fbbbbc3b 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -22,6 +22,10 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/iomgr/iomgr.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
@@ -87,4 +91,8 @@ void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H */
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 9bb229ad95..5f7ab987cb 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -32,13 +32,13 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/json/json.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/service_config.h"
@@ -89,7 +89,7 @@ typedef struct {
bool have_retry_timer;
grpc_timer retry_timer;
/** retry backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_lb_addresses *lb_addresses;
@@ -137,7 +137,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
if (!r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
@@ -271,22 +271,20 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
+ grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
- timeout.tv_nsec);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
- &r->dns_ares_on_retry_timer_locked, now);
+ &r->dns_ares_on_retry_timer_locked);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@@ -307,7 +305,7 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
} else {
dns_ares_maybe_finish_next_locked(exec_ctx, r);
@@ -381,11 +379,11 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
- gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_DNS_RECONNECT_JITTER,
- GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
dns_ares_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
@@ -425,7 +423,7 @@ static grpc_resolver_factory *dns_ares_resolver_factory_create() {
return &dns_resolver_factory;
}
-void grpc_resolver_dns_ares_init(void) {
+extern "C" void grpc_resolver_dns_ares_init(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
sorter and the CNAME support are added. */
@@ -441,7 +439,7 @@ void grpc_resolver_dns_ares_init(void) {
gpr_free(resolver);
}
-void grpc_resolver_dns_ares_shutdown(void) {
+extern "C" void grpc_resolver_dns_ares_shutdown(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
grpc_ares_cleanup();
@@ -451,8 +449,8 @@ void grpc_resolver_dns_ares_shutdown(void) {
#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */
-void grpc_resolver_dns_ares_init(void) {}
+extern "C" void grpc_resolver_dns_ares_init(void) {}
-void grpc_resolver_dns_ares_shutdown(void) {}
+extern "C" void grpc_resolver_dns_ares_shutdown(void) {}
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
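Both DNS resolvers (and the subchannel code further down) now track retry deadlines as grpc_millis instead of gpr_timespec. A condensed sketch of the retry arithmetic, using the native resolver's field names and assuming grpc_backoff_step() returns an absolute deadline on the exec_ctx clock:

/* The relative delay is a plain subtraction now, rather than gpr_time_sub()
 * on gpr_timespec values with an explicit clock type. */
grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
grpc_millis delay = next_try - grpc_exec_ctx_now(exec_ctx);
if (delay > 0) {
  gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", delay);
} else {
  gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry);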
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 386012d2ed..3d4309f2fa 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -22,6 +22,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
@@ -49,5 +53,9 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index c30cc93b6f..c30cc93b6f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 04379975e1..04379975e1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 108333047d..38fbea9aac 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_ares_request grpc_ares_request;
/* Asynchronously resolve \a name. Use \a default_port if a port isn't
@@ -65,5 +69,9 @@ grpc_error *grpc_ares_init(void);
it has been called the same number of times as grpc_ares_init(). */
void grpc_ares_cleanup(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index f2587c4520..f2587c4520 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 5ea75f0554..e669b6dfc7 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -16,6 +16,9 @@
*
*/
+#include <grpc/support/port_platform.h>
+
+#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -24,11 +27,11 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
@@ -67,7 +70,7 @@ typedef struct {
grpc_timer retry_timer;
grpc_closure on_retry;
/** retry backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_resolved_addresses *addresses;
@@ -110,7 +113,7 @@ static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
if (!r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
}
@@ -123,7 +126,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
@@ -150,6 +153,9 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
+ GRPC_ERROR_REF(error);
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(r->name_to_resolve));
if (r->addresses != NULL) {
grpc_lb_addresses *addresses = grpc_lb_addresses_create(
r->addresses->naddrs, NULL /* user_data_vtable */);
@@ -164,23 +170,21 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses_destroy(r->addresses);
grpc_lb_addresses_destroy(exec_ctx, addresses);
} else {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
+ grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
- timeout.tv_nsec);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
- grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
+ grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@@ -188,6 +192,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
+ GRPC_ERROR_UNREF(error);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@@ -251,11 +256,11 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
- gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_DNS_RECONNECT_JITTER,
- GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
return &r->base;
}
@@ -289,7 +294,7 @@ static grpc_resolver_factory *dns_resolver_factory_create() {
return &dns_resolver_factory;
}
-void grpc_resolver_dns_native_init(void) {
+extern "C" void grpc_resolver_dns_native_init(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
@@ -307,4 +312,4 @@ void grpc_resolver_dns_native_init(void) {
gpr_free(resolver);
}
-void grpc_resolver_dns_native_shutdown(void) {}
+extern "C" void grpc_resolver_dns_native_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index 69ea440ae6..ed5b1011fb 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -258,8 +258,8 @@ static const grpc_resolver_factory_vtable fake_resolver_factory_vtable = {
static grpc_resolver_factory fake_resolver_factory = {
&fake_resolver_factory_vtable};
-void grpc_resolver_fake_init(void) {
+extern "C" void grpc_resolver_fake_init(void) {
grpc_register_resolver_type(&fake_resolver_factory);
}
-void grpc_resolver_fake_shutdown(void) {}
+extern "C" void grpc_resolver_fake_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index c084ef2a58..95c3bafed8 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -21,6 +21,10 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \
"grpc.fake_resolver.response_generator"
@@ -56,5 +60,9 @@ grpc_fake_resolver_response_generator_ref(
void grpc_fake_resolver_response_generator_unref(
grpc_fake_resolver_response_generator* generator);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index 7ceb8f40a1..dda9542325 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -211,7 +211,7 @@ DECL_FACTORY(unix);
DECL_FACTORY(ipv4);
DECL_FACTORY(ipv6);
-void grpc_resolver_sockaddr_init(void) {
+extern "C" void grpc_resolver_sockaddr_init(void) {
grpc_register_resolver_type(&ipv4_resolver_factory);
grpc_register_resolver_type(&ipv6_resolver_factory);
#ifdef GRPC_HAVE_UNIX_SOCKET
@@ -219,4 +219,4 @@ void grpc_resolver_sockaddr_init(void) {
#endif
}
-void grpc_resolver_sockaddr_shutdown(void) {}
+extern "C" void grpc_resolver_sockaddr_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver_factory.c b/src/core/ext/filters/client_channel/resolver_factory.cc
index 6f0a7c1e36..6f0a7c1e36 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.c
+++ b/src/core/ext/filters/client_channel/resolver_factory.cc
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index 6bd7929d4e..6e533e3248 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -24,6 +24,10 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
@@ -67,4 +71,8 @@ grpc_resolver *grpc_resolver_factory_create_resolver(
char *grpc_resolver_factory_get_default_authority(
grpc_resolver_factory *factory, grpc_uri *uri);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/resolver_registry.c b/src/core/ext/filters/client_channel/resolver_registry.cc
index 1a0fb0bc3c..1a0fb0bc3c 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.c
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 692490543d..eb08d887b1 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -22,6 +22,10 @@
#include "src/core/ext/filters/client_channel/resolver_factory.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
@@ -66,4 +70,8 @@ char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target);
char *grpc_resolver_factory_add_default_prefix_if_needed(
grpc_exec_ctx *exec_ctx, const char *target);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/retry_throttle.c b/src/core/ext/filters/client_channel/retry_throttle.cc
index 09dcade089..09dcade089 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.c
+++ b/src/core/ext/filters/client_channel/retry_throttle.cc
diff --git a/src/core/ext/filters/client_channel/retry_throttle.h b/src/core/ext/filters/client_channel/retry_throttle.h
index bf99297e98..3b849475b9 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.h
+++ b/src/core/ext/filters/client_channel/retry_throttle.h
@@ -21,6 +21,10 @@
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Tracks retry throttling data for an individual server name.
typedef struct grpc_server_retry_throttle_data grpc_server_retry_throttle_data;
@@ -47,4 +51,8 @@ void grpc_retry_throttle_map_shutdown();
grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.cc
index 40a51c72d6..5710a22178 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -18,6 +18,7 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
+#include <inttypes.h>
#include <limits.h>
#include <string.h>
@@ -30,6 +31,7 @@
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
@@ -37,7 +39,6 @@
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -117,9 +118,9 @@ struct grpc_subchannel {
external_state_watcher root_external_state_watcher;
/** next connect attempt time */
- gpr_timespec next_attempt;
+ grpc_millis next_attempt;
/** backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** do we have an active alarm? */
bool have_alarm;
/** have we started the backoff loop */
@@ -343,31 +344,27 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
"grpc.testing.fixed_reconnect_backoff_ms")) {
fixed_reconnect_backoff = true;
initial_backoff_ms = min_backoff_ms = max_backoff_ms =
- grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){initial_backoff_ms, 100, INT_MAX});
+ grpc_channel_arg_get_integer(&c->args->args[i],
+ {initial_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
min_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){min_backoff_ms, 100, INT_MAX});
+ &c->args->args[i], {min_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
max_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){max_backoff_ms, 100, INT_MAX});
+ &c->args->args[i], {max_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
initial_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){initial_backoff_ms, 100, INT_MAX});
+ &c->args->args[i], {initial_backoff_ms, 100, INT_MAX});
}
}
}
- gpr_backoff_init(
+ grpc_backoff_init(
&c->backoff_state, initial_backoff_ms,
fixed_reconnect_backoff ? 1.0
: GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
@@ -431,8 +428,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
- c->next_attempt =
- gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
+ c->next_attempt = grpc_backoff_step(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
} else {
@@ -467,24 +463,22 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
c->connecting = true;
GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!c->backoff_begun) {
c->backoff_begun = true;
- c->next_attempt = gpr_backoff_begin(&c->backoff_state, now);
+ c->next_attempt = grpc_backoff_begin(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
} else {
GPR_ASSERT(!c->have_alarm);
c->have_alarm = true;
- gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
- if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
- 0) {
+ const grpc_millis time_til_next =
+ c->next_attempt - grpc_exec_ctx_now(exec_ctx);
+ if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Retry immediately");
} else {
- gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
- time_til_next.tv_sec, time_til_next.tv_nsec);
+ gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next);
}
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now);
+ grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm);
}
}
@@ -759,14 +753,15 @@ grpc_error *grpc_connected_subchannel_create_call(
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
const grpc_call_element_args call_args = {
- .call_stack = callstk,
- .server_transport_data = NULL,
- .context = args->context,
- .path = args->path,
- .start_time = args->start_time,
- .deadline = args->deadline,
- .arena = args->arena,
- .call_combiner = args->call_combiner};
+ callstk, /* call_stack */
+ NULL, /* server_transport_data */
+ args->context, /* context */
+ args->path, /* path */
+ args->start_time, /* start_time */
+ args->deadline, /* deadline */
+ args->arena, /* arena */
+ args->call_combiner /* call_combiner */
+ };
grpc_error *error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 51d712f6a7..46b29f1fe0 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -26,6 +26,10 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Channel arg containing a grpc_resolved_address to connect to.
#define GRPC_ARG_SUBCHANNEL_ADDRESS "grpc.subchannel_address"
@@ -103,7 +107,7 @@ typedef struct {
grpc_polling_entity *pollent;
grpc_slice path;
gpr_timespec start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
grpc_call_context_element *context;
grpc_call_combiner *call_combiner;
@@ -188,4 +192,8 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args);
/// Caller is responsible for freeing the string.
grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H */
diff --git a/src/core/ext/filters/client_channel/subchannel_index.c b/src/core/ext/filters/client_channel/subchannel_index.cc
index d7a51f3899..1f466ec0b8 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.c
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -114,11 +114,12 @@ static void *scv_avl_copy(void *p, void *unused) {
}
static const gpr_avl_vtable subchannel_avl_vtable = {
- .destroy_key = sck_avl_destroy,
- .copy_key = sck_avl_copy,
- .compare_keys = sck_avl_compare,
- .destroy_value = scv_avl_destroy,
- .copy_value = scv_avl_copy};
+ sck_avl_destroy, // destroy_key
+ sck_avl_copy, // copy_key
+ sck_avl_compare, // compare_keys
+ scv_avl_destroy, // destroy_value
+ scv_avl_copy // copy_value
+};
void grpc_subchannel_index_init(void) {
g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index 92e36d5283..09bac3592c 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -21,6 +21,10 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** \file Provides an index of active subchannels so that they can be
shared amongst channels */
@@ -78,4 +82,8 @@ void grpc_subchannel_index_unref(void);
* force_creation set. */
void grpc_subchannel_index_test_only_set_force_creation(bool force_creation);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */ \ No newline at end of file
diff --git a/src/core/ext/filters/client_channel/uri_parser.c b/src/core/ext/filters/client_channel/uri_parser.cc
index fb4fb8e694..fb4fb8e694 100644
--- a/src/core/ext/filters/client_channel/uri_parser.c
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
index 05ca2e00e4..43e8ae64e0 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/ext/filters/client_channel/uri_parser.h
@@ -22,6 +22,10 @@
#include <stddef.h>
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
char *scheme;
char *authority;
@@ -47,4 +51,8 @@ const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key);
/** destroy a uri */
void grpc_uri_destroy(grpc_uri *uri);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */ \ No newline at end of file