Diffstat (limited to 'src/core/ext/filters/client_channel')
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc | 21
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.h | 4
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 2305
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.cc | 4
-rw-r--r--  src/core/ext/filters/client_channel/connector.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/connector.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/http_proxy.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc | 125
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h | 333
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 2987
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h | 29
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc | 52
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h | 26
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc | 132
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 523
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 578
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc | 30
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.h | 18
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.cc | 16
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.h | 52
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_registry.cc | 93
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_registry.h | 39
-rw-r--r--  src/core/ext/filters/client_channel/method_params.cc | 178
-rw-r--r--  src/core/ext/filters/client_channel/method_params.h | 74
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 9
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc | 1
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc | 1
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc | 1
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc | 3
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc | 3
-rw-r--r--  src/core/ext/filters/client_channel/resolver_factory.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver_registry.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.cc | 6
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/status_util.cc | 100
-rw-r--r--  src/core/ext/filters/client_channel/status_util.h | 58
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc | 17
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 11
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.cc | 2
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.h | 2
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.cc | 3
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.h | 2
65 files changed, 4885 insertions(+), 3011 deletions(-)
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index ee90b499eb..3e2faa57bc 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include <grpc/grpc.h>
@@ -125,13 +127,7 @@ static void run_poller(void* arg, grpc_error* error) {
&p->run_poller_closure);
}
-void grpc_client_channel_start_backup_polling(
- grpc_pollset_set* interested_parties) {
- gpr_once_init(&g_once, init_globals);
- if (g_poll_interval_ms == 0) {
- return;
- }
- gpr_mu_lock(&g_poller_mu);
+static void g_poller_init_locked() {
if (g_poller == nullptr) {
g_poller = static_cast<backup_poller*>(gpr_zalloc(sizeof(backup_poller)));
g_poller->pollset =
@@ -147,7 +143,16 @@ void grpc_client_channel_start_backup_polling(
grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
&g_poller->run_poller_closure);
}
+}
+void grpc_client_channel_start_backup_polling(
+ grpc_pollset_set* interested_parties) {
+ gpr_once_init(&g_once, init_globals);
+ if (g_poll_interval_ms == 0) {
+ return;
+ }
+ gpr_mu_lock(&g_poller_mu);
+ g_poller_init_locked();
gpr_ref(&g_poller->refs);
/* Get a reference to g_poller->pollset before releasing g_poller_mu to make
* TSAN happy. Otherwise, reading from g_poller (i.e g_poller->pollset) after
diff --git a/src/core/ext/filters/client_channel/backup_poller.h b/src/core/ext/filters/client_channel/backup_poller.h
index 551e0331dc..7285b9b93e 100644
--- a/src/core/ext/filters/client_channel/backup_poller.h
+++ b/src/core/ext/filters/client_channel/backup_poller.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
+#include <grpc/support/port_platform.h>
+
#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/exec_ctx.h"
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 31a5c31124..37860e82e3 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/surface/channel.h"
#include <inttypes.h>
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 8aa9905d5c..90b93fbe23 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -21,6 +21,7 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include <inttypes.h>
+#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@@ -33,153 +34,74 @@
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/ext/filters/client_channel/method_params.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
+#include "src/core/ext/filters/client_channel/status_util.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/status_metadata.h"
+
+using grpc_core::internal::ClientChannelMethodParams;
/* Client channel implementation */
+// By default, we buffer 256 KiB per RPC for retries.
+// TODO(roth): Do we have any data to suggest a better value?
+#define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10)
+
+// This value was picked arbitrarily. It can be changed if there is
+// any even moderately compelling reason to do so.
+#define RETRY_BACKOFF_JITTER 0.2
+
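
[Editor's aside: the RETRY_BACKOFF_JITTER above randomizes each retry delay so that clients whose calls failed at the same moment do not retry in lockstep. Below is a minimal standalone sketch of jittered exponential backoff; all names and the multiplier/cap values are illustrative only. In this patch the real logic lives in grpc_core::BackOff (src/core/lib/backoff), wired up via calld->retry_backoff.]

#include <algorithm>
#include <cstdio>
#include <random>

double NextBackoffMs(double current_backoff_ms, double multiplier,
                     double max_backoff_ms, double jitter,
                     std::mt19937& rng) {
  // Grow the delay exponentially, capped at the maximum.
  double next = std::min(current_backoff_ms * multiplier, max_backoff_ms);
  // Spread attempts out by +/- (jitter * next) to avoid thundering herds.
  std::uniform_real_distribution<double> dist(-jitter * next, jitter * next);
  return next + dist(rng);
}

int main() {
  std::mt19937 rng(42);
  double backoff_ms = 1000;  // example initial backoff
  for (int attempt = 0; attempt < 4; ++attempt) {
    // 1.6x growth and a 120 s cap are example values, not gRPC's defaults.
    backoff_ms = NextBackoffMs(backoff_ms, 1.6, 120000, 0.2, rng);
    std::printf("attempt %d: wait %.0f ms\n", attempt + 1, backoff_ms);
  }
}
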
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
/*************************************************************************
- * METHOD-CONFIG TABLE
+ * CHANNEL-WIDE FUNCTIONS
*/
-typedef enum {
- /* zero so it can be default initialized */
- WAIT_FOR_READY_UNSET = 0,
- WAIT_FOR_READY_FALSE,
- WAIT_FOR_READY_TRUE
-} wait_for_ready_value;
-
-typedef struct {
- gpr_refcount refs;
- grpc_millis timeout;
- wait_for_ready_value wait_for_ready;
-} method_parameters;
-
-static method_parameters* method_parameters_ref(
- method_parameters* method_params) {
- gpr_ref(&method_params->refs);
- return method_params;
-}
-
-static void method_parameters_unref(method_parameters* method_params) {
- if (gpr_unref(&method_params->refs)) {
- gpr_free(method_params);
- }
-}
-
-// Wrappers to pass to grpc_service_config_create_method_config_table().
-static void* method_parameters_ref_wrapper(void* value) {
- return method_parameters_ref(static_cast<method_parameters*>(value));
-}
-static void method_parameters_unref_wrapper(void* value) {
- method_parameters_unref(static_cast<method_parameters*>(value));
-}
-
-static bool parse_wait_for_ready(grpc_json* field,
- wait_for_ready_value* wait_for_ready) {
- if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
- return false;
- }
- *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
- : WAIT_FOR_READY_FALSE;
- return true;
-}
-
-static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
- if (field->type != GRPC_JSON_STRING) return false;
- size_t len = strlen(field->value);
- if (field->value[len - 1] != 's') return false;
- char* buf = gpr_strdup(field->value);
- buf[len - 1] = '\0'; // Remove trailing 's'.
- char* decimal_point = strchr(buf, '.');
- int nanos = 0;
- if (decimal_point != nullptr) {
- *decimal_point = '\0';
- nanos = gpr_parse_nonnegative_int(decimal_point + 1);
- if (nanos == -1) {
- gpr_free(buf);
- return false;
- }
- int num_digits = static_cast<int>(strlen(decimal_point + 1));
- if (num_digits > 9) { // We don't accept greater precision than nanos.
- gpr_free(buf);
- return false;
- }
- for (int i = 0; i < (9 - num_digits); ++i) {
- nanos *= 10;
- }
- }
- int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf);
- gpr_free(buf);
- if (seconds == -1) return false;
- *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
- return true;
-}
-
-static void* method_parameters_create_from_json(const grpc_json* json) {
- wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
- grpc_millis timeout = 0;
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) continue;
- if (strcmp(field->key, "waitForReady") == 0) {
- if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr; // Duplicate.
- if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr;
- } else if (strcmp(field->key, "timeout") == 0) {
- if (timeout > 0) return nullptr; // Duplicate.
- if (!parse_timeout(field, &timeout)) return nullptr;
- }
- }
- method_parameters* value =
- static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
- gpr_ref_init(&value->refs, 1);
- value->timeout = timeout;
- value->wait_for_ready = wait_for_ready;
- return value;
-}
-
struct external_connectivity_watcher;
-/*************************************************************************
- * CHANNEL-WIDE FUNCTIONS
- */
+typedef grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+ MethodParamsTable;
typedef struct client_channel_channel_data {
- /** resolver for this channel */
grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
- /** have we started resolving this channel */
bool started_resolving;
- /** is deadline checking enabled? */
bool deadline_checking_enabled;
- /** client channel factory */
grpc_client_channel_factory* client_channel_factory;
+ bool enable_retries;
+ size_t per_rpc_retry_buffer_size;
/** combiner protecting all variables below in this data structure */
grpc_combiner* combiner;
/** currently active load balancer */
- grpc_lb_policy* lb_policy;
+ grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
/** retry throttle data */
grpc_server_retry_throttle_data* retry_throttle_data;
/** maps method names to method_parameters structs */
- grpc_slice_hash_table* method_params_table;
+ grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
@@ -200,7 +122,7 @@ typedef struct client_channel_channel_data {
gpr_mu external_connectivity_watcher_list_mu;
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
- /* the following properties are guarded by a mutex since API's require them
+ /* the following properties are guarded by a mutex since APIs require them
to be instantaneously available */
gpr_mu info_mu;
char* info_lb_policy_name;
@@ -212,7 +134,7 @@ typedef struct {
channel_data* chand;
/** used as an identifier, don't dereference it because the LB policy may be
* non-existing when the callback is run */
- grpc_lb_policy* lb_policy;
+ grpc_core::LoadBalancingPolicy* lb_policy;
grpc_closure closure;
} reresolution_request_args;
@@ -223,11 +145,11 @@ typedef struct {
channel_data* chand;
grpc_closure on_changed;
grpc_connectivity_state state;
- grpc_lb_policy* lb_policy;
+ grpc_core::LoadBalancingPolicy* lb_policy;
} lb_policy_connectivity_watcher;
static void watch_lb_policy_locked(channel_data* chand,
- grpc_lb_policy* lb_policy,
+ grpc_core::LoadBalancingPolicy* lb_policy,
grpc_connectivity_state current_state);
static void set_channel_connectivity_state_locked(channel_data* chand,
@@ -241,15 +163,13 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
if (chand->lb_policy != nullptr) {
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* cancel picks with wait_for_ready=false */
- grpc_lb_policy_cancel_picks_locked(
- chand->lb_policy,
+ chand->lb_policy->CancelMatchingPicksLocked(
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
} else if (state == GRPC_CHANNEL_SHUTDOWN) {
/* cancel all picks */
- grpc_lb_policy_cancel_picks_locked(chand->lb_policy,
- /* mask= */ 0, /* check= */ 0,
- GRPC_ERROR_REF(error));
+ chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
+ GRPC_ERROR_REF(error));
}
}
if (grpc_client_channel_trace.enabled()) {
@@ -263,7 +183,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
lb_policy_connectivity_watcher* w =
static_cast<lb_policy_connectivity_watcher*>(arg);
/* check if the notification is for the latest policy */
- if (w->lb_policy == w->chand->lb_policy) {
+ if (w->lb_policy == w->chand->lb_policy.get()) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
w->lb_policy, grpc_connectivity_state_name(w->state));
@@ -279,7 +199,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
}
static void watch_lb_policy_locked(channel_data* chand,
- grpc_lb_policy* lb_policy,
+ grpc_core::LoadBalancingPolicy* lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher* w =
static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
@@ -289,8 +209,7 @@ static void watch_lb_policy_locked(channel_data* chand,
grpc_combiner_scheduler(chand->combiner));
w->state = current_state;
w->lb_policy = lb_policy;
- grpc_lb_policy_notify_on_state_change_locked(lb_policy, &w->state,
- &w->on_changed);
+ lb_policy->NotifyOnStateChangeLocked(&w->state, &w->on_changed);
}
static void start_resolving_locked(channel_data* chand) {
@@ -309,9 +228,8 @@ typedef struct {
grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;
-static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
- service_config_parsing_state* parsing_state =
- static_cast<service_config_parsing_state*>(arg);
+static void parse_retry_throttle_params(
+ const grpc_json* field, service_config_parsing_state* parsing_state) {
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -371,7 +289,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
channel_data* chand = args->chand;
// If this invocation is for a stale LB policy, treat it as an LB shutdown
// signal.
- if (args->lb_policy != chand->lb_policy || error != GRPC_ERROR_NONE ||
+ if (args->lb_policy != chand->lb_policy.get() || error != GRPC_ERROR_NONE ||
chand->resolver == nullptr) {
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution");
gpr_free(args);
@@ -382,7 +300,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
}
chand->resolver->RequestReresolutionLocked();
// Give back the closure to the LB policy.
- grpc_lb_policy_set_reresolve_closure_locked(chand->lb_policy, &args->closure);
+ chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
}
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
@@ -391,14 +309,15 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
- // Extract the following fields from the resolver result, if non-NULL.
+ // Extract the following fields from the resolver result, if non-nullptr.
bool lb_policy_updated = false;
+ bool lb_policy_created = false;
char* lb_policy_name_dup = nullptr;
bool lb_policy_name_changed = false;
- grpc_lb_policy* new_lb_policy = nullptr;
+ grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
char* service_config_json = nullptr;
grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
- grpc_slice_hash_table* method_params_table = nullptr;
+ grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
if (chand->resolver_result != nullptr) {
if (chand->resolver != nullptr) {
// Find LB policy name.
@@ -433,10 +352,6 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
- grpc_lb_policy_args lb_policy_args;
- lb_policy_args.args = chand->resolver_result;
- lb_policy_args.client_channel_factory = chand->client_channel_factory;
- lb_policy_args.combiner = chand->combiner;
// Check to see if we're already using the right LB policy.
// Note: It's safe to use chand->info_lb_policy_name here without
// taking a lock on chand->info_mu, because this function is the
@@ -448,59 +363,65 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
lb_policy_updated = true;
- grpc_lb_policy_update_locked(chand->lb_policy, &lb_policy_args);
+ chand->lb_policy->UpdateLocked(*chand->resolver_result);
} else {
// Instantiate new LB policy.
- new_lb_policy = grpc_lb_policy_create(lb_policy_name, &lb_policy_args);
+ grpc_core::LoadBalancingPolicy::Args lb_policy_args;
+ lb_policy_args.combiner = chand->combiner;
+ lb_policy_args.client_channel_factory = chand->client_channel_factory;
+ lb_policy_args.args = chand->resolver_result;
+ new_lb_policy =
+ grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+ lb_policy_name, lb_policy_args);
if (new_lb_policy == nullptr) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
lb_policy_name);
} else {
+ lb_policy_created = true;
reresolution_request_args* args =
static_cast<reresolution_request_args*>(
gpr_zalloc(sizeof(*args)));
args->chand = chand;
- args->lb_policy = new_lb_policy;
+ args->lb_policy = new_lb_policy.get();
GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
grpc_combiner_scheduler(chand->combiner));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
- grpc_lb_policy_set_reresolve_closure_locked(new_lb_policy,
- &args->closure);
+ new_lb_policy->SetReresolutionClosureLocked(&args->closure);
}
}
+ // Before we clean up, save a copy of lb_policy_name, since it might
+ // be pointing to data inside chand->resolver_result.
+ // The copy will be saved in chand->lb_policy_name below.
+ lb_policy_name_dup = gpr_strdup(lb_policy_name);
// Find service config.
channel_arg = grpc_channel_args_find(chand->resolver_result,
GRPC_ARG_SERVICE_CONFIG);
service_config_json =
gpr_strdup(grpc_channel_arg_get_string(channel_arg));
if (service_config_json != nullptr) {
- grpc_service_config* service_config =
- grpc_service_config_create(service_config_json);
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
+ grpc_core::ServiceConfig::Create(service_config_json);
if (service_config != nullptr) {
- channel_arg = grpc_channel_args_find(chand->resolver_result,
- GRPC_ARG_SERVER_URI);
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
- GPR_ASSERT(server_uri != nullptr);
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
- GPR_ASSERT(uri->path[0] != '\0');
- service_config_parsing_state parsing_state;
- memset(&parsing_state, 0, sizeof(parsing_state));
- parsing_state.server_name =
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
- grpc_service_config_parse_global_params(
- service_config, parse_retry_throttle_params, &parsing_state);
- grpc_uri_destroy(uri);
- retry_throttle_data = parsing_state.retry_throttle_data;
- method_params_table = grpc_service_config_create_method_config_table(
- service_config, method_parameters_create_from_json,
- method_parameters_ref_wrapper, method_parameters_unref_wrapper);
- grpc_service_config_destroy(service_config);
+ if (chand->enable_retries) {
+ channel_arg = grpc_channel_args_find(chand->resolver_result,
+ GRPC_ARG_SERVER_URI);
+ const char* server_uri = grpc_channel_arg_get_string(channel_arg);
+ GPR_ASSERT(server_uri != nullptr);
+ grpc_uri* uri = grpc_uri_parse(server_uri, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ service_config_parsing_state parsing_state;
+ memset(&parsing_state, 0, sizeof(parsing_state));
+ parsing_state.server_name =
+ uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ service_config->ParseGlobalParams(parse_retry_throttle_params,
+ &parsing_state);
+ grpc_uri_destroy(uri);
+ retry_throttle_data = parsing_state.retry_throttle_data;
+ }
+ method_params_table = service_config->CreateMethodConfigTable(
+ ClientChannelMethodParams::CreateFromJson);
}
}
- // Before we clean up, save a copy of lb_policy_name, since it might
- // be pointing to data inside chand->resolver_result.
- // The copy will be saved in chand->lb_policy_name below.
- lb_policy_name_dup = gpr_strdup(lb_policy_name);
}
grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = nullptr;
@@ -513,7 +434,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
lb_policy_name_changed ? " (changed)" : "", service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
- // be NULL if (e.g.) the resolver failed to return results or the
+ // be nullptr if (e.g.) the resolver failed to return results or the
// results did not contain the necessary data.
//
// First, swap out the data used by cc_get_channel_info().
@@ -533,29 +454,26 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
chand->retry_throttle_data = retry_throttle_data;
// Swap out the method params table.
- if (chand->method_params_table != nullptr) {
- grpc_slice_hash_table_unref(chand->method_params_table);
- }
- chand->method_params_table = method_params_table;
+ chand->method_params_table = std::move(method_params_table);
// If we have a new LB policy or are shutting down (in which case
- // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
- // and removing its fds from chand->interested_parties. Note that we do NOT do
- // this if either (a) we updated the existing LB policy above or (b) we failed
- // to create the new LB policy (in which case we want to continue using the
- // most recent one we had).
+ // new_lb_policy will be nullptr), swap out the LB policy, unreffing the
+ // old one and removing its fds from chand->interested_parties.
+ // Note that we do NOT do this if either (a) we updated the existing
+ // LB policy above or (b) we failed to create the new LB policy (in
+ // which case we want to continue using the most recent one we had).
if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
chand->resolver == nullptr) {
if (chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
- chand->lb_policy);
+ chand->lb_policy.get());
}
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
+ grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
- grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
- GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
+ chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
+ chand->lb_policy.reset();
}
- chand->lb_policy = new_lb_policy;
+ chand->lb_policy = std::move(new_lb_policy);
}
// Now that we've swapped out the relevant fields of chand, check for
// error or shutdown.
@@ -583,21 +501,20 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error* state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
- if (new_lb_policy != nullptr) {
+ if (lb_policy_created) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
}
GRPC_ERROR_UNREF(state_error);
- state =
- grpc_lb_policy_check_connectivity_locked(new_lb_policy, &state_error);
- grpc_pollset_set_add_pollset_set(new_lb_policy->interested_parties,
+ state = chand->lb_policy->CheckConnectivityLocked(&state_error);
+ grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
if (chand->exit_idle_when_lb_policy_arrives) {
- grpc_lb_policy_exit_idle_locked(new_lb_policy);
+ chand->lb_policy->ExitIdleLocked();
chand->exit_idle_when_lb_policy_arrives = false;
}
- watch_lb_policy_locked(chand, new_lb_policy, state);
+ watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
}
if (!lb_policy_updated) {
set_channel_connectivity_state_locked(
@@ -632,8 +549,8 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
op->send_ping.on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
- grpc_lb_policy_ping_one_locked(
- chand->lb_policy, op->send_ping.on_initiate, op->send_ping.on_ack);
+ chand->lb_policy->PingOneLocked(op->send_ping.on_initiate,
+ op->send_ping.on_ack);
op->bind_pollset = nullptr;
}
op->send_ping.on_initiate = nullptr;
@@ -652,11 +569,9 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
}
if (chand->lb_policy != nullptr) {
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
+ grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
- grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
- GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
- chand->lb_policy = nullptr;
+ chand->lb_policy.reset();
}
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
@@ -724,9 +639,17 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
grpc_client_channel_start_backup_polling(chand->interested_parties);
+ // Record max per-RPC retry buffer size.
+ const grpc_arg* arg = grpc_channel_args_find(
+ args->channel_args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE);
+ chand->per_rpc_retry_buffer_size = (size_t)grpc_channel_arg_get_integer(
+ arg, {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX});
+ // Record enable_retries.
+ arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
+ chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
// Record client channel factory.
- const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
- GRPC_ARG_CLIENT_CHANNEL_FACTORY);
+ arg = grpc_channel_args_find(args->channel_args,
+ GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Missing client channel factory in args for client channel filter");
@@ -786,10 +709,9 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
grpc_client_channel_factory_unref(chand->client_channel_factory);
}
if (chand->lb_policy != nullptr) {
- grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
+ grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
chand->interested_parties);
- grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
- GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
+ chand->lb_policy.reset();
}
gpr_free(chand->info_lb_policy_name);
gpr_free(chand->info_service_config_json);
@@ -797,7 +719,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
}
if (chand->method_params_table != nullptr) {
- grpc_slice_hash_table_unref(chand->method_params_table);
+ chand->method_params_table.reset();
}
grpc_client_channel_stop_backup_polling(chand->interested_parties);
grpc_connectivity_state_destroy(&chand->state_tracker);
@@ -812,15 +734,122 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
*/
// Max number of batches that can be pending on a call at any given
-// time. This includes:
+// time. This includes one batch for each of the following ops:
// recv_initial_metadata
// send_initial_metadata
// recv_message
// send_message
// recv_trailing_metadata
// send_trailing_metadata
-// We also add room for a single cancel_stream batch.
-#define MAX_WAITING_BATCHES 7
+#define MAX_PENDING_BATCHES 6
+
+// Retry support:
+//
+// In order to support retries, we act as a proxy for stream op batches.
+// When we get a batch from the surface, we add it to our list of pending
+// batches, and we then use those batches to construct separate "child"
+// batches to be started on the subchannel call. When the child batches
+// return, we then decide which pending batches have been completed and
+// schedule their callbacks accordingly. If a subchannel call fails and
+// we want to retry it, we do a new pick and start again, constructing
+// new "child" batches for the new subchannel call.
+//
+// Note that retries are committed when receiving data from the server
+// (except for Trailers-Only responses). However, there may be many
+// send ops started before receiving any data, so we may have already
+// completed some number of send ops (and returned the completions up to
+// the surface) by the time we realize that we need to retry. To deal
+// with this, we cache data for send ops, so that we can replay them on a
+// different subchannel call even after we have completed the original
+// batches.
+//
+// There are two sets of data to maintain:
+// - In call_data (in the parent channel), we maintain a list of pending
+// ops and cached data for send ops.
+// - In the subchannel call, we maintain state to indicate what ops have
+// already been sent down to that call.
+//
+// When constructing the "child" batches, we compare those two sets of
+// data to see which batches need to be sent to the subchannel call.
+
+// TODO(roth): In subsequent PRs:
+// - add support for transparent retries (including initial metadata)
+// - figure out how to record stats in census for retries
+// (census filter is on top of this one)
+// - add census stats for retries
+
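
[Editor's aside: to make the proxying scheme described above concrete, here is a minimal standalone sketch using hypothetical CallState/AttemptState types, not the actual client_channel structures. Send data is cached once per call, each attempt keeps its own progress counters (mirroring the started_*/completed_* fields defined below), and a retry replays from the cache even after the original batches completed.]

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct CallState {
  std::vector<std::string> cached_send_messages;  // survives across attempts
};

struct AttemptState {
  size_t started_send_message_count = 0;  // per-attempt progress
};

// Caches a send op once; safe to call even after the original batch's
// completion has already been returned to the surface.
void CacheSendMessage(CallState* call, const std::string& msg) {
  call->cached_send_messages.push_back(msg);
}

// Constructs the next "child" send for one attempt by comparing the
// per-call cache with the per-attempt progress counter.
bool NextChildSend(CallState* call, AttemptState* attempt, std::string* out) {
  if (attempt->started_send_message_count >=
      call->cached_send_messages.size()) {
    return false;  // nothing left to (re)send on this attempt
  }
  *out = call->cached_send_messages[attempt->started_send_message_count++];
  return true;
}

int main() {
  CallState call;
  CacheSendMessage(&call, "request-part-1");
  CacheSendMessage(&call, "request-part-2");
  // Suppose the first attempt fails after sending everything; a retry
  // starts a fresh AttemptState and replays from the cache.
  for (int attempt_num = 0; attempt_num < 2; ++attempt_num) {
    AttemptState attempt;
    std::string msg;
    while (NextChildSend(&call, &attempt, &msg)) {
      std::printf("attempt %d sends %s\n", attempt_num + 1, msg.c_str());
    }
  }
}
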
+// State used for starting a retryable batch on a subchannel call.
+// This provides its own grpc_transport_stream_op_batch and other data
+// structures needed to populate the ops in the batch.
+// We allocate one struct on the arena for each attempt at starting a
+// batch on a given subchannel call.
+typedef struct {
+ gpr_refcount refs;
+ grpc_call_element* elem;
+ grpc_subchannel_call* subchannel_call; // Holds a ref.
+ // The batch to use in the subchannel call.
+ // Its payload field points to subchannel_call_retry_state.batch_payload.
+ grpc_transport_stream_op_batch batch;
+ // For send_initial_metadata.
+ // Note that we need to make a copy of the initial metadata for each
+ // subchannel call instead of just referring to the copy in call_data,
+ // because filters in the subchannel stack will probably add entries,
+ // so we need to start in a pristine state for each attempt of the call.
+ grpc_linked_mdelem* send_initial_metadata_storage;
+ grpc_metadata_batch send_initial_metadata;
+ // For send_message.
+ grpc_caching_byte_stream send_message;
+ // For send_trailing_metadata.
+ grpc_linked_mdelem* send_trailing_metadata_storage;
+ grpc_metadata_batch send_trailing_metadata;
+ // For intercepting recv_initial_metadata.
+ grpc_metadata_batch recv_initial_metadata;
+ grpc_closure recv_initial_metadata_ready;
+ bool trailing_metadata_available;
+ // For intercepting recv_message.
+ grpc_closure recv_message_ready;
+ grpc_byte_stream* recv_message;
+ // For intercepting recv_trailing_metadata.
+ grpc_metadata_batch recv_trailing_metadata;
+ grpc_transport_stream_stats collect_stats;
+ // For intercepting on_complete.
+ grpc_closure on_complete;
+} subchannel_batch_data;
+
+// Retry state associated with a subchannel call.
+// Stored in the parent_data of the subchannel call object.
+typedef struct {
+ // subchannel_batch_data.batch.payload points to this.
+ grpc_transport_stream_op_batch_payload batch_payload;
+ // These fields indicate which ops have been started and completed on
+ // this subchannel call.
+ size_t started_send_message_count;
+ size_t completed_send_message_count;
+ size_t started_recv_message_count;
+ size_t completed_recv_message_count;
+ bool started_send_initial_metadata : 1;
+ bool completed_send_initial_metadata : 1;
+ bool started_send_trailing_metadata : 1;
+ bool completed_send_trailing_metadata : 1;
+ bool started_recv_initial_metadata : 1;
+ bool completed_recv_initial_metadata : 1;
+ bool started_recv_trailing_metadata : 1;
+ bool completed_recv_trailing_metadata : 1;
+ // State for callback processing.
+ bool retry_dispatched : 1;
+ bool recv_initial_metadata_ready_deferred : 1;
+ bool recv_message_ready_deferred : 1;
+ grpc_error* recv_initial_metadata_error;
+ grpc_error* recv_message_error;
+} subchannel_call_retry_state;
+
+// Pending batches stored in call data.
+typedef struct {
+ // The pending batch. If nullptr, this slot is empty.
+ grpc_transport_stream_op_batch* batch;
+ // Indicates whether payload for send ops has been cached in call data.
+ bool send_ops_cached;
+} pending_batch;
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -844,159 +873,1592 @@ typedef struct client_channel_call_data {
grpc_call_combiner* call_combiner;
grpc_server_retry_throttle_data* retry_throttle_data;
- method_parameters* method_params;
+ grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
grpc_subchannel_call* subchannel_call;
- grpc_error* error;
- grpc_lb_policy_pick_state pick;
- grpc_closure lb_pick_closure;
- grpc_closure lb_pick_cancel_closure;
+ // Set when we get a cancel_stream op.
+ grpc_error* cancel_error;
+
+ grpc_core::LoadBalancingPolicy::PickState pick;
+ grpc_closure pick_closure;
+ grpc_closure pick_cancel_closure;
grpc_polling_entity* pollent;
- grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
- size_t waiting_for_pick_batches_count;
- grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
+ // Batches are added to this list when received from above.
+ // They are removed when we are done handling the batch (i.e., when
+ // either we have invoked all of the batch's callbacks or we have
+ // passed the batch down to the subchannel call and are not
+ // intercepting any of its callbacks).
+ pending_batch pending_batches[MAX_PENDING_BATCHES];
+ bool pending_send_initial_metadata : 1;
+ bool pending_send_message : 1;
+ bool pending_send_trailing_metadata : 1;
+
+ // Retry state.
+ bool enable_retries : 1;
+ bool retry_committed : 1;
+ bool last_attempt_got_server_pushback : 1;
+ int num_attempts_completed;
+ size_t bytes_buffered_for_retry;
+ grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
+ grpc_timer retry_timer;
+
+ // Cached data for retrying send ops.
+ // send_initial_metadata
+ bool seen_send_initial_metadata;
+ grpc_linked_mdelem* send_initial_metadata_storage;
+ grpc_metadata_batch send_initial_metadata;
+ uint32_t send_initial_metadata_flags;
+ gpr_atm* peer_string;
+ // send_message
+ // When we get a send_message op, we replace the original byte stream
+ // with a grpc_caching_byte_stream that caches the slices to a
+ // local buffer for use in retries.
+ // Note: We inline the cache for the first 3 send_message ops and use
+ // dynamic allocation after that. This number was essentially picked
+ // at random; it could be changed in the future to tune performance.
+ grpc_core::InlinedVector<grpc_byte_stream_cache*, 3> send_messages;
+ // send_trailing_metadata
+ bool seen_send_trailing_metadata;
+ grpc_linked_mdelem* send_trailing_metadata_storage;
+ grpc_metadata_batch send_trailing_metadata;
+} call_data;
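
[Editor's aside: the InlinedVector<grpc_byte_stream_cache*, 3> above keeps the first three cached send_message ops inside call_data itself, so the common case allocates nothing on the heap. The following toy small-buffer container sketches that trade-off; it is deliberately simplified relative to grpc_core::InlinedVector.]

#include <cstddef>
#include <cstdio>
#include <vector>

template <typename T, size_t kInline>
class SmallVec {
 public:
  void push_back(const T& v) {
    if (size_ < kInline) {
      inline_[size_] = v;  // common case: stored in the object, no allocation
    } else {
      overflow_.push_back(v);  // rare case: spill to the heap
    }
    ++size_;
  }
  T& operator[](size_t i) {
    return i < kInline ? inline_[i] : overflow_[i - kInline];
  }
  size_t size() const { return size_; }

 private:
  T inline_[kInline] = {};
  std::vector<T> overflow_;
  size_t size_ = 0;
};

int main() {
  SmallVec<int, 3> v;  // mirrors the three inlined send_message slots
  for (int i = 0; i < 5; ++i) v.push_back(i * 10);
  for (size_t i = 0; i < v.size(); ++i) std::printf("%zu -> %d\n", i, v[i]);
}
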
- grpc_transport_stream_op_batch* initial_metadata_batch;
+// Forward declarations.
+static void retry_commit(grpc_call_element* elem,
+ subchannel_call_retry_state* retry_state);
+static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
+static void on_complete(void* arg, grpc_error* error);
+static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
+static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
+static void start_pick_locked(void* arg, grpc_error* ignored);
+
+//
+// send op data caching
+//
+
+// Caches data for send ops so that it can be retried later, if not
+// already cached.
+static void maybe_cache_send_ops_for_batch(call_data* calld,
+ pending_batch* pending) {
+ if (pending->send_ops_cached) return;
+ pending->send_ops_cached = true;
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ // Save a copy of metadata for send_initial_metadata ops.
+ if (batch->send_initial_metadata) {
+ calld->seen_send_initial_metadata = true;
+ GPR_ASSERT(calld->send_initial_metadata_storage == nullptr);
+ grpc_metadata_batch* send_initial_metadata =
+ batch->payload->send_initial_metadata.send_initial_metadata;
+ calld->send_initial_metadata_storage = (grpc_linked_mdelem*)gpr_arena_alloc(
+ calld->arena,
+ sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
+ grpc_metadata_batch_copy(send_initial_metadata,
+ &calld->send_initial_metadata,
+ calld->send_initial_metadata_storage);
+ calld->send_initial_metadata_flags =
+ batch->payload->send_initial_metadata.send_initial_metadata_flags;
+ calld->peer_string = batch->payload->send_initial_metadata.peer_string;
+ }
+ // Set up cache for send_message ops.
+ if (batch->send_message) {
+ grpc_byte_stream_cache* cache = (grpc_byte_stream_cache*)gpr_arena_alloc(
+ calld->arena, sizeof(grpc_byte_stream_cache));
+ grpc_byte_stream_cache_init(cache,
+ batch->payload->send_message.send_message);
+ calld->send_messages.push_back(cache);
+ }
+ // Save metadata batch for send_trailing_metadata ops.
+ if (batch->send_trailing_metadata) {
+ calld->seen_send_trailing_metadata = true;
+ GPR_ASSERT(calld->send_trailing_metadata_storage == nullptr);
+ grpc_metadata_batch* send_trailing_metadata =
+ batch->payload->send_trailing_metadata.send_trailing_metadata;
+ calld->send_trailing_metadata_storage =
+ (grpc_linked_mdelem*)gpr_arena_alloc(
+ calld->arena,
+ sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
+ grpc_metadata_batch_copy(send_trailing_metadata,
+ &calld->send_trailing_metadata,
+ calld->send_trailing_metadata_storage);
+ }
+}
- grpc_closure on_complete;
- grpc_closure* original_on_complete;
-} call_data;
+// Frees cached send ops that have already been completed after
+// committing the call.
+static void free_cached_send_op_data_after_commit(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (retry_state->completed_send_initial_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ }
+ for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
+ "]",
+ chand, calld, i);
+ }
+ grpc_byte_stream_cache_destroy(calld->send_messages[i]);
+ }
+ if (retry_state->completed_send_trailing_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ }
+}
-grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
- grpc_call_element* elem) {
+// Frees cached send ops that were completed by the completed batch in
+// batch_data. Used when batches are completed after the call is committed.
+static void free_cached_send_op_data_for_completed_batch(
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
- return calld->subchannel_call;
+ if (batch_data->batch.send_initial_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ }
+ if (batch_data->batch.send_message) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
+ "]",
+ chand, calld, retry_state->completed_send_message_count - 1);
+ }
+ grpc_byte_stream_cache_destroy(
+ calld->send_messages[retry_state->completed_send_message_count - 1]);
+ }
+ if (batch_data->batch.send_trailing_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ }
+}
+
+//
+// pending_batches management
+//
+
+// Returns the index into calld->pending_batches to be used for batch.
+static size_t get_batch_index(grpc_transport_stream_op_batch* batch) {
+ // Note: It is important the send_initial_metadata be the first entry
+ // here, since the code in pick_subchannel_locked() assumes it will be.
+ if (batch->send_initial_metadata) return 0;
+ if (batch->send_message) return 1;
+ if (batch->send_trailing_metadata) return 2;
+ if (batch->recv_initial_metadata) return 3;
+ if (batch->recv_message) return 4;
+ if (batch->recv_trailing_metadata) return 5;
+ GPR_UNREACHABLE_CODE(return (size_t)-1);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_add(
- call_data* calld, grpc_transport_stream_op_batch* batch) {
- if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->initial_metadata_batch == nullptr);
- calld->initial_metadata_batch = batch;
- } else {
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
- batch;
+static void pending_batches_add(grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ const size_t idx = get_batch_index(batch);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
+ calld, idx);
+ }
+ pending_batch* pending = &calld->pending_batches[idx];
+ GPR_ASSERT(pending->batch == nullptr);
+ pending->batch = batch;
+ pending->send_ops_cached = false;
+ if (calld->enable_retries) {
+ // Update state in calld about pending batches.
+ // Also check if the batch takes us over the retry buffer limit.
+ // Note: We don't check the size of trailing metadata here, because
+ // gRPC clients do not send trailing metadata.
+ if (batch->send_initial_metadata) {
+ calld->pending_send_initial_metadata = true;
+ calld->bytes_buffered_for_retry += grpc_metadata_batch_size(
+ batch->payload->send_initial_metadata.send_initial_metadata);
+ }
+ if (batch->send_message) {
+ calld->pending_send_message = true;
+ calld->bytes_buffered_for_retry +=
+ batch->payload->send_message.send_message->length;
+ }
+ if (batch->send_trailing_metadata) {
+ calld->pending_send_trailing_metadata = true;
+ }
+ if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: exceeded retry buffer size, committing",
+ chand, calld);
+ }
+ subchannel_call_retry_state* retry_state =
+ calld->subchannel_call == nullptr
+ ? nullptr
+ : static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ retry_commit(elem, retry_state);
+ // If we are not going to retry and have not yet started, pretend
+ // retries are disabled so that we don't bother with retry overhead.
+ if (calld->num_attempts_completed == 0) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: disabling retries before first attempt",
+ chand, calld);
+ }
+ calld->enable_retries = false;
+ }
+ }
+ }
+}
+
+static void pending_batch_clear(call_data* calld, pending_batch* pending) {
+ if (calld->enable_retries) {
+ if (pending->batch->send_initial_metadata) {
+ calld->pending_send_initial_metadata = false;
+ }
+ if (pending->batch->send_message) {
+ calld->pending_send_message = false;
+ }
+ if (pending->batch->send_trailing_metadata) {
+ calld->pending_send_trailing_metadata = false;
+ }
}
+ pending->batch = nullptr;
}
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
- call_data* calld = static_cast<call_data*>(arg);
- if (calld->waiting_for_pick_batches_count > 0) {
- --calld->waiting_for_pick_batches_count;
- grpc_transport_stream_op_batch_finish_with_failure(
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
- GRPC_ERROR_REF(error), calld->call_combiner);
- }
+ grpc_transport_stream_op_batch* batch =
+ static_cast<grpc_transport_stream_op_batch*>(arg);
+ call_data* calld = static_cast<call_data*>(batch->handler_private.extra_arg);
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batch, GRPC_ERROR_REF(error), calld->call_combiner);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_call_element* elem,
- grpc_error* error) {
+// If yield_call_combiner is true, assumes responsibility for yielding
+// the call combiner.
+static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
+ bool yield_call_combiner) {
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ if (calld->pending_batches[i].batch != nullptr) ++num_batches;
+ }
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
- grpc_error_string(error));
+ elem->channel_data, calld, num_batches, grpc_error_string(error));
+ }
+ grpc_transport_stream_op_batch*
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch != nullptr) {
+ batches[num_batches++] = batch;
+ pending_batch_clear(calld, pending);
+ }
}
- for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
- fail_pending_batch_in_call_combiner, calld,
+ for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
+ grpc_transport_stream_op_batch* batch = batches[i];
+ batch->handler_private.extra_arg = calld;
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+ fail_pending_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
- GRPC_CALL_COMBINER_START(
- calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
- GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
- }
- if (calld->initial_metadata_batch != nullptr) {
- grpc_transport_stream_op_batch_finish_with_failure(
- calld->initial_metadata_batch, GRPC_ERROR_REF(error),
- calld->call_combiner);
- } else {
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
- "waiting_for_pick_batches_fail");
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
+ &batch->handler_private.closure,
+ GRPC_ERROR_REF(error), "pending_batches_fail");
+ }
+ if (yield_call_combiner) {
+ if (num_batches > 0) {
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
+ } else {
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
+ }
}
GRPC_ERROR_UNREF(error);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
- call_data* calld = static_cast<call_data*>(arg);
- if (calld->waiting_for_pick_batches_count > 0) {
- --calld->waiting_for_pick_batches_count;
- grpc_subchannel_call_process_op(
- calld->subchannel_call,
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
- }
+static void resume_pending_batch_in_call_combiner(void* arg,
+ grpc_error* ignored) {
+ grpc_transport_stream_op_batch* batch =
+ static_cast<grpc_transport_stream_op_batch*>(arg);
+ grpc_subchannel_call* subchannel_call =
+ static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(subchannel_call, batch);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
+static void pending_batches_resume(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (calld->enable_retries) {
+ start_retriable_subchannel_batches(elem, GRPC_ERROR_NONE);
+ return;
+ }
+ // Retries not enabled; send down batches as-is.
if (grpc_client_channel_trace.enabled()) {
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ if (calld->pending_batches[i].batch != nullptr) ++num_batches;
+ }
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending %" PRIuPTR
- " pending batches to subchannel_call=%p",
- chand, calld, calld->waiting_for_pick_batches_count,
- calld->subchannel_call);
- }
- for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
- run_pending_batch_in_call_combiner, calld,
+ "chand=%p calld=%p: starting %" PRIuPTR
+ " pending batches on subchannel_call=%p",
+ chand, calld, num_batches, calld->subchannel_call);
+ }
+ grpc_transport_stream_op_batch*
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch != nullptr) {
+ batches[num_batches++] = batch;
+ pending_batch_clear(calld, pending);
+ }
+ }
+ for (size_t i = 1; i < num_batches; ++i) {
+ grpc_transport_stream_op_batch* batch = batches[i];
+ batch->handler_private.extra_arg = calld->subchannel_call;
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+ resume_pending_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
- GRPC_CALL_COMBINER_START(
- calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
- GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
+ &batch->handler_private.closure, GRPC_ERROR_NONE,
+ "pending_batches_resume");
}
- GPR_ASSERT(calld->initial_metadata_batch != nullptr);
- grpc_subchannel_call_process_op(calld->subchannel_call,
- calld->initial_metadata_batch);
+ GPR_ASSERT(num_batches > 0);
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
}
-// Applies service config to the call. Must be invoked once we know
-// that the resolver has returned results to the channel.
-static void apply_service_config_to_call_locked(grpc_call_element* elem) {
+static void maybe_clear_pending_batch(grpc_call_element* elem,
+ pending_batch* pending) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ // We clear the pending batch if all of its callbacks have been
+ // scheduled and reset to nullptr.
+ if (batch->on_complete == nullptr &&
+ (!batch->recv_initial_metadata ||
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
+ nullptr) &&
+ (!batch->recv_message ||
+ batch->payload->recv_message.recv_message_ready == nullptr)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
+ calld);
+ }
+ pending_batch_clear(calld, pending);
+ }
+}
+
+// Returns true if all ops in the pending batch have been completed.
+static bool pending_batch_is_completed(
+ pending_batch* pending, call_data* calld,
+ subchannel_call_retry_state* retry_state) {
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
+ return false;
+ }
+ if (pending->batch->send_initial_metadata &&
+ !retry_state->completed_send_initial_metadata) {
+ return false;
+ }
+ if (pending->batch->send_message &&
+ retry_state->completed_send_message_count < calld->send_messages.size()) {
+ return false;
+ }
+ if (pending->batch->send_trailing_metadata &&
+ !retry_state->completed_send_trailing_metadata) {
+ return false;
+ }
+ if (pending->batch->recv_initial_metadata &&
+ !retry_state->completed_recv_initial_metadata) {
+ return false;
+ }
+ if (pending->batch->recv_message &&
+ retry_state->completed_recv_message_count <
+ retry_state->started_recv_message_count) {
+ return false;
+ }
+ if (pending->batch->recv_trailing_metadata &&
+ !retry_state->completed_recv_trailing_metadata) {
+ return false;
+ }
+ return true;
+}
+
+// Returns true if any op in the batch was not yet started.
+static bool pending_batch_is_unstarted(
+ pending_batch* pending, call_data* calld,
+ subchannel_call_retry_state* retry_state) {
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
+ return false;
+ }
+ if (pending->batch->send_initial_metadata &&
+ !retry_state->started_send_initial_metadata) {
+ return true;
+ }
+ if (pending->batch->send_message &&
+ retry_state->started_send_message_count < calld->send_messages.size()) {
+ return true;
+ }
+ if (pending->batch->send_trailing_metadata &&
+ !retry_state->started_send_trailing_metadata) {
+ return true;
+ }
+ if (pending->batch->recv_initial_metadata &&
+ !retry_state->started_recv_initial_metadata) {
+ return true;
+ }
+ if (pending->batch->recv_message &&
+ retry_state->completed_recv_message_count ==
+ retry_state->started_recv_message_count) {
+ return true;
+ }
+ if (pending->batch->recv_trailing_metadata &&
+ !retry_state->started_recv_trailing_metadata) {
+ return true;
+ }
+ return false;
+}
+
+//
+// retry code
+//
+
+// Commits the call so that no further retry attempts will be performed.
+static void retry_commit(grpc_call_element* elem,
+ subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (calld->retry_committed) return;
+ calld->retry_committed = true;
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
+ }
+ if (retry_state != nullptr) {
+ free_cached_send_op_data_after_commit(elem, retry_state);
+ }
+}
+
+// Starts a retry after appropriate back-off.
+static void do_retry(grpc_call_element* elem,
+ subchannel_call_retry_state* retry_state,
+ grpc_millis server_pushback_ms) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ GPR_ASSERT(calld->method_params != nullptr);
+ const ClientChannelMethodParams::RetryPolicy* retry_policy =
+ calld->method_params->retry_policy();
+ GPR_ASSERT(retry_policy != nullptr);
+ // Reset subchannel call and connected subchannel.
+ if (calld->subchannel_call != nullptr) {
+ GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
+ "client_channel_call_retry");
+ calld->subchannel_call = nullptr;
+ }
+ if (calld->pick.connected_subchannel != nullptr) {
+ calld->pick.connected_subchannel.reset();
+ }
+ // Compute backoff delay.
+ grpc_millis next_attempt_time;
+ if (server_pushback_ms >= 0) {
+ next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms;
+ calld->last_attempt_got_server_pushback = true;
+ } else {
+ if (calld->num_attempts_completed == 1 ||
+ calld->last_attempt_got_server_pushback) {
+ calld->retry_backoff.Init(
+ grpc_core::BackOff::Options()
+ .set_initial_backoff(retry_policy->initial_backoff)
+ .set_multiplier(retry_policy->backoff_multiplier)
+ .set_jitter(RETRY_BACKOFF_JITTER)
+ .set_max_backoff(retry_policy->max_backoff));
+ calld->last_attempt_got_server_pushback = false;
+ }
+ next_attempt_time = calld->retry_backoff->NextAttemptTime();
+ }
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
+ calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
+ }
+ // Schedule retry after computed delay.
+ GRPC_CLOSURE_INIT(&calld->pick_closure, start_pick_locked, elem,
+ grpc_combiner_scheduler(chand->combiner));
+ grpc_timer_init(&calld->retry_timer, next_attempt_time, &calld->pick_closure);
+ // Update bookkeeping.
+ if (retry_state != nullptr) retry_state->retry_dispatched = true;
+}
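
// Illustrative sketch, not part of this patch: the delay computation that
// do_retry() delegates to grpc_core::BackOff, in plain C++. The backoff
// grows as initial * multiplier^(n-1), capped at max, with multiplicative
// jitter; as above, the backoff state is reset after an attempt that got a
// server pushback. The constants in main() are hypothetical examples.
#include <algorithm>
#include <cstdio>
#include <random>

double RetryBackoffMs(int attempt /* 1 = first retry */, double initial_ms,
                      double multiplier, double max_ms, double jitter) {
  double backoff = initial_ms;
  for (int i = 1; i < attempt; ++i) {
    backoff = std::min(backoff * multiplier, max_ms);
  }
  static std::mt19937 rng{std::random_device{}()};
  std::uniform_real_distribution<double> dist(1.0 - jitter, 1.0 + jitter);
  return backoff * dist(rng);
}

int main() {
  // E.g. initial_backoff=100ms, multiplier=2, max_backoff=1s, jitter=0.2.
  for (int n = 1; n <= 5; ++n) {
    std::printf("retry %d: ~%.0f ms\n", n,
                RetryBackoffMs(n, 100.0, 2.0, 1000.0, 0.2));
  }
}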
+
+// Returns true if the call is being retried.
+static bool maybe_retry(grpc_call_element* elem,
+ subchannel_batch_data* batch_data,
+ grpc_status_code status,
+ grpc_mdelem* server_pushback_md) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ // Get retry policy.
+ if (calld->method_params == nullptr) return false;
+ const ClientChannelMethodParams::RetryPolicy* retry_policy =
+ calld->method_params->retry_policy();
+ if (retry_policy == nullptr) return false;
+ // If we've already dispatched a retry from this call, return true.
+ // This catches the case where the batch has multiple callbacks
+ // (i.e., it includes either recv_message or recv_initial_metadata).
+ subchannel_call_retry_state* retry_state = nullptr;
+ if (batch_data != nullptr) {
+ retry_state = static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ if (retry_state->retry_dispatched) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
+ calld);
+ }
+ return true;
+ }
+ }
+ // Check status.
+ if (status == GRPC_STATUS_OK) {
+    // retry_throttle_data may be unset if the service config does not
+    // configure throttling.
+    if (calld->retry_throttle_data != nullptr) {
+      grpc_server_retry_throttle_data_record_success(
+          calld->retry_throttle_data);
+    }
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
+ }
+ return false;
+ }
+ // Status is not OK. Check whether the status is retryable.
+ if (!retry_policy->retryable_status_codes.Contains(status)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: status %s not configured as retryable", chand,
+ calld, grpc_status_code_to_string(status));
+ }
+ return false;
+ }
+ // Record the failure and check whether retries are throttled.
+ // Note that it's important for this check to come after the status
+ // code check above, since we should only record failures whose statuses
+ // match the configured retryable status codes, so that we don't count
+ // things like failures due to malformed requests (INVALID_ARGUMENT).
+ // Conversely, it's important for this to come before the remaining
+ // checks, so that we don't fail to record failures due to other factors.
+  if (calld->retry_throttle_data != nullptr &&
+      !grpc_server_retry_throttle_data_record_failure(
+          calld->retry_throttle_data)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
+ }
+ return false;
+ }
+ // Check whether the call is committed.
+ if (calld->retry_committed) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
+ calld);
+ }
+ return false;
+ }
+ // Check whether we have retries remaining.
+ ++calld->num_attempts_completed;
+ if (calld->num_attempts_completed >= retry_policy->max_attempts) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
+ calld, retry_policy->max_attempts);
+ }
+ return false;
+ }
+ // If the call was cancelled from the surface, don't retry.
+ if (calld->cancel_error != GRPC_ERROR_NONE) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: call cancelled from surface, not retrying",
+ chand, calld);
+ }
+ return false;
+ }
+ // Check server push-back.
+ grpc_millis server_pushback_ms = -1;
+ if (server_pushback_md != nullptr) {
+ // If the value is "-1" or any other unparseable string, we do not retry.
+ uint32_t ms;
+ if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: not retrying due to server push-back",
+ chand, calld);
+ }
+ return false;
+ } else {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: server push-back: retry in %u ms", chand,
+ calld, ms);
+ }
+ server_pushback_ms = (grpc_millis)ms;
+ }
+ }
+ do_retry(elem, retry_state, server_pushback_ms);
+ return true;
+}
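
// Illustrative sketch, not part of this patch: the decision sequence in
// maybe_retry() as a standalone predicate over simplified stand-in types.
// Ordering matters: the status check comes first so that only configured
// retryable failures are recorded against the throttle, and the throttle
// check precedes the remaining checks so those failures are always
// recorded. Server pushback handling is omitted here.
#include <set>

struct RetryPolicySketch {
  int max_attempts;
  std::set<int> retryable_status_codes;
};

struct CallSketch {
  int num_attempts_completed = 0;
  bool retry_committed = false;
  bool cancelled_from_surface = false;
  bool throttle_allows_retry = true;  // token-bucket result, as in the file
};

bool ShouldRetry(const RetryPolicySketch& policy, CallSketch& call,
                 int status, int status_ok = 0) {
  if (status == status_ok) return false;  // call succeeded
  if (policy.retryable_status_codes.count(status) == 0) return false;
  if (!call.throttle_allows_retry) return false;
  if (call.retry_committed) return false;
  // The attempt counter is bumped before the budget check, as above.
  if (++call.num_attempts_completed >= policy.max_attempts) return false;
  if (call.cancelled_from_surface) return false;
  return true;
}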
+
+//
+// subchannel_batch_data
+//
+
+static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
+ int refcount) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
+ gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
+ batch_data->elem = elem;
+ batch_data->subchannel_call =
+ GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
+ batch_data->batch.payload = &retry_state->batch_payload;
+ gpr_ref_init(&batch_data->refs, refcount);
+ GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
+ grpc_schedule_on_exec_ctx);
+ batch_data->batch.on_complete = &batch_data->on_complete;
+ GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
+ return batch_data;
+}
+
+static void batch_data_unref(subchannel_batch_data* batch_data) {
+ if (gpr_unref(&batch_data->refs)) {
+ if (batch_data->send_initial_metadata_storage != nullptr) {
+ grpc_metadata_batch_destroy(&batch_data->send_initial_metadata);
+ }
+ if (batch_data->send_trailing_metadata_storage != nullptr) {
+ grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata);
+ }
+ if (batch_data->batch.recv_initial_metadata) {
+ grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata);
+ }
+ if (batch_data->batch.recv_trailing_metadata) {
+ grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+ }
+}
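
// Illustrative sketch, not part of this patch: the lifetime scheme behind
// batch_data_create()/batch_data_unref(). The struct is allocated from the
// call's arena, which is freed in bulk when the call is destroyed, so
// dropping the last ref destroys owned sub-objects and releases refs but
// never frees the struct's own memory. Stand-in types, not gpr_ref/arena.
#include <atomic>

struct BatchDataSketch {
  std::atomic<int> refs{1};
  // ... owned metadata batches, a ref on the subchannel call, etc.
};

void BatchDataUnrefSketch(BatchDataSketch* bd) {
  if (bd->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    // Last ref: destroy owned resources and drop the subchannel call and
    // call stack refs here. The memory itself stays in the arena.
  }
}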
+
+//
+// recv_initial_metadata callback handling
+//
+
+// Invokes recv_initial_metadata_ready for a subchannel batch.
+static void invoke_recv_initial_metadata_callback(void* arg,
+ grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ channel_data* chand =
+ static_cast<channel_data*>(batch_data->elem->channel_data);
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
+ // Find pending batch.
+ pending_batch* pending = nullptr;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
+ if (batch != nullptr && batch->recv_initial_metadata &&
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
+ nullptr) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
+ "pending batch at index %" PRIuPTR,
+ chand, calld, i);
+ }
+ pending = &calld->pending_batches[i];
+ break;
+ }
+ }
+ GPR_ASSERT(pending != nullptr);
+ // Return metadata.
+ grpc_metadata_batch_move(
+ &batch_data->recv_initial_metadata,
+ pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
+ // Update bookkeeping.
+ // Note: Need to do this before invoking the callback, since invoking
+ // the callback will result in yielding the call combiner.
+ grpc_closure* recv_initial_metadata_ready =
+ pending->batch->payload->recv_initial_metadata
+ .recv_initial_metadata_ready;
+ pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ nullptr;
+ maybe_clear_pending_batch(batch_data->elem, pending);
+ batch_data_unref(batch_data);
+ // Invoke callback.
+ GRPC_CLOSURE_RUN(recv_initial_metadata_ready, GRPC_ERROR_REF(error));
+}
+
+// Intercepts recv_initial_metadata_ready callback for retries.
+// Commits the call and returns the initial metadata up the stack.
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ grpc_call_element* elem = batch_data->elem;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
+ chand, calld, grpc_error_string(error));
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ // If we got an error or a Trailers-Only response and have not yet gotten
+ // the recv_trailing_metadata on_complete callback, then defer
+ // propagating this callback back to the surface. We can evaluate whether
+ // to retry when recv_trailing_metadata comes back.
+ if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
+ !retry_state->completed_recv_trailing_metadata) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: deferring recv_initial_metadata_ready "
+ "(Trailers-Only)",
+ chand, calld);
+ }
+ retry_state->recv_initial_metadata_ready_deferred = true;
+ retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
+ if (!retry_state->started_recv_trailing_metadata) {
+ // recv_trailing_metadata not yet started by application; start it
+ // ourselves to get status.
+ start_internal_recv_trailing_metadata(elem);
+ } else {
+ GRPC_CALL_COMBINER_STOP(
+ calld->call_combiner,
+ "recv_initial_metadata_ready trailers-only or error");
+ }
+ return;
+ }
+ // Received valid initial metadata, so commit the call.
+ retry_commit(elem, retry_state);
+ // Manually invoking a callback function; it does not take ownership of error.
+ invoke_recv_initial_metadata_callback(batch_data, error);
+ GRPC_ERROR_UNREF(error);
+}
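
// Illustrative sketch, not part of this patch: the deferral rule shared by
// recv_initial_metadata_ready() above and recv_message_ready() below, as a
// standalone decision function with stand-in types. If the attempt may
// still be retried (a failure or Trailers-Only response was seen and
// trailing metadata has not completed), the surface callback and its error
// are held until status arrives; otherwise the call is committed and the
// result surfaced immediately.
struct AttemptStateSketch {
  bool started_recv_trailing_metadata;
  bool completed_recv_trailing_metadata;
};

enum class RecvAction {
  Defer,
  DeferAndStartTrailingMetadata,
  CommitAndSurface
};

RecvAction OnRecvCallback(bool failure_or_trailers_only,
                          const AttemptStateSketch& s) {
  if (failure_or_trailers_only && !s.completed_recv_trailing_metadata) {
    // We need status to decide whether to retry; fetch it ourselves if the
    // application has not asked for it yet.
    return s.started_recv_trailing_metadata
               ? RecvAction::Defer
               : RecvAction::DeferAndStartTrailingMetadata;
  }
  return RecvAction::CommitAndSurface;
}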
+
+//
+// recv_message callback handling
+//
+
+// Invokes recv_message_ready for a subchannel batch.
+static void invoke_recv_message_callback(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ channel_data* chand =
+ static_cast<channel_data*>(batch_data->elem->channel_data);
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
+ // Find pending op.
+ pending_batch* pending = nullptr;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
+ if (batch != nullptr && batch->recv_message &&
+ batch->payload->recv_message.recv_message_ready != nullptr) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: invoking recv_message_ready for "
+ "pending batch at index %" PRIuPTR,
+ chand, calld, i);
+ }
+ pending = &calld->pending_batches[i];
+ break;
+ }
+ }
+ GPR_ASSERT(pending != nullptr);
+ // Return payload.
+ *pending->batch->payload->recv_message.recv_message =
+ batch_data->recv_message;
+ // Update bookkeeping.
+ // Note: Need to do this before invoking the callback, since invoking
+ // the callback will result in yielding the call combiner.
+ grpc_closure* recv_message_ready =
+ pending->batch->payload->recv_message.recv_message_ready;
+ pending->batch->payload->recv_message.recv_message_ready = nullptr;
+ maybe_clear_pending_batch(batch_data->elem, pending);
+ batch_data_unref(batch_data);
+ // Invoke callback.
+ GRPC_CLOSURE_RUN(recv_message_ready, GRPC_ERROR_REF(error));
+}
+
+// Intercepts recv_message_ready callback for retries.
+// Commits the call and returns the message up the stack.
+static void recv_message_ready(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ grpc_call_element* elem = batch_data->elem;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
+ chand, calld, grpc_error_string(error));
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ // If we got an error or the payload was nullptr and we have not yet gotten
+ // the recv_trailing_metadata on_complete callback, then defer
+ // propagating this callback back to the surface. We can evaluate whether
+ // to retry when recv_trailing_metadata comes back.
+ if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
+ !retry_state->completed_recv_trailing_metadata) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: deferring recv_message_ready (nullptr "
+ "message and recv_trailing_metadata pending)",
+ chand, calld);
+ }
+ retry_state->recv_message_ready_deferred = true;
+ retry_state->recv_message_error = GRPC_ERROR_REF(error);
+ if (!retry_state->started_recv_trailing_metadata) {
+ // recv_trailing_metadata not yet started by application; start it
+ // ourselves to get status.
+ start_internal_recv_trailing_metadata(elem);
+ } else {
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "recv_message_ready null");
+ }
+ return;
+ }
+ // Received a valid message, so commit the call.
+ retry_commit(elem, retry_state);
+ // Manually invoking a callback function; it does not take ownership of error.
+ invoke_recv_message_callback(batch_data, error);
+ GRPC_ERROR_UNREF(error);
+}
+
+//
+// on_complete callback handling
+//
+
+// Updates retry_state to reflect the ops completed in batch_data.
+static void update_retry_state_for_completed_batch(
+ subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state) {
+ if (batch_data->batch.send_initial_metadata) {
+ retry_state->completed_send_initial_metadata = true;
+ }
+ if (batch_data->batch.send_message) {
+ ++retry_state->completed_send_message_count;
+ }
+ if (batch_data->batch.send_trailing_metadata) {
+ retry_state->completed_send_trailing_metadata = true;
+ }
+ if (batch_data->batch.recv_initial_metadata) {
+ retry_state->completed_recv_initial_metadata = true;
+ }
+ if (batch_data->batch.recv_message) {
+ ++retry_state->completed_recv_message_count;
+ }
+ if (batch_data->batch.recv_trailing_metadata) {
+ retry_state->completed_recv_trailing_metadata = true;
+ }
+}
+
+// Represents a closure that needs to run as a result of a completed batch.
+typedef struct {
+ grpc_closure* closure;
+ grpc_error* error;
+ const char* reason;
+} closure_to_execute;
+
+// Adds any necessary closures for deferred recv_initial_metadata and
+// recv_message callbacks to closures, updating *num_closures as needed.
+static void add_closures_for_deferred_recv_callbacks(
+ subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
+ closure_to_execute* closures, size_t* num_closures) {
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_initial_metadata_ready_deferred) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure =
+ GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
+ invoke_recv_initial_metadata_callback, batch_data,
+ grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_initial_metadata_error;
+ closure->reason = "resuming recv_initial_metadata_ready";
+ }
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_message_ready_deferred) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
+ invoke_recv_message_callback,
+ batch_data, grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_message_error;
+ closure->reason = "resuming recv_message_ready";
+ }
+}
+
+// If there are any cached ops to replay or pending ops to start on the
+// subchannel call, adds a closure to closures to invoke
+// start_retriable_subchannel_batches(), updating *num_closures as needed.
+static void add_closures_for_replay_or_pending_send_ops(
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state, closure_to_execute* closures,
+ size_t* num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ bool have_pending_send_message_ops =
+ retry_state->started_send_message_count < calld->send_messages.size();
+ bool have_pending_send_trailing_metadata_op =
+ calld->seen_send_trailing_metadata &&
+ !retry_state->started_send_trailing_metadata;
+ if (!have_pending_send_message_ops &&
+ !have_pending_send_trailing_metadata_op) {
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch == nullptr || pending->send_ops_cached) continue;
+ if (batch->send_message) have_pending_send_message_ops = true;
+ if (batch->send_trailing_metadata) {
+ have_pending_send_trailing_metadata_op = true;
+ }
+ }
+ }
+ if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: starting next batch for pending send op(s)",
+ chand, calld);
+ }
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(
+ &batch_data->batch.handler_private.closure,
+ start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx);
+ closure->error = GRPC_ERROR_NONE;
+ closure->reason = "starting next batch for send_* op(s)";
+ }
+}
+
+// For any pending batch completed in batch_data, adds the necessary
+// completion closures to closures, updating *num_closures as needed.
+static void add_closures_for_completed_pending_batches(
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state, grpc_error* error,
+ closure_to_execute* closures, size_t* num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ if (pending_batch_is_completed(pending, calld, retry_state)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
+ chand, calld, i);
+ }
+ // Copy the trailing metadata to return it to the surface.
+ if (batch_data->batch.recv_trailing_metadata) {
+ grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
+ pending->batch->payload->recv_trailing_metadata
+ .recv_trailing_metadata);
+ }
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = pending->batch->on_complete;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason = "on_complete for pending batch";
+ pending->batch->on_complete = nullptr;
+ maybe_clear_pending_batch(elem, pending);
+ }
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+// For any pending batch containing an op that has not yet been started,
+// adds the pending batch's completion closures to closures, updating
+// *num_closures as needed.
+static void add_closures_to_fail_unstarted_pending_batches(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+ grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ if (pending_batch_is_unstarted(pending, calld, retry_state)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: failing unstarted pending batch at index "
+ "%" PRIuPTR,
+ chand, calld, i);
+ }
+ if (pending->batch->recv_initial_metadata) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = pending->batch->payload->recv_initial_metadata
+ .recv_initial_metadata_ready;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason =
+ "failing recv_initial_metadata_ready for pending batch";
+ pending->batch->payload->recv_initial_metadata
+ .recv_initial_metadata_ready = nullptr;
+ }
+ if (pending->batch->recv_message) {
+ *pending->batch->payload->recv_message.recv_message = nullptr;
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure =
+ pending->batch->payload->recv_message.recv_message_ready;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason = "failing recv_message_ready for pending batch";
+ pending->batch->payload->recv_message.recv_message_ready = nullptr;
+ }
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = pending->batch->on_complete;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason = "failing on_complete for pending batch";
+ pending->batch->on_complete = nullptr;
+ maybe_clear_pending_batch(elem, pending);
+ }
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+// Callback used to intercept on_complete from subchannel calls.
+// Called only when retries are enabled.
+static void on_complete(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ grpc_call_element* elem = batch_data->elem;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
+ chand, calld, grpc_error_string(error), batch_str);
+ gpr_free(batch_str);
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ // If we have previously completed recv_trailing_metadata, then the
+ // call is finished.
+ bool call_finished = retry_state->completed_recv_trailing_metadata;
+ // Update bookkeeping in retry_state.
+ update_retry_state_for_completed_batch(batch_data, retry_state);
+ if (call_finished) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
+ calld);
+ }
+ } else {
+ // Check if this batch finished the call, and if so, get its status.
+ // The call is finished if either (a) this callback was invoked with
+ // an error or (b) we receive status.
+ grpc_status_code status = GRPC_STATUS_OK;
+ grpc_mdelem* server_pushback_md = nullptr;
+ if (error != GRPC_ERROR_NONE) { // Case (a).
+ call_finished = true;
+ grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
+ nullptr);
+ } else if (batch_data->batch.recv_trailing_metadata) { // Case (b).
+ call_finished = true;
+ grpc_metadata_batch* md_batch =
+ batch_data->batch.payload->recv_trailing_metadata
+ .recv_trailing_metadata;
+ GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
+ status = grpc_get_status_code_from_metadata(
+ md_batch->idx.named.grpc_status->md);
+ if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
+ server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
+ }
+ } else if (retry_state->completed_recv_trailing_metadata) {
+ call_finished = true;
+ }
+ if (call_finished && grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
+ calld, grpc_status_code_to_string(status));
+ }
+ // If the call is finished, check if we should retry.
+ if (call_finished &&
+ maybe_retry(elem, batch_data, status, server_pushback_md)) {
+ // Unref batch_data for deferred recv_initial_metadata_ready or
+ // recv_message_ready callbacks, if any.
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_initial_metadata_ready_deferred) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+ }
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_message_ready_deferred) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_message_error);
+ }
+ batch_data_unref(batch_data);
+ return;
+ }
+ }
+ // If the call is finished or retries are committed, free cached data for
+ // send ops that we've just completed.
+ if (call_finished || calld->retry_committed) {
+ free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
+ }
+ // Call not being retried.
+ // Construct list of closures to execute.
+ // Max number of closures is number of pending batches plus one for
+ // each of:
+ // - recv_initial_metadata_ready (either deferred or unstarted)
+ // - recv_message_ready (either deferred or unstarted)
+ // - starting a new batch for pending send ops
+ closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3];
+ size_t num_closures = 0;
+ // If there are deferred recv_initial_metadata_ready or recv_message_ready
+ // callbacks, add them to closures.
+ add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures,
+ &num_closures);
+ // Find pending batches whose ops are now complete and add their
+ // on_complete callbacks to closures.
+ add_closures_for_completed_pending_batches(elem, batch_data, retry_state,
+ GRPC_ERROR_REF(error), closures,
+ &num_closures);
+ // Add closures to handle any pending batches that have not yet been started.
+ // If the call is finished, we fail these batches; otherwise, we add a
+ // callback to start_retriable_subchannel_batches() to start them on
+ // the subchannel call.
+ if (call_finished) {
+ add_closures_to_fail_unstarted_pending_batches(
+ elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
+ } else {
+ add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
+ closures, &num_closures);
+ }
+ // Don't need batch_data anymore.
+ batch_data_unref(batch_data);
+ // Schedule all of the closures identified above.
+ // Note that the call combiner will be yielded for each closure that
+ // we schedule. We're already running in the call combiner, so one of
+ // the closures can be scheduled directly, but the others will
+ // have to re-enter the call combiner.
+ if (num_closures > 0) {
+ GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
+ for (size_t i = 1; i < num_closures; ++i) {
+ GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
+ closures[i].error, closures[i].reason);
+ }
+ } else {
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+ "no closures to run for on_complete");
+ }
+}
+
+//
+// subchannel batch construction
+//
+
+// Helper function used to start a subchannel batch in the call combiner.
+static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
+ grpc_transport_stream_op_batch* batch =
+ static_cast<grpc_transport_stream_op_batch*>(arg);
+ grpc_subchannel_call* subchannel_call =
+ static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(subchannel_call, batch);
+}
+
+// Adds retriable send_initial_metadata op to batch_data.
+static void add_retriable_send_initial_metadata_op(
+ call_data* calld, subchannel_call_retry_state* retry_state,
+ subchannel_batch_data* batch_data) {
+ // Maps the number of retries to the corresponding metadata value slice.
+ static const grpc_slice* retry_count_strings[] = {
+ &GRPC_MDSTR_1, &GRPC_MDSTR_2, &GRPC_MDSTR_3, &GRPC_MDSTR_4};
+ // We need to make a copy of the metadata batch for each attempt, since
+ // the filters in the subchannel stack may modify this batch, and we don't
+ // want those modifications to be passed forward to subsequent attempts.
+ //
+  // If we've already completed one or more attempts, add the
+  // grpc-previous-rpc-attempts header, so the storage allocated below
+  // reserves room for one extra mdelem.
+ batch_data->send_initial_metadata_storage =
+ static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
+ calld->arena, sizeof(grpc_linked_mdelem) *
+ (calld->send_initial_metadata.list.count +
+ (calld->num_attempts_completed > 0))));
+ grpc_metadata_batch_copy(&calld->send_initial_metadata,
+ &batch_data->send_initial_metadata,
+ batch_data->send_initial_metadata_storage);
+ if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
+ nullptr) {
+ grpc_metadata_batch_remove(
+ &batch_data->send_initial_metadata,
+ batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
+ }
+ if (calld->num_attempts_completed > 0) {
+ grpc_mdelem retry_md = grpc_mdelem_from_slices(
+ GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
+ *retry_count_strings[calld->num_attempts_completed - 1]);
+ grpc_error* error = grpc_metadata_batch_add_tail(
+ &batch_data->send_initial_metadata,
+ &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
+ .list.count],
+ retry_md);
+ if (error != GRPC_ERROR_NONE) {
+ gpr_log(GPR_ERROR, "error adding retry metadata: %s",
+ grpc_error_string(error));
+ GPR_ASSERT(false);
+ }
+ }
+ retry_state->started_send_initial_metadata = true;
+ batch_data->batch.send_initial_metadata = true;
+ batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
+ &batch_data->send_initial_metadata;
+ batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
+ calld->send_initial_metadata_flags;
+ batch_data->batch.payload->send_initial_metadata.peer_string =
+ calld->peer_string;
+}
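
// Illustrative sketch, not part of this patch: the observable effect of the
// copy-and-annotate logic above, using a plain string map in place of the
// metadata batch API. Each attempt gets a fresh copy (subchannel filters
// may mutate the batch), any stale grpc-previous-rpc-attempts entry is
// removed, and on attempt N+1 the header carries the value N. The slice
// table above covers values 1..4, consistent with a five-attempt ceiling.
#include <map>
#include <string>

std::map<std::string, std::string> BuildAttemptMetadata(
    const std::map<std::string, std::string>& original,
    int num_attempts_completed) {
  std::map<std::string, std::string> md = original;  // fresh copy per attempt
  md.erase("grpc-previous-rpc-attempts");  // never carry over a stale value
  if (num_attempts_completed > 0) {
    md["grpc-previous-rpc-attempts"] = std::to_string(num_attempts_completed);
  }
  return md;
}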
+
+// Adds retriable send_message op to batch_data.
+static void add_retriable_send_message_op(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+ subchannel_batch_data* batch_data) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
+ chand, calld, retry_state->started_send_message_count);
+ }
+ grpc_byte_stream_cache* cache =
+ calld->send_messages[retry_state->started_send_message_count];
+ ++retry_state->started_send_message_count;
+ grpc_caching_byte_stream_init(&batch_data->send_message, cache);
+ batch_data->batch.send_message = true;
+ batch_data->batch.payload->send_message.send_message =
+ &batch_data->send_message.base;
+}
+
+// Adds retriable send_trailing_metadata op to batch_data.
+static void add_retriable_send_trailing_metadata_op(
+ call_data* calld, subchannel_call_retry_state* retry_state,
+ subchannel_batch_data* batch_data) {
+ // We need to make a copy of the metadata batch for each attempt, since
+ // the filters in the subchannel stack may modify this batch, and we don't
+ // want those modifications to be passed forward to subsequent attempts.
+ batch_data->send_trailing_metadata_storage =
+ static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
+ calld->arena, sizeof(grpc_linked_mdelem) *
+ calld->send_trailing_metadata.list.count));
+ grpc_metadata_batch_copy(&calld->send_trailing_metadata,
+ &batch_data->send_trailing_metadata,
+ batch_data->send_trailing_metadata_storage);
+ retry_state->started_send_trailing_metadata = true;
+ batch_data->batch.send_trailing_metadata = true;
+ batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
+ &batch_data->send_trailing_metadata;
+}
+
+// Adds retriable recv_initial_metadata op to batch_data.
+static void add_retriable_recv_initial_metadata_op(
+ call_data* calld, subchannel_call_retry_state* retry_state,
+ subchannel_batch_data* batch_data) {
+ retry_state->started_recv_initial_metadata = true;
+ batch_data->batch.recv_initial_metadata = true;
+ grpc_metadata_batch_init(&batch_data->recv_initial_metadata);
+ batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
+ &batch_data->recv_initial_metadata;
+ batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
+ &batch_data->trailing_metadata_available;
+ GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
+ recv_initial_metadata_ready, batch_data,
+ grpc_schedule_on_exec_ctx);
+ batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &batch_data->recv_initial_metadata_ready;
+}
+
+// Adds retriable recv_message op to batch_data.
+static void add_retriable_recv_message_op(
+ call_data* calld, subchannel_call_retry_state* retry_state,
+ subchannel_batch_data* batch_data) {
+ ++retry_state->started_recv_message_count;
+ batch_data->batch.recv_message = true;
+ batch_data->batch.payload->recv_message.recv_message =
+ &batch_data->recv_message;
+ GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready,
+ batch_data, grpc_schedule_on_exec_ctx);
+ batch_data->batch.payload->recv_message.recv_message_ready =
+ &batch_data->recv_message_ready;
+}
+
+// Adds retriable recv_trailing_metadata op to batch_data.
+static void add_retriable_recv_trailing_metadata_op(
+ call_data* calld, subchannel_call_retry_state* retry_state,
+ subchannel_batch_data* batch_data) {
+ retry_state->started_recv_trailing_metadata = true;
+ batch_data->batch.recv_trailing_metadata = true;
+ grpc_metadata_batch_init(&batch_data->recv_trailing_metadata);
+ batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
+ &batch_data->recv_trailing_metadata;
+ batch_data->batch.collect_stats = true;
+ batch_data->batch.payload->collect_stats.collect_stats =
+ &batch_data->collect_stats;
+}
+
+// Helper function used to start a recv_trailing_metadata batch. This
+// is used when a recv_initial_metadata or recv_message op fails in a
+// way that tells us the call is over, but the application has not yet
+// started its own recv_trailing_metadata op.
+static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: call failed but recv_trailing_metadata not "
+ "started; starting it internally",
chand, calld);
}
- if (chand->retry_throttle_data != nullptr) {
- calld->retry_throttle_data =
- grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ subchannel_batch_data* batch_data = batch_data_create(elem, 1);
+ add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
+}
+
+// If there are any cached send ops that need to be replayed on the
+// current subchannel call, creates and returns a new subchannel batch
+// to replay those ops. Otherwise, returns nullptr.
+static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ subchannel_batch_data* replay_batch_data = nullptr;
+ // send_initial_metadata.
+ if (calld->seen_send_initial_metadata &&
+ !retry_state->started_send_initial_metadata &&
+ !calld->pending_send_initial_metadata) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: replaying previously completed "
+ "send_initial_metadata op",
+ chand, calld);
+ }
+ replay_batch_data = batch_data_create(elem, 1);
+ add_retriable_send_initial_metadata_op(calld, retry_state,
+ replay_batch_data);
+ }
+ // send_message.
+ // Note that we can only have one send_message op in flight at a time.
+ if (retry_state->started_send_message_count < calld->send_messages.size() &&
+ retry_state->started_send_message_count ==
+ retry_state->completed_send_message_count &&
+ !calld->pending_send_message) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: replaying previously completed "
+ "send_message op",
+ chand, calld);
+ }
+ if (replay_batch_data == nullptr) {
+ replay_batch_data = batch_data_create(elem, 1);
+ }
+ add_retriable_send_message_op(elem, retry_state, replay_batch_data);
+ }
+ // send_trailing_metadata.
+ // Note that we only add this op if we have no more send_message ops
+ // to start, since we can't send down any more send_message ops after
+ // send_trailing_metadata.
+ if (calld->seen_send_trailing_metadata &&
+ retry_state->started_send_message_count == calld->send_messages.size() &&
+ !retry_state->started_send_trailing_metadata &&
+ !calld->pending_send_trailing_metadata) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: replaying previously completed "
+ "send_trailing_metadata op",
+ chand, calld);
+ }
+ if (replay_batch_data == nullptr) {
+ replay_batch_data = batch_data_create(elem, 1);
+ }
+ add_retriable_send_trailing_metadata_op(calld, retry_state,
+ replay_batch_data);
}
- if (chand->method_params_table != nullptr) {
- calld->method_params = static_cast<method_parameters*>(
- grpc_method_config_table_get(chand->method_params_table, calld->path));
- if (calld->method_params != nullptr) {
- method_parameters_ref(calld->method_params);
- // If the deadline from the service config is shorter than the one
- // from the client API, reset the deadline timer.
- if (chand->deadline_checking_enabled &&
- calld->method_params->timeout != 0) {
- const grpc_millis per_method_deadline =
- grpc_timespec_to_millis_round_up(calld->call_start_time) +
- calld->method_params->timeout;
- if (per_method_deadline < calld->deadline) {
- calld->deadline = per_method_deadline;
- grpc_deadline_state_reset(elem, calld->deadline);
- }
+ return replay_batch_data;
+}
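
// Illustrative sketch, not part of this patch: the three replay conditions
// above as standalone predicates over stand-in state. "Seen" means the
// surface sent the op on some earlier attempt; "pending" means a pending
// batch will start it anyway, so replaying would duplicate it.
#include <cstddef>

struct ReplayStateSketch {
  bool seen_send_initial_metadata, started_send_initial_metadata,
      pending_send_initial_metadata;
  size_t started_send_message_count, completed_send_message_count,
      total_send_messages;
  bool pending_send_message;
  bool seen_send_trailing_metadata, started_send_trailing_metadata,
      pending_send_trailing_metadata;
};

bool ReplaySendInitialMetadata(const ReplayStateSketch& s) {
  return s.seen_send_initial_metadata && !s.started_send_initial_metadata &&
         !s.pending_send_initial_metadata;
}

bool ReplayNextSendMessage(const ReplayStateSketch& s) {
  // Only one send_message op may be in flight at a time.
  return s.started_send_message_count < s.total_send_messages &&
         s.started_send_message_count == s.completed_send_message_count &&
         !s.pending_send_message;
}

bool ReplaySendTrailingMetadata(const ReplayStateSketch& s) {
  // Only once every cached message has been started.
  return s.seen_send_trailing_metadata &&
         s.started_send_message_count == s.total_send_messages &&
         !s.started_send_trailing_metadata &&
         !s.pending_send_trailing_metadata;
}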
+
+// Adds subchannel batches for pending batches to batches, updating
+// *num_batches as needed.
+static void add_subchannel_batches_for_pending_batches(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+ grpc_transport_stream_op_batch** batches, size_t* num_batches) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch == nullptr) continue;
+ // Skip any batch that either (a) has already been started on this
+ // subchannel call or (b) we can't start yet because we're still
+ // replaying send ops that need to be completed first.
+ // TODO(roth): Note that if any one op in the batch can't be sent
+ // yet due to ops that we're replaying, we don't start any of the ops
+ // in the batch. This is probably okay, but it could conceivably
+ // lead to increased latency in some cases -- e.g., we could delay
+ // starting a recv op due to it being in the same batch with a send
+ // op. If/when we revamp the callback protocol in
+ // transport_stream_op_batch, we may be able to fix this.
+ if (batch->send_initial_metadata &&
+ retry_state->started_send_initial_metadata) {
+ continue;
+ }
+ if (batch->send_message && retry_state->completed_send_message_count <
+ retry_state->started_send_message_count) {
+ continue;
+ }
+ // Note that we only start send_trailing_metadata if we have no more
+ // send_message ops to start, since we can't send down any more
+ // send_message ops after send_trailing_metadata.
+ if (batch->send_trailing_metadata &&
+ (retry_state->started_send_message_count + batch->send_message <
+ calld->send_messages.size() ||
+ retry_state->started_send_trailing_metadata)) {
+ continue;
+ }
+ if (batch->recv_initial_metadata &&
+ retry_state->started_recv_initial_metadata) {
+ continue;
+ }
+ if (batch->recv_message && retry_state->completed_recv_message_count <
+ retry_state->started_recv_message_count) {
+ continue;
+ }
+ if (batch->recv_trailing_metadata &&
+ retry_state->started_recv_trailing_metadata) {
+ continue;
+ }
+ // If we're not retrying, just send the batch as-is.
+ if (calld->method_params == nullptr ||
+ calld->method_params->retry_policy() == nullptr ||
+ calld->retry_committed) {
+ batches[(*num_batches)++] = batch;
+ pending_batch_clear(calld, pending);
+ continue;
+ }
+ // Create batch with the right number of callbacks.
+ const int num_callbacks =
+ 1 + batch->recv_initial_metadata + batch->recv_message;
+ subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks);
+ // Cache send ops if needed.
+ maybe_cache_send_ops_for_batch(calld, pending);
+ // send_initial_metadata.
+ if (batch->send_initial_metadata) {
+ add_retriable_send_initial_metadata_op(calld, retry_state, batch_data);
+ }
+ // send_message.
+ if (batch->send_message) {
+ add_retriable_send_message_op(elem, retry_state, batch_data);
+ }
+ // send_trailing_metadata.
+ if (batch->send_trailing_metadata) {
+ add_retriable_send_trailing_metadata_op(calld, retry_state, batch_data);
+ }
+ // recv_initial_metadata.
+ if (batch->recv_initial_metadata) {
+ // recv_flags is only used on the server side.
+ GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
+ add_retriable_recv_initial_metadata_op(calld, retry_state, batch_data);
+ }
+ // recv_message.
+ if (batch->recv_message) {
+ add_retriable_recv_message_op(calld, retry_state, batch_data);
+ }
+ // recv_trailing_metadata.
+ if (batch->recv_trailing_metadata) {
+ GPR_ASSERT(batch->collect_stats);
+ add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+ }
+ batches[(*num_batches)++] = &batch_data->batch;
+ }
+}
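
// Illustrative sketch, not part of this patch: the bool arithmetic in the
// send_trailing_metadata skip test above. Counting batch->send_message as
// 0 or 1 means trailing metadata is held back unless this very batch sends
// the last cached message (or none remain).
#include <cstddef>
#include <cstdio>

int main() {
  size_t started = 2, total = 3;
  bool batch_has_send_message = true;
  // 2 + 1 < 3 is false: this batch sends the final message, so trailing
  // metadata may be started in the same batch rather than skipped.
  bool skip = started + batch_has_send_message < total;
  std::printf("skip send_trailing_metadata: %d\n", static_cast<int>(skip));
}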
+
+// Constructs and starts whatever subchannel batches are needed on the
+// subchannel call.
+static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
+ chand, calld);
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ // We can start up to 6 batches.
+ grpc_transport_stream_op_batch*
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_batches = 0;
+ // Replay previously-returned send_* ops if needed.
+ subchannel_batch_data* replay_batch_data =
+ maybe_create_subchannel_batch_for_replay(elem, retry_state);
+ if (replay_batch_data != nullptr) {
+ batches[num_batches++] = &replay_batch_data->batch;
+ }
+ // Now add pending batches.
+ add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
+ &num_batches);
+ // Start batches on subchannel call.
+ // Note that the call combiner will be yielded for each batch that we
+ // send down. We're already running in the call combiner, so one of
+ // the batches can be started directly, but the others will have to
+ // re-enter the call combiner.
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: starting %" PRIuPTR
+ " retriable batches on subchannel_call=%p",
+ chand, calld, num_batches, calld->subchannel_call);
+ }
+ if (num_batches == 0) {
+ // This should be fairly rare, but it can happen when (e.g.) an
+ // attempt completes before it has finished replaying all
+ // previously sent messages.
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+ "no retriable subchannel batches to start");
+ } else {
+ for (size_t i = 1; i < num_batches; ++i) {
+ if (grpc_client_channel_trace.enabled()) {
+ char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: starting batch in call combiner: %s", chand,
+ calld, batch_str);
+ gpr_free(batch_str);
}
+ batches[i]->handler_private.extra_arg = calld->subchannel_call;
+ GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
+ start_batch_in_call_combiner, batches[i],
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
+ &batches[i]->handler_private.closure,
+ GRPC_ERROR_NONE, "start_subchannel_batch");
+ }
+ if (grpc_client_channel_trace.enabled()) {
+ char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
+ batch_str);
+ gpr_free(batch_str);
}
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
}
}
-static void create_subchannel_call_locked(grpc_call_element* elem,
- grpc_error* error) {
+//
+// LB pick
+//
+
+static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
+ const size_t parent_data_size =
+ calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent
calld->path, // path
@@ -1004,7 +2466,8 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
calld->deadline, // deadline
calld->arena, // arena
calld->pick.subchannel_call_context, // context
- calld->call_combiner // call_combiner
+ calld->call_combiner, // call_combiner
+ parent_data_size // parent_data_size
};
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call);
@@ -1014,36 +2477,61 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
}
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
- waiting_for_pick_batches_fail(elem, new_error);
+ pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
} else {
- waiting_for_pick_batches_resume(elem);
+ if (parent_data_size > 0) {
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ retry_state->batch_payload.context = calld->pick.subchannel_call_context;
+ }
+ pending_batches_resume(elem);
}
GRPC_ERROR_UNREF(error);
}
// Invoked when a pick is completed, on both success and failure.
-static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
+static void pick_done(void* arg, grpc_error* error) {
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
- GRPC_ERROR_UNREF(calld->error);
- calld->error = error == GRPC_ERROR_NONE
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy")
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1);
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
- calld, grpc_error_string(calld->error));
+ // If there was no error, this is an LB policy drop, in which case
+ // we return an error; otherwise, we may retry.
+ grpc_status_code status = GRPC_STATUS_OK;
+ grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
+ nullptr);
+ if (error == GRPC_ERROR_NONE || !calld->enable_retries ||
+ !maybe_retry(elem, nullptr /* batch_data */, status,
+ nullptr /* server_pushback_md */)) {
+ grpc_error* new_error =
+ error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: failed to create subchannel: error=%s",
+ chand, calld, grpc_error_string(new_error));
+ }
+ pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
}
- waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
} else {
/* Create call on subchannel. */
- create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
+ create_subchannel_call(elem, GRPC_ERROR_REF(error));
}
- GRPC_ERROR_UNREF(error);
+}
+
+// Invoked when a pick is completed to leave the client_channel combiner
+// and continue processing in the call combiner.
+static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
}
// A wrapper around pick_done_locked() that is used in cases where
@@ -1070,15 +2558,14 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, chand->lb_policy);
+ chand, calld, chand->lb_policy.get());
}
- grpc_lb_policy_cancel_pick_locked(chand->lb_policy, &calld->pick,
- GRPC_ERROR_REF(error));
+ chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
-// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
+// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
@@ -1092,48 +2579,98 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
}
-// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
-// If the pick was completed synchronously, unrefs the LB policy and
-// returns true.
+// Applies service config to the call. Must be invoked once we know
+// that the resolver has returned results to the channel.
+static void apply_service_config_to_call_locked(grpc_call_element* elem) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
+ chand, calld);
+ }
+ if (chand->retry_throttle_data != nullptr) {
+ calld->retry_throttle_data =
+ grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
+ }
+ if (chand->method_params_table != nullptr) {
+ calld->method_params = grpc_core::ServiceConfig::MethodConfigTableLookup(
+ *chand->method_params_table, calld->path);
+ if (calld->method_params != nullptr) {
+ // If the deadline from the service config is shorter than the one
+ // from the client API, reset the deadline timer.
+ if (chand->deadline_checking_enabled &&
+ calld->method_params->timeout() != 0) {
+ const grpc_millis per_method_deadline =
+ grpc_timespec_to_millis_round_up(calld->call_start_time) +
+ calld->method_params->timeout();
+ if (per_method_deadline < calld->deadline) {
+ calld->deadline = per_method_deadline;
+ grpc_deadline_state_reset(elem, calld->deadline);
+ }
+ }
+ }
+ }
+ // If no retry policy, disable retries.
+ // TODO(roth): Remove this when adding support for transparent retries.
+ if (calld->method_params == nullptr ||
+ calld->method_params->retry_policy() == nullptr) {
+ calld->enable_retries = false;
+ }
+}
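
// Illustrative sketch, not part of this patch: the deadline-tightening rule
// in apply_service_config_to_call_locked(). The effective deadline is the
// earlier of the client-supplied deadline and the call start time plus the
// per-method timeout from the service config, with 0 meaning "unset".
// Millisecond arithmetic over stand-in types.
#include <algorithm>
#include <cstdint>

using MillisSketch = int64_t;

MillisSketch EffectiveDeadline(MillisSketch client_deadline,
                               MillisSketch call_start_time,
                               MillisSketch method_timeout /* 0 = unset */) {
  if (method_timeout == 0) return client_deadline;
  return std::min(client_deadline, call_start_time + method_timeout);
}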
+
+// Starts a pick on chand->lb_policy.
+// Returns true if pick is completed synchronously.
static bool pick_callback_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
- chand, calld, chand->lb_policy);
+ chand, calld, chand->lb_policy.get());
+ }
+ // Only get service config data on the first attempt.
+ if (calld->num_attempts_completed == 0) {
+ apply_service_config_to_call_locked(elem);
}
- apply_service_config_to_call_locked(elem);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
- uint32_t initial_metadata_flags =
- calld->initial_metadata_batch->payload->send_initial_metadata
- .send_initial_metadata_flags;
+ //
+ // The send_initial_metadata batch will be the first one in the list,
+ // as set by get_batch_index() above.
+ calld->pick.initial_metadata =
+ calld->seen_send_initial_metadata
+ ? &calld->send_initial_metadata
+ : calld->pending_batches[0]
+ .batch->payload->send_initial_metadata.send_initial_metadata;
+ uint32_t send_initial_metadata_flags =
+ calld->seen_send_initial_metadata
+ ? calld->send_initial_metadata_flags
+ : calld->pending_batches[0]
+ .batch->payload->send_initial_metadata
+ .send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
- initial_metadata_flags &
+ send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->method_params != nullptr &&
- calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
+ calld->method_params->wait_for_ready() !=
+ ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
- if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
- initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ if (calld->method_params->wait_for_ready() ==
+ ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
+ send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
- initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
- calld->pick.initial_metadata =
- calld->initial_metadata_batch->payload->send_initial_metadata
- .send_initial_metadata;
- calld->pick.initial_metadata_flags = initial_metadata_flags;
- GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
+ calld->pick.initial_metadata_flags = send_initial_metadata_flags;
+ GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
- calld->pick.on_complete = &calld->lb_pick_closure;
+ calld->pick.on_complete = &calld->pick_closure;
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
- const bool pick_done =
- grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
+ const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
+ // Pick completed synchronously.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
@@ -1143,7 +2680,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
- GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+ GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
pick_callback_cancel_locked, elem,
grpc_combiner_scheduler(chand->combiner)));
}
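
For context on the two hunks above: apply_service_config_to_call_locked() consumes per-method settings from the service config, and pick_callback_start_locked() merges the config's waitForReady value into the per-call flags, with an explicit per-call setting taking precedence. A hedged sketch of such a config, written as a C++ string literal; the service and method names are invented for the example, and the field names follow the public gRPC service config JSON format:

  // Illustrative only; not part of this change.
  const char* kExampleServiceConfig = R"json({
    "methodConfig": [{
      "name": [{ "service": "myservice.Echo", "method": "Ping" }],
      "timeout": "1.5s",
      "waitForReady": true
    }]
  })json";

With a config like this, a call to myservice.Echo/Ping whose application-supplied deadline is longer than 1.5s would have its deadline tightened, per the per_method_deadline logic above.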
@@ -1192,8 +2729,6 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
"Pick cancelled", &error, 1));
}
-static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
-
static void pick_after_resolver_result_done_locked(void* arg,
grpc_error* error) {
pick_after_resolver_result_args* args =
@@ -1230,7 +2765,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
async_pick_done_locked(elem, GRPC_ERROR_NONE);
}
}
- // TODO(roth): It should be impossible for chand->lb_policy to be NULL
+ // TODO(roth): It should be impossible for chand->lb_policy to be nullptr
// here, so the rest of this code should never actually be executed.
// However, we have reports of a crash on iOS that triggers this case,
// so we are temporarily adding this to restore branches that were
@@ -1283,6 +2818,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
+ GPR_ASSERT(calld->subchannel_call == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) {
@@ -1311,24 +2847,9 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
chand->interested_parties);
}
-static void on_complete(void* arg, grpc_error* error) {
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
- call_data* calld = static_cast<call_data*>(elem->call_data);
- if (calld->retry_throttle_data != nullptr) {
- if (error == GRPC_ERROR_NONE) {
- grpc_server_retry_throttle_data_record_success(
- calld->retry_throttle_data);
- } else {
- // TODO(roth): In a subsequent PR, check the return value here and
- // decide whether or not to retry. Note that we should only
- // record failures whose statuses match the configured retryable
- // or non-fatal status codes.
- grpc_server_retry_throttle_data_record_failure(
- calld->retry_throttle_data);
- }
- }
- GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
-}
+//
+// filter call vtable functions
+//
static void cc_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
@@ -1339,46 +2860,47 @@ static void cc_start_transport_stream_op_batch(
grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
}
// If we've previously been cancelled, immediately fail any new batches.
- if (calld->error != GRPC_ERROR_NONE) {
+ if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(calld->error));
+ chand, calld, grpc_error_string(calld->cancel_error));
}
+ // Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
- batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+ batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
return;
}
+ // Handle cancellation.
if (batch->cancel_stream) {
// Stash a copy of cancel_error in our call data, so that we can use
// it for subsequent operations. This ensures that if the call is
// cancelled before any batches are passed down (e.g., if the deadline
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
- GRPC_ERROR_UNREF(calld->error);
- calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->cancel_error =
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
- calld, grpc_error_string(calld->error));
+ calld, grpc_error_string(calld->cancel_error));
}
- // If we have a subchannel call, send the cancellation batch down.
- // Otherwise, fail all pending batches.
- if (calld->subchannel_call != nullptr) {
- grpc_subchannel_call_process_op(calld->subchannel_call, batch);
+ // If we do not have a subchannel call (i.e., a pick has not yet
+ // been started), fail all pending batches. Otherwise, send the
+ // cancellation down to the subchannel call.
+ if (calld->subchannel_call == nullptr) {
+ pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
+ false /* yield_call_combiner */);
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
} else {
- waiting_for_pick_batches_add(calld, batch);
- waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(calld->subchannel_call, batch);
}
return;
}
- // Intercept on_complete for recv_trailing_metadata so that we can
- // check retry throttle status.
- if (batch->recv_trailing_metadata) {
- GPR_ASSERT(batch->on_complete != nullptr);
- calld->original_on_complete = batch->on_complete;
- GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
- grpc_schedule_on_exec_ctx);
- batch->on_complete = &calld->on_complete;
- }
+ // Add the batch to the pending list.
+ pending_batches_add(elem, batch);
// Check if we've already gotten a subchannel call.
// Note that once we have completed the pick, we do not need to enter
// the channel combiner, which is more efficient (especially for
@@ -1386,15 +2908,13 @@ static void cc_start_transport_stream_op_batch(
if (calld->subchannel_call != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+ "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
calld, calld->subchannel_call);
}
- grpc_subchannel_call_process_op(calld->subchannel_call, batch);
+ pending_batches_resume(elem);
return;
}
// We do not yet have a subchannel call.
- // Add the batch to the waiting-for-pick list.
- waiting_for_pick_batches_add(calld, batch);
// For batches containing a send_initial_metadata op, enter the channel
// combiner to start a pick.
if (batch->send_initial_metadata) {
@@ -1434,6 +2954,7 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
calld->deadline);
}
+ calld->enable_retries = chand->enable_retries;
return GRPC_ERROR_NONE;
}
@@ -1447,10 +2968,8 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
grpc_deadline_state_destroy(elem);
}
grpc_slice_unref_internal(calld->path);
- if (calld->method_params != nullptr) {
- method_parameters_unref(calld->method_params);
- }
- GRPC_ERROR_UNREF(calld->error);
+ calld->method_params.reset();
+ GRPC_ERROR_UNREF(calld->cancel_error);
if (calld->subchannel_call != nullptr) {
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
@@ -1458,7 +2977,9 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call");
}
- GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
+ }
if (calld->pick.connected_subchannel != nullptr) {
calld->pick.connected_subchannel.reset();
}
@@ -1498,7 +3019,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
channel_data* chand = static_cast<channel_data*>(arg);
if (chand->lb_policy != nullptr) {
- grpc_lb_policy_exit_idle_locked(chand->lb_policy);
+ chand->lb_policy->ExitIdleLocked();
} else {
chand->exit_idle_when_lb_policy_arrives = true;
if (!chand->started_resolving && chand->resolver != nullptr) {
@@ -1658,3 +3179,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
+
+grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
+ grpc_call_element* elem) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ return calld->subchannel_call;
+}
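
The pending_batches_add()/pending_batches_fail()/pending_batches_resume() helpers used by the hunks above are defined earlier in this file, outside this excerpt. As an outline of the pattern only (the real code also tracks per-batch state for retries; the names and array size here are illustrative, and the gRPC core headers included by this file are assumed):

  // Sketch only: batches are parked in a fixed-size per-call array
  // until a subchannel call exists, then replayed or failed as a group.
  struct pending_batches_sketch {
    grpc_transport_stream_op_batch* batches[6] = {};
  };

  // Park a batch until the pick completes.
  void sketch_add(pending_batches_sketch* p,
                  grpc_transport_stream_op_batch* batch) {
    for (auto& slot : p->batches) {
      if (slot == nullptr) {
        slot = batch;
        return;
      }
    }
  }

  // Once a subchannel call exists, replay every parked batch on it.
  void sketch_resume(pending_batches_sketch* p,
                     grpc_subchannel_call* subchannel_call) {
    for (auto& slot : p->batches) {
      if (slot != nullptr) {
        grpc_subchannel_call_process_op(subchannel_call, slot);
        slot = nullptr;
      }
    }
  }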
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index 9670405cbe..a21e5623a7 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.cc b/src/core/ext/filters/client_channel/client_channel_factory.cc
index 3baf5b31ab..172e9f03c7 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.cc
+++ b/src/core/ext/filters/client_channel/client_channel_factory.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/lib/channel/channel_args.h"
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h
index 766ebb9389..601ec46b2a 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.h
+++ b/src/core/ext/filters/client_channel/client_channel_factory.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H
+#include <grpc/support/port_platform.h>
+
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/ext/filters/client_channel/subchannel.h"
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index 9172fa781c..3c3a97532f 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -63,7 +63,7 @@ static bool set_default_host_if_unset(grpc_channel_stack_builder* builder,
}
void grpc_client_channel_init(void) {
- grpc_lb_policy_registry_init();
+ grpc_core::LoadBalancingPolicyRegistry::Builder::InitRegistry();
grpc_core::ResolverRegistry::Builder::InitRegistry();
grpc_retry_throttle_map_init();
grpc_proxy_mapper_registry_init();
@@ -83,5 +83,5 @@ void grpc_client_channel_shutdown(void) {
grpc_proxy_mapper_registry_shutdown();
grpc_retry_throttle_map_shutdown();
grpc_core::ResolverRegistry::Builder::ShutdownRegistry();
- grpc_lb_policy_registry_shutdown();
+ grpc_core::LoadBalancingPolicyRegistry::Builder::ShutdownRegistry();
}
diff --git a/src/core/ext/filters/client_channel/connector.cc b/src/core/ext/filters/client_channel/connector.cc
index c8bf2f3e1c..5e04b3b453 100644
--- a/src/core/ext/filters/client_channel/connector.cc
+++ b/src/core/ext/filters/client_channel/connector.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/connector.h"
grpc_connector* grpc_connector_ref(grpc_connector* connector) {
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index d657658d67..556594929c 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/transport/transport.h"
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index 248a6347d5..fb29fa788d 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include <string.h>
diff --git a/src/core/ext/filters/client_channel/http_proxy.cc b/src/core/ext/filters/client_channel/http_proxy.cc
index d42376413d..29a6c0e367 100644
--- a/src/core/ext/filters/client_channel/http_proxy.cc
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/http_proxy.h"
#include <stdbool.h>
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 27fb2ad1f4..fa63dd75b5 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -16,127 +16,44 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
false, "lb_policy_refcount");
-void grpc_lb_policy_init(grpc_lb_policy* policy,
- const grpc_lb_policy_vtable* vtable,
- grpc_combiner* combiner) {
- policy->vtable = vtable;
- gpr_ref_init(&policy->refs, 1);
- policy->interested_parties = grpc_pollset_set_create();
- policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
-}
-
-#ifndef NDEBUG
-void grpc_lb_policy_ref(grpc_lb_policy* lb_policy, const char* file, int line,
- const char* reason) {
- if (grpc_trace_lb_policy_refcount.enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
- old_refs, old_refs + 1, reason);
- }
-#else
-void grpc_lb_policy_ref(grpc_lb_policy* lb_policy) {
-#endif
- gpr_ref(&lb_policy->refs);
-}
-
-#ifndef NDEBUG
-void grpc_lb_policy_unref(grpc_lb_policy* lb_policy, const char* file, int line,
- const char* reason) {
- if (grpc_trace_lb_policy_refcount.enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
- old_refs, old_refs - 1, reason);
- }
-#else
-void grpc_lb_policy_unref(grpc_lb_policy* lb_policy) {
-#endif
- if (gpr_unref(&lb_policy->refs)) {
- grpc_pollset_set_destroy(lb_policy->interested_parties);
- grpc_combiner* combiner = lb_policy->combiner;
- lb_policy->vtable->destroy(lb_policy);
- GRPC_COMBINER_UNREF(combiner, "lb_policy");
- }
-}
-
-void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
- grpc_lb_policy* new_policy) {
- policy->vtable->shutdown_locked(policy, new_policy);
-}
+namespace grpc_core {
-int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
- grpc_lb_policy_pick_state* pick) {
- return policy->vtable->pick_locked(policy, pick);
-}
-
-void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
- grpc_lb_policy_pick_state* pick,
- grpc_error* error) {
- policy->vtable->cancel_pick_locked(policy, pick, error);
-}
+LoadBalancingPolicy::LoadBalancingPolicy(const Args& args)
+ : InternallyRefCountedWithTracing(&grpc_trace_lb_policy_refcount),
+ combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
+ client_channel_factory_(args.client_channel_factory),
+ interested_parties_(grpc_pollset_set_create()),
+ request_reresolution_(nullptr) {}
-void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error* error) {
- policy->vtable->cancel_picks_locked(policy, initial_metadata_flags_mask,
- initial_metadata_flags_eq, error);
+LoadBalancingPolicy::~LoadBalancingPolicy() {
+ grpc_pollset_set_destroy(interested_parties_);
+ GRPC_COMBINER_UNREF(combiner_, "lb_policy");
}
-void grpc_lb_policy_exit_idle_locked(grpc_lb_policy* policy) {
- policy->vtable->exit_idle_locked(policy);
-}
-
-void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
- grpc_closure* on_initiate,
- grpc_closure* on_ack) {
- policy->vtable->ping_one_locked(policy, on_initiate, on_ack);
-}
-
-void grpc_lb_policy_notify_on_state_change_locked(
- grpc_lb_policy* policy, grpc_connectivity_state* state,
- grpc_closure* closure) {
- policy->vtable->notify_on_state_change_locked(policy, state, closure);
-}
-
-grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
- grpc_lb_policy* policy, grpc_error** connectivity_error) {
- return policy->vtable->check_connectivity_locked(policy, connectivity_error);
-}
-
-void grpc_lb_policy_update_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_args* lb_policy_args) {
- policy->vtable->update_locked(policy, lb_policy_args);
-}
-
-void grpc_lb_policy_set_reresolve_closure_locked(
- grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- GPR_ASSERT(policy->request_reresolution == nullptr);
- policy->request_reresolution = request_reresolution;
-}
-
-void grpc_lb_policy_try_reresolve(grpc_lb_policy* policy,
- grpc_core::TraceFlag* grpc_lb_trace,
- grpc_error* error) {
- if (policy->request_reresolution != nullptr) {
- GRPC_CLOSURE_SCHED(policy->request_reresolution, error);
- policy->request_reresolution = nullptr;
+void LoadBalancingPolicy::TryReresolutionLocked(
+ grpc_core::TraceFlag* grpc_lb_trace, grpc_error* error) {
+ if (request_reresolution_ != nullptr) {
+ GRPC_CLOSURE_SCHED(request_reresolution_, error);
+ request_reresolution_ = nullptr;
if (grpc_lb_trace->enabled()) {
gpr_log(GPR_DEBUG,
"%s %p: scheduling re-resolution closure with error=%s.",
- grpc_lb_trace->name(), policy, grpc_error_string(error));
+ grpc_lb_trace->name(), this, grpc_error_string(error));
}
} else {
if (grpc_lb_trace->enabled()) {
gpr_log(GPR_DEBUG, "%s %p: no available re-resolution closure.",
- grpc_lb_trace->name(), policy);
+ grpc_lb_trace->name(), this);
}
}
}
+
+} // namespace grpc_core
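
TryReresolutionLocked() pairs with SetReresolutionClosureLocked() (declared in lb_policy.h below) as a one-shot handshake: the channel installs a closure, and the policy consumes it at most once before a new one is installed. A hedged usage sketch; the closure and trace-flag names are hypothetical:

  // In the client channel, after creating the policy (hypothetical names):
  lb_policy->SetReresolutionClosureLocked(&on_reresolution_requested);

  // Later, inside a policy implementation that notices stale addresses:
  TryReresolutionLocked(&my_policy_trace, GRPC_ERROR_NONE);
  // request_reresolution_ is now nullptr; nothing further happens until
  // the channel installs a fresh closure.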
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 6edd314d5e..c3e43e5ef6 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -19,182 +19,183 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/lib/gprpp/abstract.h"
+#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
-/** A load balancing policy: specified by a vtable and a struct (which
- is expected to be extended to contain some parameters) */
-typedef struct grpc_lb_policy grpc_lb_policy;
-typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
-typedef struct grpc_lb_policy_args grpc_lb_policy_args;
-
extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
-struct grpc_lb_policy {
- const grpc_lb_policy_vtable* vtable;
- gpr_refcount refs;
- /* owned pointer to interested parties in load balancing decisions */
- grpc_pollset_set* interested_parties;
- /* combiner under which lb_policy actions take place */
- grpc_combiner* combiner;
- /* callback to force a re-resolution */
- grpc_closure* request_reresolution;
-};
-
-/// State used for an LB pick.
-typedef struct grpc_lb_policy_pick_state {
- /// Initial metadata associated with the picking call.
- grpc_metadata_batch* initial_metadata;
- /// Bitmask used for selective cancelling. See \a
- /// grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
- /// grpc_types.h.
- uint32_t initial_metadata_flags;
- /// Storage for LB token in \a initial_metadata, or NULL if not used.
- grpc_linked_mdelem lb_token_mdelem_storage;
- /// Closure to run when pick is complete, if not completed synchronously.
- grpc_closure* on_complete;
- /// Will be set to the selected subchannel, or nullptr on failure or when
- /// the LB policy decides to drop the call.
- grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> connected_subchannel;
- /// Will be populated with context to pass to the subchannel call, if needed.
- grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
- /// Upon success, \a *user_data will be set to whatever opaque information
- /// may need to be propagated from the LB policy, or NULL if not needed.
- void** user_data;
- /// Next pointer. For internal use by LB policy.
- struct grpc_lb_policy_pick_state* next;
-} grpc_lb_policy_pick_state;
-
-struct grpc_lb_policy_vtable {
- void (*destroy)(grpc_lb_policy* policy);
-
- /// \see grpc_lb_policy_shutdown_locked().
- void (*shutdown_locked)(grpc_lb_policy* policy, grpc_lb_policy* new_policy);
-
- /** \see grpc_lb_policy_pick */
- int (*pick_locked)(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick);
-
- /** \see grpc_lb_policy_cancel_pick */
- void (*cancel_pick_locked)(grpc_lb_policy* policy,
- grpc_lb_policy_pick_state* pick,
+namespace grpc_core {
+
+/// Interface for load balancing policies.
+///
+/// Note: All methods with a "Locked" suffix must be called from the
+/// combiner passed to the constructor.
+///
+/// Any I/O done by the LB policy should be done under the pollset_set
+/// returned by \a interested_parties().
+class LoadBalancingPolicy
+ : public InternallyRefCountedWithTracing<LoadBalancingPolicy> {
+ public:
+ struct Args {
+ /// The combiner under which all LB policy calls will be run.
+ /// Policy does NOT take ownership of the reference to the combiner.
+ // TODO(roth): Once we have a C++-like interface for combiners, this
+ // API should change to take a smart pointer that does pass ownership
+ // of a reference.
+ grpc_combiner* combiner = nullptr;
+ /// Used to create channels and subchannels.
+ grpc_client_channel_factory* client_channel_factory = nullptr;
+ /// Channel args from the resolver.
+ /// Note that the LB policy gets the set of addresses from the
+ /// GRPC_ARG_LB_ADDRESSES channel arg.
+ grpc_channel_args* args = nullptr;
+ };
+
+ /// State used for an LB pick.
+ struct PickState {
+ /// Initial metadata associated with the picking call.
+ grpc_metadata_batch* initial_metadata;
+ /// Bitmask used for selective cancelling. See
+ /// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
+ /// grpc_types.h.
+ uint32_t initial_metadata_flags;
+ /// Storage for LB token in \a initial_metadata, or nullptr if not used.
+ grpc_linked_mdelem lb_token_mdelem_storage;
+ /// Closure to run when pick is complete, if not completed synchronously.
+ grpc_closure* on_complete;
+ /// Will be set to the selected subchannel, or nullptr on failure or when
+ /// the LB policy decides to drop the call.
+ RefCountedPtr<ConnectedSubchannel> connected_subchannel;
+ /// Will be populated with context to pass to the subchannel call, if
+ /// needed.
+ grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
+ /// Upon success, \a *user_data will be set to whatever opaque information
+ /// may need to be propagated from the LB policy, or nullptr if not needed.
+ // TODO(roth): As part of revamping our metadata APIs, try to find a
+ // way to clean this up and C++-ify it.
+ void** user_data;
+ /// Next pointer. For internal use by LB policy.
+ PickState* next;
+ };
+
+ // Not copyable nor movable.
+ LoadBalancingPolicy(const LoadBalancingPolicy&) = delete;
+ LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete;
+
+ /// Updates the policy with a new set of \a args from the resolver.
+ /// Note that the LB policy gets the set of addresses from the
+ /// GRPC_ARG_LB_ADDRESSES channel arg.
+ virtual void UpdateLocked(const grpc_channel_args& args) GRPC_ABSTRACT;
+
+ /// Finds an appropriate subchannel for a call, based on data in \a pick.
+ /// \a pick must remain alive until the pick is complete.
+ ///
+ /// If the pick succeeds and a result is known immediately, returns true.
+ /// Otherwise, \a pick->on_complete will be invoked once the pick is
+ /// complete with its error argument set to indicate success or failure.
+ virtual bool PickLocked(PickState* pick) GRPC_ABSTRACT;
+
+ /// Cancels \a pick.
+ /// The \a on_complete callback of the pending pick will be invoked with
+ /// \a pick->connected_subchannel set to null.
+ virtual void CancelPickLocked(PickState* pick,
+ grpc_error* error) GRPC_ABSTRACT;
+
+ /// Cancels all pending picks for which their \a initial_metadata_flags (as
+ /// given in the call to \a PickLocked()) matches
+ /// \a initial_metadata_flags_eq when ANDed with
+ /// \a initial_metadata_flags_mask.
+ virtual void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) GRPC_ABSTRACT;
+
+ /// Requests a notification when the connectivity state of the policy
+ /// changes from \a *state. When that happens, sets \a *state to the
+ /// new state and schedules \a closure.
+ virtual void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
+ grpc_closure* closure) GRPC_ABSTRACT;
+
+ /// Returns the policy's current connectivity state. Sets \a error to
+ /// the associated error, if any.
+ virtual grpc_connectivity_state CheckConnectivityLocked(
+ grpc_error** connectivity_error) GRPC_ABSTRACT;
+
+ /// Hands off pending picks to \a new_policy.
+ virtual void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy)
+ GRPC_ABSTRACT;
+
+ /// Performs a connected subchannel ping via \a ConnectedSubchannel::Ping()
+ /// against one of the connected subchannels managed by the policy.
+ /// Note: This is intended only for use in tests.
+ virtual void PingOneLocked(grpc_closure* on_initiate,
+ grpc_closure* on_ack) GRPC_ABSTRACT;
+
+ /// Tries to enter a READY connectivity state.
+ /// TODO(roth): As part of restructuring how we handle IDLE state,
+ /// consider whether this method is still needed.
+ virtual void ExitIdleLocked() GRPC_ABSTRACT;
+
+ void Orphan() override {
+ // Invoke ShutdownAndUnrefLocked() inside of the combiner.
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_CREATE(&LoadBalancingPolicy::ShutdownAndUnrefLocked, this,
+ grpc_combiner_scheduler(combiner_)),
+ GRPC_ERROR_NONE);
+ }
+
+ /// Sets the re-resolution closure to \a request_reresolution.
+ void SetReresolutionClosureLocked(grpc_closure* request_reresolution) {
+ GPR_ASSERT(request_reresolution_ == nullptr);
+ request_reresolution_ = request_reresolution;
+ }
+
+ grpc_pollset_set* interested_parties() const { return interested_parties_; }
+
+ GRPC_ABSTRACT_BASE_CLASS
+
+ protected:
+ explicit LoadBalancingPolicy(const Args& args);
+ virtual ~LoadBalancingPolicy();
+
+ grpc_combiner* combiner() const { return combiner_; }
+ grpc_client_channel_factory* client_channel_factory() const {
+ return client_channel_factory_;
+ }
+
+ /// Shuts down the policy. Any pending picks that have not been
+ /// handed off to a new policy via HandOffPendingPicksLocked() will be
+ /// failed.
+ virtual void ShutdownLocked() GRPC_ABSTRACT;
+
+ /// Tries to request a re-resolution.
+ void TryReresolutionLocked(grpc_core::TraceFlag* grpc_lb_trace,
grpc_error* error);
- /** \see grpc_lb_policy_cancel_picks */
- void (*cancel_picks_locked)(grpc_lb_policy* policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error* error);
-
- /** \see grpc_lb_policy_ping_one */
- void (*ping_one_locked)(grpc_lb_policy* policy, grpc_closure* on_initiate,
- grpc_closure* on_ack);
-
- /** Try to enter a READY connectivity state */
- void (*exit_idle_locked)(grpc_lb_policy* policy);
-
- /** check the current connectivity of the lb_policy */
- grpc_connectivity_state (*check_connectivity_locked)(
- grpc_lb_policy* policy, grpc_error** connectivity_error);
-
- /** call notify when the connectivity state of a channel changes from *state.
- Updates *state with the new state of the policy. Calling with a NULL \a
- state cancels the subscription. */
- void (*notify_on_state_change_locked)(grpc_lb_policy* policy,
- grpc_connectivity_state* state,
- grpc_closure* closure);
-
- void (*update_locked)(grpc_lb_policy* policy,
- const grpc_lb_policy_args* args);
+ private:
+ static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) {
+ LoadBalancingPolicy* policy = static_cast<LoadBalancingPolicy*>(arg);
+ policy->ShutdownLocked();
+ policy->Unref();
+ }
+
+ /// Combiner under which LB policy actions take place.
+ grpc_combiner* combiner_;
+ /// Client channel factory, used to create channels and subchannels.
+ grpc_client_channel_factory* client_channel_factory_;
+ /// Owned pointer to interested parties in load balancing decisions.
+ grpc_pollset_set* interested_parties_;
+ /// Callback to force a re-resolution.
+ grpc_closure* request_reresolution_;
};
-#ifndef NDEBUG
-#define GRPC_LB_POLICY_REF(p, r) \
- grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_LB_POLICY_UNREF(p, r) \
- grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
-void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
- const char* reason);
-void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
- const char* reason);
-#else // !NDEBUG
-#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
-#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
-void grpc_lb_policy_ref(grpc_lb_policy* policy);
-void grpc_lb_policy_unref(grpc_lb_policy* policy);
-#endif
-
-/** called by concrete implementations to initialize the base struct */
-void grpc_lb_policy_init(grpc_lb_policy* policy,
- const grpc_lb_policy_vtable* vtable,
- grpc_combiner* combiner);
-
-/// Shuts down \a policy.
-/// If \a new_policy is non-null, any pending picks will be restarted
-/// on that policy; otherwise, they will be failed.
-void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
- grpc_lb_policy* new_policy);
-
-/** Finds an appropriate subchannel for a call, based on data in \a pick.
- \a pick must remain alive until the pick is complete.
-
- If the pick succeeds and a result is known immediately, a non-zero
- value will be returned. Otherwise, \a pick->on_complete will be invoked
- once the pick is complete with its error argument set to indicate
- success or failure.
-
- Any IO should be done under the \a interested_parties \a grpc_pollset_set
- in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
- grpc_lb_policy_pick_state* pick);
-
-/** Perform a connected subchannel ping (see \a
- grpc_core::ConnectedSubchannel::Ping)
- against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
- grpc_closure* on_initiate,
- grpc_closure* on_ack);
-
-/** Cancel picks for \a pick.
- The \a on_complete callback of the pending picks will be invoked with \a
- *target set to NULL. */
-void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
- grpc_lb_policy_pick_state* pick,
- grpc_error* error);
-
-/** Cancel all pending picks for which their \a initial_metadata_flags (as given
- in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
- when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error* error);
-
-/** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle_locked(grpc_lb_policy* policy);
-
-/* Call notify when the connectivity state of a channel changes from \a *state.
- * Updates \a *state with the new state of the policy */
-void grpc_lb_policy_notify_on_state_change_locked(
- grpc_lb_policy* policy, grpc_connectivity_state* state,
- grpc_closure* closure);
-
-grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
- grpc_lb_policy* policy, grpc_error** connectivity_error);
-
-/** Update \a policy with \a lb_policy_args. */
-void grpc_lb_policy_update_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_args* lb_policy_args);
-
-/** Set the re-resolution closure to \a request_reresolution. */
-void grpc_lb_policy_set_reresolve_closure_locked(
- grpc_lb_policy* policy, grpc_closure* request_reresolution);
-
-/** Try to request a re-resolution. It's NOT a public API; it's only for use by
- the LB policy implementations. */
-void grpc_lb_policy_try_reresolve(grpc_lb_policy* policy,
- grpc_core::TraceFlag* grpc_lb_trace,
- grpc_error* error);
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */
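
To make the shape of the new interface concrete, a hedged skeleton of a do-nothing policy follows; the class name is invented, and real implementations (pick_first, round_robin, and grpclb below) are substantially more involved:

  // Hypothetical skeleton; illustrative only.
  class ExamplePolicy : public grpc_core::LoadBalancingPolicy {
   public:
    explicit ExamplePolicy(const Args& args) : LoadBalancingPolicy(args) {}

    void UpdateLocked(const grpc_channel_args& args) override {}
    bool PickLocked(PickState* pick) override {
      // Complete synchronously with no subchannel (i.e., fail the pick).
      pick->connected_subchannel.reset();
      return true;
    }
    void CancelPickLocked(PickState* pick, grpc_error* error) override {
      GRPC_ERROR_UNREF(error);
    }
    void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
                                   uint32_t initial_metadata_flags_eq,
                                   grpc_error* error) override {
      GRPC_ERROR_UNREF(error);
    }
    void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
                                   grpc_closure* closure) override {
      // A real policy records these and schedules the closure on changes.
    }
    grpc_connectivity_state CheckConnectivityLocked(
        grpc_error** connectivity_error) override {
      return GRPC_CHANNEL_IDLE;
    }
    void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override {}
    void PingOneLocked(grpc_closure* on_initiate,
                       grpc_closure* on_ack) override {}
    void ExitIdleLocked() override {}

   private:
    void ShutdownLocked() override {}
  };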
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 1a3a1f029c..18ef1f6ff5 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
#include <grpc/support/atm.h>
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
index 04de7a04df..838e2ef1ca 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/channel/channel_stack.h"
extern const grpc_channel_filter grpc_client_load_reporting_filter;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index da82b3f4da..cb39e4224e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -16,68 +16,50 @@
*
*/
-/** Implementation of the gRPC LB policy.
- *
- * This policy takes as input a set of resolved addresses {a1..an} for which the
- * LB set was set (it's the resolver's responsibility to ensure this). That is
- * to say, {a1..an} represent a collection of LB servers.
- *
- * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}.
- * This channel behaves just like a regular channel. In particular, the
- * constructed URI over the addresses a1..an will use the default pick first
- * policy to select from this list of LB server backends.
- *
- * The first time the policy gets a request for a pick, a ping, or to exit the
- * idle state, \a query_for_backends_locked() is called. This function sets up
- * and initiates the internal communication with the LB server. In particular,
- * it's responsible for instantiating the internal *streaming* call to the LB
- * server (whichever address from {a1..an} pick-first chose). This call is
- * serviced by two callbacks, \a lb_on_server_status_received and \a
- * lb_on_response_received. The former will be called when the call to the LB
- * server completes. This can happen if the LB server closes the connection or
- * if this policy itself cancels the call (for example because it's shutting
- * down). If the internal call times out, the usual behavior of pick-first
- * applies, continuing to pick from the list {a1..an}.
- *
- * Upon sucesss, the incoming \a LoadBalancingResponse is processed by \a
- * res_recv. An invalid one results in the termination of the streaming call. A
- * new streaming call should be created if possible, failing the original call
- * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
- * backends is extracted. A Round Robin policy will be created from this list.
- * There are two possible scenarios:
- *
- * 1. This is the first server list received. There was no previous instance of
- * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
- * policy and perform all the pending operations over it.
- * 2. There's already a RR policy instance active. We need to introduce the new
- * one build from the new serverlist, but taking care not to disrupt the
- * operations in progress over the old RR instance. This is done by
- * decreasing the reference count on the old policy. The moment no more
- * references are held on the old RR policy, it'll be destroyed and \a
- * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
- * state. At this point we can transition to a new RR instance safely, which
- * is done once again via \a rr_handover_locked().
- *
- *
- * Once a RR policy instance is in place (and getting updated as described),
- * calls to for a pick, a ping or a cancellation will be serviced right away by
- * forwarding them to the RR instance. Any time there's no RR policy available
- * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
- * received, etc), pick/ping requests are added to a list of pending picks/pings
- * to be flushed and serviced as part of \a rr_handover_locked() the moment the
- * RR policy instance becomes available.
- *
- * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
- * high level design and details. */
+/// Implementation of the gRPC LB policy.
+///
+/// This policy takes as input a list of resolved addresses, which must
+/// include at least one balancer address.
+///
+/// An internal channel (\a lb_channel_) is created for the addresses
+/// that are balancer addresses. This channel behaves just like a regular
+/// channel that uses pick_first to select from the list of balancer
+/// addresses.
+///
+/// The first time the policy gets a request for a pick, a ping, or to exit
+/// the idle state, \a StartPickingLocked() is called. This method is
+/// responsible for instantiating the internal *streaming* call to the LB
+/// server (whichever address pick_first chose). The call will be complete
+/// when either the balancer sends status or we cancel the call (e.g.,
+/// because we are shutting down). If needed, we retry the call. If we
+/// received at least one valid message from the server, a new call attempt
+/// will be made immediately; otherwise, we apply back-off delays between
+/// attempts.
+///
+/// We maintain an internal round_robin policy instance for distributing
+/// requests across backends. Whenever we receive a new serverlist from
+/// the balancer, we update the round_robin policy with the new list of
+/// addresses. If we cannot communicate with the balancer on startup,
+/// however, we may enter fallback mode, in which case we will populate
+/// the RR policy's addresses from the backend addresses returned by the
+/// resolver.
+///
+/// Once an RR policy instance is in place (and getting updated as described),
+/// calls for a pick, a ping, or a cancellation will be serviced right
+/// away by forwarding them to the RR instance. Any time there's no RR
+/// policy available (i.e., right after the creation of the gRPCLB policy),
+/// pick and ping requests are added to a list of pending picks and pings
+/// to be flushed and serviced when the RR policy instance becomes available.
+///
+/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
+/// high level design and details.
-/* TODO(dgq):
- * - Implement LB service forwarding (point 2c. in the doc's diagram).
- */
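
A condensed sketch of the retry decision described in the comment above, assembled from the method and accessor names declared later in this file; the real logic lives in OnBalancerStatusReceivedLocked(), so treat this as an outline:

  // When the balancer call ends and the policy is not shutting down:
  if (lb_calld_->seen_initial_response()) {
    StartBalancerCallLocked();            // made progress: retry at once
  } else {
    StartBalancerCallRetryTimerLocked();  // no progress: back off first
  }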
+// With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+// using that endpoint. Because of various transitive includes in uv.h,
+// including windows.h on Windows, uv.h must be included before other system
+// headers. Therefore, sockaddr.h must always be included first.
+#include <grpc/support/port_platform.h>
-/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
- using that endpoint. Because of various transitive includes in uv.h,
- including windows.h on Windows, uv.h must be included before other system
- headers. Therefore, sockaddr.h must always be included first */
#include "src/core/lib/iomgr/sockaddr.h"
#include <inttypes.h>
@@ -93,7 +75,6 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
-#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@@ -108,6 +89,8 @@
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
@@ -127,336 +110,294 @@
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
-grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
+namespace grpc_core {
-struct glb_lb_policy;
+TraceFlag grpc_lb_glb_trace(false, "glb");
namespace {
-/// Linked list of pending pick requests. It stores all information needed to
-/// eventually call (Round Robin's) pick() on them. They mainly stay pending
-/// waiting for the RR policy to be created.
-///
-/// Note that when a pick is sent to the RR policy, we inject our own
-/// on_complete callback, so that we can intercept the result before
-/// invoking the original on_complete callback. This allows us to set the
-/// LB token metadata and add client_stats to the call context.
-/// See \a pending_pick_complete() for details.
-struct pending_pick {
- // Our on_complete closure and the original one.
- grpc_closure on_complete;
- grpc_closure* original_on_complete;
- // The original pick.
- grpc_lb_policy_pick_state* pick;
- // Stats for client-side load reporting. Note that this holds a
- // reference, which must be either passed on via context or unreffed.
- grpc_grpclb_client_stats* client_stats;
- // The LB token associated with the pick. This is set via user_data in
- // the pick.
- grpc_mdelem lb_token;
- // The grpclb instance that created the wrapping. This instance is not owned,
- // reference counts are untouched. It's used only for logging purposes.
- glb_lb_policy* glb_policy;
- // Next pending pick.
- struct pending_pick* next;
-};
+class GrpcLb : public LoadBalancingPolicy {
+ public:
+ GrpcLb(const grpc_lb_addresses* addresses, const Args& args);
+
+ void UpdateLocked(const grpc_channel_args& args) override;
+ bool PickLocked(PickState* pick) override;
+ void CancelPickLocked(PickState* pick, grpc_error* error) override;
+ void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) override;
+ void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
+ grpc_closure* closure) override;
+ grpc_connectivity_state CheckConnectivityLocked(
+ grpc_error** connectivity_error) override;
+ void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
+ void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
+ void ExitIdleLocked() override;
+
+ private:
+ /// Linked list of pending pick requests. It stores all information needed to
+ /// eventually call (Round Robin's) pick() on them. They mainly stay pending
+ /// waiting for the RR policy to be created.
+ ///
+ /// Note that when a pick is sent to the RR policy, we inject our own
+ /// on_complete callback, so that we can intercept the result before
+ /// invoking the original on_complete callback. This allows us to set the
+ /// LB token metadata and add client_stats to the call context.
+ /// See \a pending_pick_complete() for details.
+ struct PendingPick {
+ // The grpclb instance that created the wrapping. This instance is not
+ // owned; reference counts are untouched. It's used only for logging
+ // purposes.
+ GrpcLb* grpclb_policy;
+ // The original pick.
+ PickState* pick;
+ // Our on_complete closure and the original one.
+ grpc_closure on_complete;
+ grpc_closure* original_on_complete;
+ // The LB token associated with the pick. This is set via user_data in
+ // the pick.
+ grpc_mdelem lb_token;
+ // Stats for client-side load reporting. Note that this holds a
+ // reference, which must be either passed on via context or unreffed.
+ grpc_grpclb_client_stats* client_stats = nullptr;
+ // Next pending pick.
+ PendingPick* next = nullptr;
+ };
+
+ /// A linked list of pending pings waiting for the RR policy to be created.
+ struct PendingPing {
+ grpc_closure* on_initiate;
+ grpc_closure* on_ack;
+ PendingPing* next = nullptr;
+ };
+
+ /// Contains a call to the LB server and all the data related to the call.
+ class BalancerCallState
+ : public InternallyRefCountedWithTracing<BalancerCallState> {
+ public:
+ explicit BalancerCallState(
+ RefCountedPtr<LoadBalancingPolicy> parent_grpclb_policy);
+
+ // It's the caller's responsibility to ensure that Orphan() is called from
+ // inside the combiner.
+ void Orphan() override;
+
+ void StartQuery();
+
+ grpc_grpclb_client_stats* client_stats() const { return client_stats_; }
+ bool seen_initial_response() const { return seen_initial_response_; }
+
+ private:
+ ~BalancerCallState();
+
+ GrpcLb* grpclb_policy() const {
+ return static_cast<GrpcLb*>(grpclb_policy_.get());
+ }
-/// A linked list of pending pings waiting for the RR policy to be created.
-struct pending_ping {
- grpc_closure* on_initiate;
- grpc_closure* on_ack;
- struct pending_ping* next;
+ void ScheduleNextClientLoadReportLocked();
+ void SendClientLoadReportLocked();
+
+ static bool LoadReportCountersAreZero(grpc_grpclb_request* request);
+
+ static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error);
+ static void ClientLoadReportDoneLocked(void* arg, grpc_error* error);
+ static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
+ static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error);
+ static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error);
+
+ // The owning LB policy.
+ RefCountedPtr<LoadBalancingPolicy> grpclb_policy_;
+
+ // The streaming call to the LB server. Always non-NULL.
+ grpc_call* lb_call_ = nullptr;
+
+ // recv_initial_metadata
+ grpc_metadata_array lb_initial_metadata_recv_;
+
+ // send_message
+ grpc_byte_buffer* send_message_payload_ = nullptr;
+ grpc_closure lb_on_initial_request_sent_;
+
+ // recv_message
+ grpc_byte_buffer* recv_message_payload_ = nullptr;
+ grpc_closure lb_on_balancer_message_received_;
+ bool seen_initial_response_ = false;
+
+ // recv_trailing_metadata
+ grpc_closure lb_on_balancer_status_received_;
+ grpc_metadata_array lb_trailing_metadata_recv_;
+ grpc_status_code lb_call_status_;
+ grpc_slice lb_call_status_details_;
+
+ // The stats for client-side load reporting associated with this LB call.
+ // Created after the first serverlist is received.
+ grpc_grpclb_client_stats* client_stats_ = nullptr;
+ grpc_millis client_stats_report_interval_ = 0;
+ grpc_timer client_load_report_timer_;
+ bool client_load_report_timer_callback_pending_ = false;
+ bool last_client_load_report_counters_were_zero_ = false;
+ bool client_load_report_is_due_ = false;
+ // The closure used for either the load report timer or the callback for
+ // completion of sending the load report.
+ grpc_closure client_load_report_closure_;
+ };
+
+ ~GrpcLb();
+
+ void ShutdownLocked() override;
+
+ // Helper function used in ctor and UpdateLocked().
+ void ProcessChannelArgsLocked(const grpc_channel_args& args);
+
+ // Methods for dealing with the balancer channel and call.
+ void StartPickingLocked();
+ void StartBalancerCallLocked();
+ static void OnFallbackTimerLocked(void* arg, grpc_error* error);
+ void StartBalancerCallRetryTimerLocked();
+ static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error);
+ static void OnBalancerChannelConnectivityChangedLocked(void* arg,
+ grpc_error* error);
+
+ // Pending pick methods.
+ static void PendingPickSetMetadataAndContext(PendingPick* pp);
+ PendingPick* PendingPickCreate(PickState* pick);
+ void AddPendingPick(PendingPick* pp);
+ static void OnPendingPickComplete(void* arg, grpc_error* error);
+
+ // Pending ping methods.
+ void AddPendingPing(grpc_closure* on_initiate, grpc_closure* on_ack);
+
+ // Methods for dealing with the RR policy.
+ void CreateOrUpdateRoundRobinPolicyLocked();
+ grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
+ void CreateRoundRobinPolicyLocked(const Args& args);
+ bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp);
+ void UpdateConnectivityStateFromRoundRobinPolicyLocked(
+ grpc_error* rr_state_error);
+ static void OnRoundRobinConnectivityChangedLocked(void* arg,
+ grpc_error* error);
+ static void OnRoundRobinRequestReresolutionLocked(void* arg,
+ grpc_error* error);
+
+ // Who the client is trying to communicate with.
+ const char* server_name_ = nullptr;
+
+ // Current channel args from the resolver.
+ grpc_channel_args* args_ = nullptr;
+
+ // Internal state.
+ bool started_picking_ = false;
+ bool shutting_down_ = false;
+ grpc_connectivity_state_tracker state_tracker_;
+
+ // The channel for communicating with the LB server.
+ grpc_channel* lb_channel_ = nullptr;
+ grpc_connectivity_state lb_channel_connectivity_;
+ grpc_closure lb_channel_on_connectivity_changed_;
+ // Are we already watching the LB channel's connectivity?
+ bool watching_lb_channel_ = false;
+ // Response generator to inject address updates into lb_channel_.
+ RefCountedPtr<FakeResolverResponseGenerator> response_generator_;
+
+ // The data associated with the current LB call. It holds a ref to this LB
+ // policy. It's initialized every time we query for backends. It's reset to
+ // NULL whenever the current LB call is no longer needed (e.g., the LB policy
+ // is shutting down, or the LB call has ended). A non-NULL lb_calld_ always
+ // contains a non-NULL lb_call_.
+ OrphanablePtr<BalancerCallState> lb_calld_;
+ // Timeout in milliseconds for the LB call. 0 means no deadline.
+ int lb_call_timeout_ms_ = 0;
+ // Balancer call retry state.
+ BackOff lb_call_backoff_;
+ bool retry_timer_callback_pending_ = false;
+ grpc_timer lb_call_retry_timer_;
+ grpc_closure lb_on_call_retry_;
+
+ // The deserialized response from the balancer. May be nullptr until one
+ // such response has arrived.
+ grpc_grpclb_serverlist* serverlist_ = nullptr;
+ // Index into serverlist for next pick.
+ // If the server at this index is a drop, we return a drop.
+ // Otherwise, we delegate to the RR policy.
+ size_t serverlist_index_ = 0;
+
+  // Timeout in milliseconds before using fallback backend addresses.
+  // 0 means not using fallback.
+ int lb_fallback_timeout_ms_ = 0;
+ // The backend addresses from the resolver.
+ grpc_lb_addresses* fallback_backend_addresses_ = nullptr;
+ // Fallback timer.
+ bool fallback_timer_callback_pending_ = false;
+ grpc_timer lb_fallback_timer_;
+ grpc_closure lb_on_fallback_;
+
+ // Pending picks and pings that are waiting on the RR policy's connectivity.
+ PendingPick* pending_picks_ = nullptr;
+ PendingPing* pending_pings_ = nullptr;
+
+ // The RR policy to use for the backends.
+ OrphanablePtr<LoadBalancingPolicy> rr_policy_;
+ grpc_connectivity_state rr_connectivity_state_;
+ grpc_closure on_rr_connectivity_changed_;
+ grpc_closure on_rr_request_reresolution_;
};
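
From the declarations above, pending picks and pings sit on intrusive singly-linked lists until the RR policy exists. A plausible sketch of queueing an entry (illustrative; the real AddPendingPick() may do more):

  void GrpcLb::AddPendingPick(PendingPick* pp) {
    // Push onto the head of the intrusive list; entries are drained
    // once the RR policy becomes available.
    pp->next = pending_picks_;
    pending_picks_ = pp;
  }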
-} // namespace
-
-typedef struct glb_lb_call_data {
- struct glb_lb_policy* glb_policy;
- // TODO(juanlishen): c++ize this struct.
- gpr_refcount refs;
-
- /** The streaming call to the LB server. Always non-NULL. */
- grpc_call* lb_call;
-
- /** The initial metadata received from the LB server. */
- grpc_metadata_array lb_initial_metadata_recv;
-
- /** The message sent to the LB server. It's used to query for backends (the
- * value may vary if the LB server indicates a redirect) or send client load
- * report. */
- grpc_byte_buffer* send_message_payload;
- /** The callback after the initial request is sent. */
- grpc_closure lb_on_sent_initial_request;
-
- /** The response received from the LB server, if any. */
- grpc_byte_buffer* recv_message_payload;
- /** The callback to process the response received from the LB server. */
- grpc_closure lb_on_response_received;
- bool seen_initial_response;
-
- /** The callback to process the status received from the LB server, which
- * signals the end of the LB call. */
- grpc_closure lb_on_server_status_received;
- /** The trailing metadata from the LB server. */
- grpc_metadata_array lb_trailing_metadata_recv;
- /** The call status code and details. */
- grpc_status_code lb_call_status;
- grpc_slice lb_call_status_details;
-
- /** The stats for client-side load reporting associated with this LB call.
- * Created after the first serverlist is received. */
- grpc_grpclb_client_stats* client_stats;
- /** The interval and timer for next client load report. */
- grpc_millis client_stats_report_interval;
- grpc_timer client_load_report_timer;
- bool client_load_report_timer_callback_pending;
- bool last_client_load_report_counters_were_zero;
- bool client_load_report_is_due;
- /** The closure used for either the load report timer or the callback for
- * completion of sending the load report. */
- grpc_closure client_load_report_closure;
-} glb_lb_call_data;
-
-typedef struct glb_lb_policy {
- /** Base policy: must be first. */
- grpc_lb_policy base;
-
- /** Who the client is trying to communicate with. */
- const char* server_name;
-
- /** Channel related data that will be propagated to the internal RR policy. */
- grpc_client_channel_factory* cc_factory;
- grpc_channel_args* args;
-
-  /** Timeout in milliseconds before using fallback backend addresses.
- * 0 means not using fallback. */
- int lb_fallback_timeout_ms;
-
- /** The channel for communicating with the LB server. */
- grpc_channel* lb_channel;
-
- /** The data associated with the current LB call. It holds a ref to this LB
- * policy. It's initialized every time we query for backends. It's reset to
- * NULL whenever the current LB call is no longer needed (e.g., the LB policy
- * is shutting down, or the LB call has ended). A non-NULL lb_calld always
- * contains a non-NULL lb_call. */
- glb_lb_call_data* lb_calld;
-
- /** response generator to inject address updates into \a lb_channel */
- grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
- response_generator;
-
- /** the RR policy to use of the backend servers returned by the LB server */
- grpc_lb_policy* rr_policy;
-
- /** the connectivity state of the embedded RR policy */
- grpc_connectivity_state rr_connectivity_state;
-
- bool started_picking;
-
- /** our connectivity state tracker */
- grpc_connectivity_state_tracker state_tracker;
-
- /** connectivity state of the LB channel */
- grpc_connectivity_state lb_channel_connectivity;
-
- /** stores the deserialized response from the LB. May be nullptr until one
- * such response has arrived. */
- grpc_grpclb_serverlist* serverlist;
-
- /** Index into serverlist for next pick.
- * If the server at this index is a drop, we return a drop.
- * Otherwise, we delegate to the RR policy. */
- size_t serverlist_index;
-
- /** stores the backend addresses from the resolver */
- grpc_lb_addresses* fallback_backend_addresses;
-
- /** list of picks that are waiting on RR's policy connectivity */
- pending_pick* pending_picks;
-
- /** list of pings that are waiting on RR's policy connectivity */
- pending_ping* pending_pings;
-
- bool shutting_down;
-
- /** are we already watching the LB channel's connectivity? */
- bool watching_lb_channel;
-
- /** is the callback associated with \a lb_call_retry_timer pending? */
- bool retry_timer_callback_pending;
-
- /** is the callback associated with \a lb_fallback_timer pending? */
- bool fallback_timer_callback_pending;
-
- /** called upon changes to the LB channel's connectivity. */
- grpc_closure lb_channel_on_connectivity_changed;
-
- /** called upon changes to the RR's connectivity. */
- grpc_closure rr_on_connectivity_changed;
-
- /** called upon reresolution request from the RR policy. */
- grpc_closure rr_on_reresolution_requested;
-
- /************************************************************/
- /* client data associated with the LB server communication */
- /************************************************************/
-
- /** LB call retry backoff state */
- grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;
-
- /** timeout in milliseconds for the LB call. 0 means no deadline. */
- int lb_call_timeout_ms;
-
- /** LB call retry timer */
- grpc_timer lb_call_retry_timer;
- /** LB call retry timer callback */
- grpc_closure lb_on_call_retry;
-
- /** LB fallback timer */
- grpc_timer lb_fallback_timer;
- /** LB fallback timer callback */
- grpc_closure lb_on_fallback;
-} glb_lb_policy;
-
-static void glb_lb_call_data_ref(glb_lb_call_data* lb_calld,
- const char* reason) {
- gpr_ref_non_zero(&lb_calld->refs);
- if (grpc_lb_glb_trace.enabled()) {
- const gpr_atm count = gpr_atm_acq_load(&lb_calld->refs.count);
- gpr_log(GPR_DEBUG, "[%s %p] lb_calld %p REF %lu->%lu (%s)",
- grpc_lb_glb_trace.name(), lb_calld->glb_policy, lb_calld,
- static_cast<unsigned long>(count - 1),
- static_cast<unsigned long>(count), reason);
- }
-}
+//
+// serverlist parsing code
+//
-static void glb_lb_call_data_unref(glb_lb_call_data* lb_calld,
- const char* reason) {
- const bool done = gpr_unref(&lb_calld->refs);
- if (grpc_lb_glb_trace.enabled()) {
- const gpr_atm count = gpr_atm_acq_load(&lb_calld->refs.count);
- gpr_log(GPR_DEBUG, "[%s %p] lb_calld %p UNREF %lu->%lu (%s)",
- grpc_lb_glb_trace.name(), lb_calld->glb_policy, lb_calld,
- static_cast<unsigned long>(count + 1),
- static_cast<unsigned long>(count), reason);
- }
- if (done) {
- GPR_ASSERT(lb_calld->lb_call != nullptr);
- grpc_call_unref(lb_calld->lb_call);
- grpc_metadata_array_destroy(&lb_calld->lb_initial_metadata_recv);
- grpc_metadata_array_destroy(&lb_calld->lb_trailing_metadata_recv);
- grpc_byte_buffer_destroy(lb_calld->send_message_payload);
- grpc_byte_buffer_destroy(lb_calld->recv_message_payload);
- grpc_slice_unref_internal(lb_calld->lb_call_status_details);
- if (lb_calld->client_stats != nullptr) {
- grpc_grpclb_client_stats_unref(lb_calld->client_stats);
- }
- GRPC_LB_POLICY_UNREF(&lb_calld->glb_policy->base, "lb_calld");
- gpr_free(lb_calld);
- }
+// vtable for LB tokens in grpc_lb_addresses
+void* lb_token_copy(void* token) {
+ return token == nullptr
+ ? nullptr
+ : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
-
-static void lb_call_data_shutdown(glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->lb_calld != nullptr);
- GPR_ASSERT(glb_policy->lb_calld->lb_call != nullptr);
- // lb_on_server_status_received will complete the cancellation and clean up.
- grpc_call_cancel(glb_policy->lb_calld->lb_call, nullptr);
- if (glb_policy->lb_calld->client_load_report_timer_callback_pending) {
- grpc_timer_cancel(&glb_policy->lb_calld->client_load_report_timer);
+void lb_token_destroy(void* token) {
+ if (token != nullptr) {
+ GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
}
- glb_policy->lb_calld = nullptr;
}
-
-/* add lb_token of selected subchannel (address) to the call's initial
- * metadata */
-static grpc_error* initial_metadata_add_lb_token(
- grpc_metadata_batch* initial_metadata,
- grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
- GPR_ASSERT(lb_token_mdelem_storage != nullptr);
- GPR_ASSERT(!GRPC_MDISNULL(lb_token));
- return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
- lb_token);
-}
-
-static void destroy_client_stats(void* arg) {
- grpc_grpclb_client_stats_unref(static_cast<grpc_grpclb_client_stats*>(arg));
+int lb_token_cmp(void* token1, void* token2) {
+ if (token1 > token2) return 1;
+ if (token1 < token2) return -1;
+ return 0;
}
+const grpc_lb_user_data_vtable lb_token_vtable = {
+ lb_token_copy, lb_token_destroy, lb_token_cmp};
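The vtable above treats grpc_mdelem payloads as opaque void* tokens. A self-contained sketch of the same copy/destroy/cmp contract, using a heap-allocated string token in place of an mdelem (all names here are hypothetical, not part of the gRPC API), could look like:

    #include <cstdlib>
    #include <cstring>

    struct UserDataVtable {
      void* (*copy)(void*);
      void (*destroy)(void*);
      int (*cmp)(void*, void*);
    };

    void* StrTokenCopy(void* token) {
      if (token == nullptr) return nullptr;
      const char* s = static_cast<char*>(token);
      char* out = static_cast<char*>(malloc(strlen(s) + 1));
      strcpy(out, s);
      return out;
    }
    void StrTokenDestroy(void* token) { free(token); }
    int StrTokenCmp(void* a, void* b) {
      // Pointer-identity ordering, mirroring lb_token_cmp above.
      if (a > b) return 1;
      if (a < b) return -1;
      return 0;
    }

    const UserDataVtable kStrTokenVtable = {StrTokenCopy, StrTokenDestroy,
                                            StrTokenCmp};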
-static void pending_pick_set_metadata_and_context(pending_pick* pp) {
- /* if connected_subchannel is nullptr, no pick has been made by the RR
- * policy (e.g., all addresses failed to connect). There won't be any
- * user_data/token available */
- if (pp->pick->connected_subchannel != nullptr) {
- if (!GRPC_MDISNULL(pp->lb_token)) {
- initial_metadata_add_lb_token(pp->pick->initial_metadata,
- &pp->pick->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(pp->lb_token));
- } else {
- gpr_log(GPR_ERROR,
- "[grpclb %p] No LB token for connected subchannel pick %p",
- pp->glb_policy, pp->pick);
- abort();
- }
- // Pass on client stats via context. Passes ownership of the reference.
- if (pp->client_stats != nullptr) {
- pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
- pp->client_stats;
- pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
- destroy_client_stats;
- }
- } else {
- if (pp->client_stats != nullptr) {
- grpc_grpclb_client_stats_unref(pp->client_stats);
+// Returns the backend addresses extracted from the given addresses.
+grpc_lb_addresses* ExtractBackendAddresses(const grpc_lb_addresses* addresses) {
+ // First pass: count the number of backend addresses.
+ size_t num_backends = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) {
+ ++num_backends;
}
}
+ // Second pass: actually populate the addresses and (empty) LB tokens.
+ grpc_lb_addresses* backend_addresses =
+ grpc_lb_addresses_create(num_backends, &lb_token_vtable);
+ size_t num_copied = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) continue;
+ const grpc_resolved_address* addr = &addresses->addresses[i].address;
+ grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
+ addr->len, false /* is_balancer */,
+ nullptr /* balancer_name */,
+ (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+ ++num_copied;
+ }
+ return backend_addresses;
}
-/* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
-static void pending_pick_complete(void* arg, grpc_error* error) {
- pending_pick* pp = static_cast<pending_pick*>(arg);
- pending_pick_set_metadata_and_context(pp);
- GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
- gpr_free(pp);
-}
-
-static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
- grpc_lb_policy_pick_state* pick) {
- pending_pick* pp = static_cast<pending_pick*>(gpr_zalloc(sizeof(*pp)));
- pp->pick = pick;
- pp->glb_policy = glb_policy;
- GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
- grpc_schedule_on_exec_ctx);
- pp->original_on_complete = pick->on_complete;
- pp->pick->on_complete = &pp->on_complete;
- return pp;
-}
-
-static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
- new_pp->next = *root;
- *root = new_pp;
-}
-
-static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
- grpc_closure* on_ack) {
- pending_ping* pping = static_cast<pending_ping*>(gpr_zalloc(sizeof(*pping)));
- pping->on_initiate = on_initiate;
- pping->on_ack = on_ack;
- pping->next = *root;
- *root = pping;
-}
-
-static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
- bool log) {
+bool IsServerValid(const grpc_grpclb_server* server, size_t idx, bool log) {
if (server->drop) return false;
const grpc_grpclb_ip_address* ip = &server->ip_address;
if (server->port >> 16 != 0) {
if (log) {
gpr_log(GPR_ERROR,
"Invalid port '%d' at index %lu of serverlist. Ignoring.",
- server->port, static_cast<unsigned long>(idx));
+ server->port, (unsigned long)idx);
}
return false;
}
@@ -465,65 +406,43 @@ static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
gpr_log(GPR_ERROR,
"Expected IP to be 4 or 16 bytes, got %d at index %lu of "
"serverlist. Ignoring",
- ip->size, static_cast<unsigned long>(idx));
+ ip->size, (unsigned long)idx);
}
return false;
}
return true;
}
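The checks above boil down to two conditions: the port must fit in 16 bits, and the raw IP must be 4 or 16 bytes. A compact standalone restatement (illustrative only; the helper name is hypothetical):

    #include <cstddef>
    #include <cstdint>

    // A port is valid iff it fits in 16 bits; an IP is valid iff it is
    // an in_addr (4 bytes) or an in6_addr (16 bytes).
    bool PortAndIpLookValid(int32_t port, size_t ip_size) {
      if (port >> 16 != 0) return false;
      return ip_size == 4 || ip_size == 16;
    }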
-/* vtable for LB tokens in grpc_lb_addresses. */
-static void* lb_token_copy(void* token) {
- return token == nullptr
- ? nullptr
- : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
-}
-static void lb_token_destroy(void* token) {
- if (token != nullptr) {
- GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
- }
-}
-static int lb_token_cmp(void* token1, void* token2) {
- if (token1 > token2) return 1;
- if (token1 < token2) return -1;
- return 0;
-}
-static const grpc_lb_user_data_vtable lb_token_vtable = {
- lb_token_copy, lb_token_destroy, lb_token_cmp};
-
-static void parse_server(const grpc_grpclb_server* server,
- grpc_resolved_address* addr) {
+void ParseServer(const grpc_grpclb_server* server,
+ grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server->drop) return;
- const uint16_t netorder_port = htons(static_cast<uint16_t>(server->port));
+ const uint16_t netorder_port = htons((uint16_t)server->port);
  /* the addresses are given in binary format (an in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address* ip = &server->ip_address;
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in* addr4 =
- reinterpret_cast<struct sockaddr_in*>(&addr->addr);
+ struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6* addr6 =
- reinterpret_cast<struct sockaddr_in6*>(&addr->addr);
+ struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
}
}
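ParseServer's IPv4 branch copies the network-order IP bytes straight into the sockaddr and converts only the port. A self-contained sketch of that branch using POSIX headers (not gRPC code; MakeIpv4Sockaddr is a hypothetical name):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <cstdint>
    #include <cstring>

    sockaddr_in MakeIpv4Sockaddr(const unsigned char bytes[4], uint16_t port) {
      sockaddr_in addr;
      memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      memcpy(&addr.sin_addr, bytes, 4);  // Bytes are already network order.
      addr.sin_port = htons(port);       // Port must be converted.
      return addr;
    }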
-/* Returns addresses extracted from \a serverlist. */
-static grpc_lb_addresses* process_serverlist_locked(
- const grpc_grpclb_serverlist* serverlist) {
+// Returns addresses extracted from \a serverlist.
+grpc_lb_addresses* ProcessServerlist(const grpc_grpclb_serverlist* serverlist) {
size_t num_valid = 0;
/* first pass: count how many are valid in order to allocate the necessary
* memory in a single block */
for (size_t i = 0; i < serverlist->num_servers; ++i) {
- if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
+ if (IsServerValid(serverlist->servers[i], i, true)) ++num_valid;
}
grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_valid, &lb_token_vtable);
@@ -535,11 +454,11 @@ static grpc_lb_addresses* process_serverlist_locked(
size_t addr_idx = 0;
for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
const grpc_grpclb_server* server = serverlist->servers[sl_idx];
- if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
+ if (!IsServerValid(serverlist->servers[sl_idx], sl_idx, false)) continue;
GPR_ASSERT(addr_idx < num_valid);
/* address processing */
grpc_resolved_address addr;
- parse_server(server, &addr);
+ ParseServer(server, &addr);
/* lb token processing */
void* user_data;
if (server->has_load_balance_token) {
@@ -570,849 +489,89 @@ static grpc_lb_addresses* process_serverlist_locked(
return lb_addresses;
}
-/* Returns the backend addresses extracted from the given addresses */
-static grpc_lb_addresses* extract_backend_addresses_locked(
- const grpc_lb_addresses* addresses) {
- /* first pass: count the number of backend addresses */
- size_t num_backends = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (!addresses->addresses[i].is_balancer) {
- ++num_backends;
- }
- }
- /* second pass: actually populate the addresses and (empty) LB tokens */
- grpc_lb_addresses* backend_addresses =
- grpc_lb_addresses_create(num_backends, &lb_token_vtable);
- size_t num_copied = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (addresses->addresses[i].is_balancer) continue;
- const grpc_resolved_address* addr = &addresses->addresses[i].address;
- grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
- addr->len, false /* is_balancer */,
- nullptr /* balancer_name */,
- (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
- ++num_copied;
- }
- return backend_addresses;
-}
-
-static void update_lb_connectivity_status_locked(glb_lb_policy* glb_policy,
- grpc_error* rr_state_error) {
- const grpc_connectivity_state curr_glb_state =
- grpc_connectivity_state_check(&glb_policy->state_tracker);
- /* The new connectivity status is a function of the previous one and the new
- * input coming from the status of the RR policy.
- *
- * current state (grpclb's)
- * |
- * v || I | C | R | TF | SD | <- new state (RR's)
- * ===++====+=====+=====+======+======+
- * I || I | C | R | [I] | [I] |
- * ---++----+-----+-----+------+------+
- * C || I | C | R | [C] | [C] |
- * ---++----+-----+-----+------+------+
- * R || I | C | R | [R] | [R] |
- * ---++----+-----+-----+------+------+
- * TF || I | C | R | [TF] | [TF] |
- * ---++----+-----+-----+------+------+
- * SD || NA | NA | NA | NA | NA | (*)
- * ---++----+-----+-----+------+------+
- *
- * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
- * is the current state of grpclb, which is left untouched.
- *
- * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
- * the previous RR instance.
- *
- * Note that the status is never updated to SHUTDOWN as a result of calling
- * this function. Only glb_shutdown() has the power to set that state.
- *
- * (*) This function mustn't be called during shutting down. */
- GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
- switch (glb_policy->rr_connectivity_state) {
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
- case GRPC_CHANNEL_SHUTDOWN:
- GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
- break;
- case GRPC_CHANNEL_IDLE:
- case GRPC_CHANNEL_CONNECTING:
- case GRPC_CHANNEL_READY:
- GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
- }
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(
- GPR_INFO,
- "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
- glb_policy,
- grpc_connectivity_state_name(glb_policy->rr_connectivity_state),
- glb_policy->rr_policy);
- }
- grpc_connectivity_state_set(&glb_policy->state_tracker,
- glb_policy->rr_connectivity_state, rr_state_error,
- "update_lb_connectivity_status_locked");
-}
-
-/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
- * immediately (ignoring its completion callback), we need to perform the
- * cleanups this callback would otherwise be responsible for.
- * If \a force_async is true, then we will manually schedule the
- * completion callback even if the pick is available immediately. */
-static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
- bool force_async, pending_pick* pp) {
- // Check for drops if we are not using fallback backend addresses.
- if (glb_policy->serverlist != nullptr) {
- // Look at the index into the serverlist to see if we should drop this call.
- grpc_grpclb_server* server =
- glb_policy->serverlist->servers[glb_policy->serverlist_index++];
- if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
- glb_policy->serverlist_index = 0; // Wrap-around.
- }
- if (server->drop) {
- // Update client load reporting stats to indicate the number of
- // dropped calls. Note that we have to do this here instead of in
- // the client_load_reporting filter, because we do not create a
- // subchannel call (and therefore no client_load_reporting filter)
- // for dropped calls.
- if (glb_policy->lb_calld != nullptr &&
- glb_policy->lb_calld->client_stats != nullptr) {
- grpc_grpclb_client_stats_add_call_dropped_locked(
- server->load_balance_token, glb_policy->lb_calld->client_stats);
- }
- if (force_async) {
- GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- return false;
- }
- gpr_free(pp);
- return true;
- }
- }
- // Set client_stats and user_data.
- if (glb_policy->lb_calld != nullptr &&
- glb_policy->lb_calld->client_stats != nullptr) {
- pp->client_stats =
- grpc_grpclb_client_stats_ref(glb_policy->lb_calld->client_stats);
- }
- GPR_ASSERT(pp->pick->user_data == nullptr);
- pp->pick->user_data = reinterpret_cast<void**>(&pp->lb_token);
- // Pick via the RR policy.
- bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
- if (pick_done) {
- pending_pick_set_metadata_and_context(pp);
- if (force_async) {
- GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
- pick_done = false;
- }
- gpr_free(pp);
- }
- /* else, the pending pick will be registered and taken care of by the
- * pending pick list inside the RR policy (glb_policy->rr_policy).
- * Eventually, wrapped_on_complete will be called, which will -among other
- * things- add the LB token to the call's initial metadata */
- return pick_done;
-}
-
-static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
- grpc_lb_addresses* addresses;
- if (glb_policy->serverlist != nullptr) {
- GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
- addresses = process_serverlist_locked(glb_policy->serverlist);
- } else {
- // If rr_handover_locked() is invoked when we haven't received any
- // serverlist from the balancer, we use the fallback backends returned by
- // the resolver. Note that the fallback backend list may be empty, in which
- // case the new round_robin policy will keep the requested picks pending.
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
- addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
- }
- GPR_ASSERT(addresses != nullptr);
- grpc_lb_policy_args* args =
- static_cast<grpc_lb_policy_args*>(gpr_zalloc(sizeof(*args)));
- args->client_channel_factory = glb_policy->cc_factory;
- args->combiner = glb_policy->base.combiner;
- // Replace the LB addresses in the channel args that we pass down to
- // the subchannel.
- static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
- const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
- args->args = grpc_channel_args_copy_and_add_and_remove(
- glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
- 1);
- grpc_lb_addresses_destroy(addresses);
- return args;
-}
-
-static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
- grpc_channel_args_destroy(args->args);
- gpr_free(args);
-}
-
-static void rr_on_reresolution_requested_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- if (glb_policy->shutting_down || error != GRPC_ERROR_NONE) {
- GRPC_LB_POLICY_UNREF(&glb_policy->base,
- "rr_on_reresolution_requested_locked");
- return;
- }
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(
- GPR_DEBUG,
- "[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
- glb_policy, glb_policy->rr_policy);
- }
-  // If we are talking to a balancer, we expect to get updated addresses from
- // the balancer, so we can ignore the re-resolution request from the RR
- // policy. Otherwise, handle the re-resolution request using glb's original
- // re-resolution closure.
- if (glb_policy->lb_calld == nullptr ||
- !glb_policy->lb_calld->seen_initial_response) {
- grpc_lb_policy_try_reresolve(&glb_policy->base, &grpc_lb_glb_trace,
- GRPC_ERROR_NONE);
- }
- // Give back the wrapper closure to the RR policy.
- grpc_lb_policy_set_reresolve_closure_locked(
- glb_policy->rr_policy, &glb_policy->rr_on_reresolution_requested);
-}
-
-static void create_rr_locked(glb_lb_policy* glb_policy,
- grpc_lb_policy_args* args) {
- GPR_ASSERT(glb_policy->rr_policy == nullptr);
- grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
- if (new_rr_policy == nullptr) {
- gpr_log(GPR_ERROR,
- "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
- "update with %" PRIuPTR
- " entries. The previous RR instance (%p), if any, will continue to "
- "be used. Future updates from the LB will attempt to create new "
- "instances.",
- glb_policy, glb_policy->serverlist->num_servers,
- glb_policy->rr_policy);
- return;
- }
- GRPC_LB_POLICY_REF(&glb_policy->base, "rr_on_reresolution_requested_locked");
- grpc_lb_policy_set_reresolve_closure_locked(
- new_rr_policy, &glb_policy->rr_on_reresolution_requested);
- glb_policy->rr_policy = new_rr_policy;
- grpc_error* rr_state_error = nullptr;
- glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
- glb_policy->rr_policy, &rr_state_error);
- /* Connectivity state is a function of the RR policy updated/created */
- update_lb_connectivity_status_locked(glb_policy, rr_state_error);
- /* Add the gRPC LB's interested_parties pollset_set to that of the newly
- * created RR policy. This will make the RR policy progress upon activity on
- * gRPC LB, which in turn is tied to the application's call */
- grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
- glb_policy->base.interested_parties);
- /* Subscribe to changes to the connectivity of the new RR */
- GRPC_LB_POLICY_REF(&glb_policy->base, "rr_on_connectivity_changed_locked");
- grpc_lb_policy_notify_on_state_change_locked(
- glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
- &glb_policy->rr_on_connectivity_changed);
- grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
- // Send pending picks to RR policy.
- pending_pick* pp;
- while ((pp = glb_policy->pending_picks)) {
- glb_policy->pending_picks = pp->next;
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] Pending pick about to (async) PICK from RR %p",
- glb_policy, glb_policy->rr_policy);
- }
- pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
- }
- // Send pending pings to RR policy.
- pending_ping* pping;
- while ((pping = glb_policy->pending_pings)) {
- glb_policy->pending_pings = pping->next;
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
- glb_policy, glb_policy->rr_policy);
- }
- grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
- pping->on_ack);
- gpr_free(pping);
- }
-}
-
-/* glb_policy->rr_policy may be nullptr (initial handover) */
-static void rr_handover_locked(glb_lb_policy* glb_policy) {
- if (glb_policy->shutting_down) return;
- grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
- GPR_ASSERT(args != nullptr);
- if (glb_policy->rr_policy != nullptr) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
- glb_policy->rr_policy);
- }
- grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
- } else {
- create_rr_locked(glb_policy, args);
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
- glb_policy->rr_policy);
- }
- }
- lb_policy_args_destroy(args);
-}
-
-static void rr_on_connectivity_changed_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- if (glb_policy->shutting_down) {
- GRPC_LB_POLICY_UNREF(&glb_policy->base,
- "rr_on_connectivity_changed_locked");
- return;
- }
- update_lb_connectivity_status_locked(glb_policy, GRPC_ERROR_REF(error));
- // Resubscribe. Reuse the "rr_on_connectivity_changed_locked" ref.
- grpc_lb_policy_notify_on_state_change_locked(
- glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
- &glb_policy->rr_on_connectivity_changed);
-}
-
-static void destroy_balancer_name(void* balancer_name) {
- gpr_free(balancer_name);
-}
-
-static grpc_slice_hash_table_entry targets_info_entry_create(
- const char* address, const char* balancer_name) {
- grpc_slice_hash_table_entry entry;
- entry.key = grpc_slice_from_copied_string(address);
- entry.value = gpr_strdup(balancer_name);
- return entry;
-}
-
-static int balancer_name_cmp_fn(void* a, void* b) {
- const char* a_str = static_cast<const char*>(a);
- const char* b_str = static_cast<const char*>(b);
- return strcmp(a_str, b_str);
-}
-
-/* Returns the channel args for the LB channel, used to create a bidirectional
- * stream for the reception of load balancing updates.
- *
- * Inputs:
- * - \a addresses: corresponding to the balancers.
- * - \a response_generator: in order to propagate updates from the resolver
- * above the grpclb policy.
- * - \a args: other args inherited from the grpclb policy. */
-static grpc_channel_args* build_lb_channel_args(
- const grpc_lb_addresses* addresses,
- grpc_core::FakeResolverResponseGenerator* response_generator,
- const grpc_channel_args* args) {
- size_t num_grpclb_addrs = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
- }
- /* All input addresses come from a resolver that claims they are LB services.
- * It's the resolver's responsibility to make sure this policy is only
- * instantiated and used in that case. Otherwise, something has gone wrong. */
- GPR_ASSERT(num_grpclb_addrs > 0);
- grpc_lb_addresses* lb_addresses =
- grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
- grpc_slice_hash_table_entry* targets_info_entries =
- static_cast<grpc_slice_hash_table_entry*>(
- gpr_zalloc(sizeof(*targets_info_entries) * num_grpclb_addrs));
-
- size_t lb_addresses_idx = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (!addresses->addresses[i].is_balancer) continue;
- if (addresses->addresses[i].user_data != nullptr) {
- gpr_log(GPR_ERROR,
- "This LB policy doesn't support user data. It will be ignored");
- }
- char* addr_str;
- GPR_ASSERT(grpc_sockaddr_to_string(
- &addr_str, &addresses->addresses[i].address, true) > 0);
- targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
- addr_str, addresses->addresses[i].balancer_name);
- gpr_free(addr_str);
-
- grpc_lb_addresses_set_address(
- lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
- addresses->addresses[i].address.len, false /* is balancer */,
- addresses->addresses[i].balancer_name, nullptr /* user data */);
- }
- GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
- grpc_slice_hash_table* targets_info =
- grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
- destroy_balancer_name, balancer_name_cmp_fn);
- gpr_free(targets_info_entries);
-
- grpc_channel_args* lb_channel_args =
- grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
- response_generator, args);
-
- grpc_arg lb_channel_addresses_arg =
- grpc_lb_addresses_create_channel_arg(lb_addresses);
-
- grpc_channel_args* result = grpc_channel_args_copy_and_add(
- lb_channel_args, &lb_channel_addresses_arg, 1);
- grpc_slice_hash_table_unref(targets_info);
- grpc_channel_args_destroy(lb_channel_args);
- grpc_lb_addresses_destroy(lb_addresses);
- return result;
-}
-
-static void glb_destroy(grpc_lb_policy* pol) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- GPR_ASSERT(glb_policy->pending_picks == nullptr);
- GPR_ASSERT(glb_policy->pending_pings == nullptr);
- gpr_free((void*)glb_policy->server_name);
- grpc_channel_args_destroy(glb_policy->args);
- grpc_connectivity_state_destroy(&glb_policy->state_tracker);
- if (glb_policy->serverlist != nullptr) {
- grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
- }
- if (glb_policy->fallback_backend_addresses != nullptr) {
- grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
- }
- // TODO(roth): Remove this once the LB policy becomes a C++ object.
- glb_policy->response_generator.reset();
- grpc_subchannel_index_unref();
- gpr_free(glb_policy);
-}
-
-static void glb_shutdown_locked(grpc_lb_policy* pol,
- grpc_lb_policy* new_policy) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
- glb_policy->shutting_down = true;
- if (glb_policy->lb_calld != nullptr) {
- lb_call_data_shutdown(glb_policy);
- }
- if (glb_policy->retry_timer_callback_pending) {
- grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
- }
- if (glb_policy->fallback_timer_callback_pending) {
- grpc_timer_cancel(&glb_policy->lb_fallback_timer);
- }
- if (glb_policy->rr_policy != nullptr) {
- grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
- GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
- }
- // We destroy the LB channel here because
- // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
- // instance. Destroying the lb channel in glb_destroy would likely result in
- // a callback invocation without a valid glb_policy arg.
- if (glb_policy->lb_channel != nullptr) {
- grpc_channel_destroy(glb_policy->lb_channel);
- glb_policy->lb_channel = nullptr;
- }
- grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_REF(error), "glb_shutdown");
- grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
- // Clear pending picks.
- pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if (new_policy != nullptr) {
- // Hand pick over to new policy.
- if (pp->client_stats != nullptr) {
- grpc_grpclb_client_stats_unref(pp->client_stats);
- }
- pp->pick->on_complete = pp->original_on_complete;
- if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
- // Synchronous return; schedule callback.
- GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
- }
- gpr_free(pp);
- } else {
- pp->pick->connected_subchannel.reset();
- GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
- }
- pp = next;
- }
- // Clear pending pings.
- pending_ping* pping = glb_policy->pending_pings;
- glb_policy->pending_pings = nullptr;
- while (pping != nullptr) {
- pending_ping* next = pping->next;
- GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
- GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
- gpr_free(pping);
- pping = next;
- }
- GRPC_ERROR_UNREF(error);
-}
-
-// Cancel a specific pending pick.
//
-// A grpclb pick progresses as follows:
-// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
-// handed over to the RR policy (in create_rr_locked()). From that point
-// onwards, it'll be RR's responsibility. For cancellations, that implies the
-// pick needs also be cancelled by the RR instance.
-// - Otherwise, without an RR instance, picks stay pending at this policy's
-// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-// we invoke the completion closure and set *target to nullptr right here.
-static void glb_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_lb_policy_pick_state* pick,
- grpc_error* error) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if (pp->pick == pick) {
- pick->connected_subchannel.reset();
- GRPC_CLOSURE_SCHED(&pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick Cancelled", &error, 1));
- } else {
- pp->next = glb_policy->pending_picks;
- glb_policy->pending_picks = pp;
- }
- pp = next;
- }
- if (glb_policy->rr_policy != nullptr) {
- grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
- GRPC_ERROR_REF(error));
- }
- GRPC_ERROR_UNREF(error);
-}
-
-// Cancel all pending picks.
+// GrpcLb::BalancerCallState
//
-// A grpclb pick progresses as follows:
-// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be
-// handed over to the RR policy (in create_rr_locked()). From that point
-// onwards, it'll be RR's responsibility. For cancellations, that implies the
-// pick needs also be cancelled by the RR instance.
-// - Otherwise, without an RR instance, picks stay pending at this policy's
-// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-// we invoke the completion closure and set *target to nullptr right here.
-static void glb_cancel_picks_locked(grpc_lb_policy* pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error* error) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
- initial_metadata_flags_eq) {
- GRPC_CLOSURE_SCHED(&pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick Cancelled", &error, 1));
- } else {
- pp->next = glb_policy->pending_picks;
- glb_policy->pending_picks = pp;
- }
- pp = next;
- }
- if (glb_policy->rr_policy != nullptr) {
- grpc_lb_policy_cancel_picks_locked(
- glb_policy->rr_policy, initial_metadata_flags_mask,
- initial_metadata_flags_eq, GRPC_ERROR_REF(error));
- }
- GRPC_ERROR_UNREF(error);
-}
-
-static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
-static void query_for_backends_locked(glb_lb_policy* glb_policy);
-static void start_picking_locked(glb_lb_policy* glb_policy) {
- /* start a timer to fall back */
- if (glb_policy->lb_fallback_timeout_ms > 0 &&
- glb_policy->serverlist == nullptr &&
- !glb_policy->fallback_timer_callback_pending) {
- grpc_millis deadline =
- grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
- GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
- glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->fallback_timer_callback_pending = true;
- grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
- &glb_policy->lb_on_fallback);
- }
- glb_policy->started_picking = true;
- glb_policy->lb_call_backoff->Reset();
- query_for_backends_locked(glb_policy);
-}
-
-static void glb_exit_idle_locked(grpc_lb_policy* pol) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- if (!glb_policy->started_picking) {
- start_picking_locked(glb_policy);
- }
-}
-
-static int glb_pick_locked(grpc_lb_policy* pol,
- grpc_lb_policy_pick_state* pick) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- pending_pick* pp = pending_pick_create(glb_policy, pick);
- bool pick_done = false;
- if (glb_policy->rr_policy != nullptr) {
- const grpc_connectivity_state rr_connectivity_state =
- grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
- nullptr);
- // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
- // callback registered to capture this event
- // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
- // need to make sure we aren't trying to pick from a RR policy instance
- // that's in shutdown.
- if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
- glb_policy, glb_policy->rr_policy,
- grpc_connectivity_state_name(rr_connectivity_state));
- }
- pending_pick_add(&glb_policy->pending_picks, pp);
- pick_done = false;
- } else { // RR not in shutdown
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
- glb_policy->rr_policy);
- }
- pick_done =
- pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
- }
- } else { // glb_policy->rr_policy == NULL
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
- glb_policy);
- }
- pending_pick_add(&glb_policy->pending_picks, pp);
- if (!glb_policy->started_picking) {
- start_picking_locked(glb_policy);
- }
- pick_done = false;
- }
- return pick_done;
-}
-static grpc_connectivity_state glb_check_connectivity_locked(
- grpc_lb_policy* pol, grpc_error** connectivity_error) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- return grpc_connectivity_state_get(&glb_policy->state_tracker,
- connectivity_error);
-}
-
-static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
- grpc_closure* on_ack) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- if (glb_policy->rr_policy) {
- grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
- } else {
- pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
- if (!glb_policy->started_picking) {
- start_picking_locked(glb_policy);
- }
- }
-}
-
-static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
- grpc_connectivity_state* current,
- grpc_closure* notify) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
- grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
- current, notify);
-}
-
-static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
- glb_policy->retry_timer_callback_pending = false;
- if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE &&
- glb_policy->lb_calld == nullptr) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
- }
- query_for_backends_locked(glb_policy);
- }
- GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
-}
-
-static void start_lb_call_retry_timer_locked(glb_lb_policy* glb_policy) {
- grpc_millis next_try = glb_policy->lb_call_backoff->NextAttemptTime();
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
- glb_policy);
- grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
- if (timeout > 0) {
- gpr_log(GPR_DEBUG,
- "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
- glb_policy, timeout);
- } else {
- gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
- glb_policy);
- }
- }
- GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
- lb_call_on_retry_timer_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->retry_timer_callback_pending = true;
- grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
- &glb_policy->lb_on_call_retry);
-}
-
-static void maybe_send_client_load_report_locked(void* arg, grpc_error* error);
-
-static void schedule_next_client_load_report(glb_lb_call_data* lb_calld) {
- const grpc_millis next_client_load_report_time =
- grpc_core::ExecCtx::Get()->Now() + lb_calld->client_stats_report_interval;
- GRPC_CLOSURE_INIT(
- &lb_calld->client_load_report_closure,
- maybe_send_client_load_report_locked, lb_calld,
- grpc_combiner_scheduler(lb_calld->glb_policy->base.combiner));
- grpc_timer_init(&lb_calld->client_load_report_timer,
- next_client_load_report_time,
- &lb_calld->client_load_report_closure);
- lb_calld->client_load_report_timer_callback_pending = true;
-}
-
-static void client_load_report_done_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
- glb_lb_policy* glb_policy = lb_calld->glb_policy;
- grpc_byte_buffer_destroy(lb_calld->send_message_payload);
- lb_calld->send_message_payload = nullptr;
- if (error != GRPC_ERROR_NONE || lb_calld != glb_policy->lb_calld) {
- glb_lb_call_data_unref(lb_calld, "client_load_report");
- return;
- }
- schedule_next_client_load_report(lb_calld);
-}
-
-static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
- grpc_grpclb_dropped_call_counts* drop_entries =
- static_cast<grpc_grpclb_dropped_call_counts*>(
- request->client_stats.calls_finished_with_drop.arg);
- return request->client_stats.num_calls_started == 0 &&
- request->client_stats.num_calls_finished == 0 &&
- request->client_stats.num_calls_finished_with_client_failed_to_send ==
- 0 &&
- request->client_stats.num_calls_finished_known_received == 0 &&
- (drop_entries == nullptr || drop_entries->num_entries == 0);
-}
-
-static void send_client_load_report_locked(glb_lb_call_data* lb_calld) {
- glb_lb_policy* glb_policy = lb_calld->glb_policy;
- // Construct message payload.
- GPR_ASSERT(lb_calld->send_message_payload == nullptr);
- grpc_grpclb_request* request =
- grpc_grpclb_load_report_request_create_locked(lb_calld->client_stats);
- // Skip client load report if the counters were all zero in the last
- // report and they are still zero in this one.
- if (load_report_counters_are_zero(request)) {
- if (lb_calld->last_client_load_report_counters_were_zero) {
- grpc_grpclb_request_destroy(request);
- schedule_next_client_load_report(lb_calld);
- return;
- }
- lb_calld->last_client_load_report_counters_were_zero = true;
- } else {
- lb_calld->last_client_load_report_counters_were_zero = false;
- }
- grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
- lb_calld->send_message_payload =
- grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- grpc_slice_unref_internal(request_payload_slice);
- grpc_grpclb_request_destroy(request);
- // Send the report.
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = lb_calld->send_message_payload;
- GRPC_CLOSURE_INIT(&lb_calld->client_load_report_closure,
- client_load_report_done_locked, lb_calld,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lb_calld->lb_call, &op, 1, &lb_calld->client_load_report_closure);
- if (call_error != GRPC_CALL_OK) {
- gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- }
-}
-
-static void maybe_send_client_load_report_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
- glb_lb_policy* glb_policy = lb_calld->glb_policy;
- lb_calld->client_load_report_timer_callback_pending = false;
- if (error != GRPC_ERROR_NONE || lb_calld != glb_policy->lb_calld) {
- glb_lb_call_data_unref(lb_calld, "client_load_report");
- return;
- }
- // If we've already sent the initial request, then we can go ahead and send
- // the load report. Otherwise, we need to wait until the initial request has
- // been sent to send this (see lb_on_sent_initial_request_locked()).
- if (lb_calld->send_message_payload == nullptr) {
- send_client_load_report_locked(lb_calld);
- } else {
- lb_calld->client_load_report_is_due = true;
- }
-}
-
-static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error);
-static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
-static void lb_on_response_received_locked(void* arg, grpc_error* error);
-static glb_lb_call_data* lb_call_data_create_locked(glb_lb_policy* glb_policy) {
- GPR_ASSERT(!glb_policy->shutting_down);
+GrpcLb::BalancerCallState::BalancerCallState(
+ RefCountedPtr<LoadBalancingPolicy> parent_grpclb_policy)
+ : InternallyRefCountedWithTracing<BalancerCallState>(&grpc_lb_glb_trace),
+ grpclb_policy_(std::move(parent_grpclb_policy)) {
+ GPR_ASSERT(grpclb_policy_ != nullptr);
+ GPR_ASSERT(!grpclb_policy()->shutting_down_);
// Init the LB call. Note that the LB call will progress every time there's
- // activity in glb_policy->base.interested_parties, which is comprised of the
- // polling entities from client_channel.
- GPR_ASSERT(glb_policy->server_name != nullptr);
- GPR_ASSERT(glb_policy->server_name[0] != '\0');
- grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
+  // activity in grpclb_policy_->interested_parties(), which is composed of
+ // the polling entities from client_channel.
+ GPR_ASSERT(grpclb_policy()->server_name_ != nullptr);
+ GPR_ASSERT(grpclb_policy()->server_name_[0] != '\0');
+ grpc_slice host =
+ grpc_slice_from_copied_string(grpclb_policy()->server_name_);
grpc_millis deadline =
- glb_policy->lb_call_timeout_ms == 0
+ grpclb_policy()->lb_call_timeout_ms_ == 0
? GRPC_MILLIS_INF_FUTURE
- : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
- glb_lb_call_data* lb_calld =
- static_cast<glb_lb_call_data*>(gpr_zalloc(sizeof(*lb_calld)));
- lb_calld->lb_call = grpc_channel_create_pollset_set_call(
- glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
- glb_policy->base.interested_parties,
+ : ExecCtx::Get()->Now() + grpclb_policy()->lb_call_timeout_ms_;
+ lb_call_ = grpc_channel_create_pollset_set_call(
+ grpclb_policy()->lb_channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
+ grpclb_policy_->interested_parties(),
GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
&host, deadline, nullptr);
grpc_slice_unref_internal(host);
// Init the LB call request payload.
grpc_grpclb_request* request =
- grpc_grpclb_request_create(glb_policy->server_name);
+ grpc_grpclb_request_create(grpclb_policy()->server_name_);
grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
- lb_calld->send_message_payload =
+ send_message_payload_ =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(request_payload_slice);
grpc_grpclb_request_destroy(request);
// Init other data associated with the LB call.
- lb_calld->glb_policy = glb_policy;
- gpr_ref_init(&lb_calld->refs, 1);
- grpc_metadata_array_init(&lb_calld->lb_initial_metadata_recv);
- grpc_metadata_array_init(&lb_calld->lb_trailing_metadata_recv);
- GRPC_CLOSURE_INIT(&lb_calld->lb_on_sent_initial_request,
- lb_on_sent_initial_request_locked, lb_calld,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- GRPC_CLOSURE_INIT(&lb_calld->lb_on_response_received,
- lb_on_response_received_locked, lb_calld,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- GRPC_CLOSURE_INIT(&lb_calld->lb_on_server_status_received,
- lb_on_server_status_received_locked, lb_calld,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- // Hold a ref to the glb_policy.
- GRPC_LB_POLICY_REF(&glb_policy->base, "lb_calld");
- return lb_calld;
+ grpc_metadata_array_init(&lb_initial_metadata_recv_);
+ grpc_metadata_array_init(&lb_trailing_metadata_recv_);
+ GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSentLocked,
+ this, grpc_combiner_scheduler(grpclb_policy()->combiner()));
+ GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_,
+ OnBalancerMessageReceivedLocked, this,
+ grpc_combiner_scheduler(grpclb_policy()->combiner()));
+ GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_,
+ OnBalancerStatusReceivedLocked, this,
+ grpc_combiner_scheduler(grpclb_policy()->combiner()));
}
-/*
- * Auxiliary functions and LB client callbacks.
- */
+GrpcLb::BalancerCallState::~BalancerCallState() {
+ GPR_ASSERT(lb_call_ != nullptr);
+ grpc_call_unref(lb_call_);
+ grpc_metadata_array_destroy(&lb_initial_metadata_recv_);
+ grpc_metadata_array_destroy(&lb_trailing_metadata_recv_);
+ grpc_byte_buffer_destroy(send_message_payload_);
+ grpc_byte_buffer_destroy(recv_message_payload_);
+ grpc_slice_unref_internal(lb_call_status_details_);
+ if (client_stats_ != nullptr) {
+ grpc_grpclb_client_stats_unref(client_stats_);
+ }
+}
-static void query_for_backends_locked(glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->lb_channel != nullptr);
- if (glb_policy->shutting_down) return;
- // Init the LB call data.
- GPR_ASSERT(glb_policy->lb_calld == nullptr);
- glb_policy->lb_calld = lb_call_data_create_locked(glb_policy);
+void GrpcLb::BalancerCallState::Orphan() {
+ GPR_ASSERT(lb_call_ != nullptr);
+ // If we are here because grpclb_policy wants to cancel the call,
+ // lb_on_balancer_status_received_ will complete the cancellation and clean
+ // up. Otherwise, we are here because grpclb_policy has to orphan a failed
+  // call; in that case, the cancellation below is a no-op.
+ grpc_call_cancel(lb_call_, nullptr);
+ if (client_load_report_timer_callback_pending_) {
+ grpc_timer_cancel(&client_load_report_timer_);
+ }
+  // Note that the initial ref is held by lb_on_balancer_status_received_
+ // instead of the caller of this function. So the corresponding unref happens
+ // in lb_on_balancer_status_received_ instead of here.
+}
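The ownership pattern in Orphan() is worth spelling out: orphaning only cancels the call, while the in-flight status callback owns the reference and releases it when it runs. A rough model using std::shared_ptr (CallState and MakeStatusCallback are hypothetical, not the actual ref-counting machinery):

    #include <functional>
    #include <memory>

    struct CallState {
      bool cancelled = false;
      void Orphan() { cancelled = true; }  // Only cancels; no delete here.
    };

    // The pending status callback captures the owning reference; the
    // CallState is destroyed only when that callback has run and been
    // discarded, never inside Orphan() itself.
    std::function<void()> MakeStatusCallback(std::shared_ptr<CallState> state) {
      return [state]() {
        // ... process status; dropping `state` releases the last ref.
      };
    }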
+
+void GrpcLb::BalancerCallState::StartQuery() {
+ GPR_ASSERT(lb_call_ != nullptr);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
- "[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p, "
- "lb_call: %p)",
- glb_policy, glb_policy->lb_channel, glb_policy->lb_calld,
- glb_policy->lb_calld->lb_call);
+ "[grpclb %p] Starting LB call (lb_calld: %p, lb_call: %p)",
+ grpclb_policy_.get(), this, lb_call_);
}
- GPR_ASSERT(glb_policy->lb_calld->lb_call != nullptr);
// Create the ops.
grpc_call_error call_error;
grpc_op ops[3];
@@ -1425,47 +584,49 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->reserved = nullptr;
op++;
// Op: send request message.
- GPR_ASSERT(glb_policy->lb_calld->send_message_payload != nullptr);
+ GPR_ASSERT(send_message_payload_ != nullptr);
op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message.send_message =
- glb_policy->lb_calld->send_message_payload;
+ op->data.send_message.send_message = send_message_payload_;
op->flags = 0;
op->reserved = nullptr;
op++;
- glb_lb_call_data_ref(glb_policy->lb_calld,
- "lb_on_sent_initial_request_locked");
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = Ref(DEBUG_LOCATION, "on_initial_request_sent");
+ self.release();
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
- &glb_policy->lb_calld->lb_on_sent_initial_request);
+ lb_call_, ops, (size_t)(op - ops), &lb_on_initial_request_sent_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv initial metadata.
op = ops;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
- &glb_policy->lb_calld->lb_initial_metadata_recv;
+ &lb_initial_metadata_recv_;
op->flags = 0;
op->reserved = nullptr;
op++;
// Op: recv response.
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message.recv_message =
- &glb_policy->lb_calld->recv_message_payload;
+ op->data.recv_message.recv_message = &recv_message_payload_;
op->flags = 0;
op->reserved = nullptr;
op++;
- glb_lb_call_data_ref(glb_policy->lb_calld, "lb_on_response_received_locked");
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ self = Ref(DEBUG_LOCATION, "on_message_received");
+ self.release();
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
- &glb_policy->lb_calld->lb_on_response_received);
+ lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv server status.
op = ops;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
- &glb_policy->lb_calld->lb_trailing_metadata_recv;
- op->data.recv_status_on_client.status = &glb_policy->lb_calld->lb_call_status;
- op->data.recv_status_on_client.status_details =
- &glb_policy->lb_calld->lb_call_status_details;
+ &lb_trailing_metadata_recv_;
+ op->data.recv_status_on_client.status = &lb_call_status_;
+ op->data.recv_status_on_client.status_details = &lb_call_status_details_;
op->flags = 0;
op->reserved = nullptr;
op++;
@@ -1473,83 +634,174 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
// ref instead of a new ref. When it's invoked, it's the initial ref that is
// unreffed.
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
- &glb_policy->lb_calld->lb_on_server_status_received);
+ lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() {
+ const grpc_millis next_client_load_report_time =
+ ExecCtx::Get()->Now() + client_stats_report_interval_;
+ GRPC_CLOSURE_INIT(&client_load_report_closure_,
+ MaybeSendClientLoadReportLocked, this,
+ grpc_combiner_scheduler(grpclb_policy()->combiner()));
+ grpc_timer_init(&client_load_report_timer_, next_client_load_report_time,
+ &client_load_report_closure_);
+ client_load_report_timer_callback_pending_ = true;
+}
+
+void GrpcLb::BalancerCallState::MaybeSendClientLoadReportLocked(
+ void* arg, grpc_error* error) {
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
+ GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
+ lb_calld->client_load_report_timer_callback_pending_ = false;
+ if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) {
+ lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
+ return;
+ }
+ // If we've already sent the initial request, then we can go ahead and send
+ // the load report. Otherwise, we need to wait until the initial request has
+ // been sent to send this (see OnInitialRequestSentLocked()).
+ if (lb_calld->send_message_payload_ == nullptr) {
+ lb_calld->SendClientLoadReportLocked();
+ } else {
+ lb_calld->client_load_report_is_due_ = true;
+ }
+}
+
+bool GrpcLb::BalancerCallState::LoadReportCountersAreZero(
+ grpc_grpclb_request* request) {
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ request->client_stats.calls_finished_with_drop.arg);
+ return request->client_stats.num_calls_started == 0 &&
+ request->client_stats.num_calls_finished == 0 &&
+ request->client_stats.num_calls_finished_with_client_failed_to_send ==
+ 0 &&
+ request->client_stats.num_calls_finished_known_received == 0 &&
+ (drop_entries == nullptr || drop_entries->num_entries == 0);
}
-static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
- grpc_byte_buffer_destroy(lb_calld->send_message_payload);
- lb_calld->send_message_payload = nullptr;
+void GrpcLb::BalancerCallState::SendClientLoadReportLocked() {
+ // Construct message payload.
+ GPR_ASSERT(send_message_payload_ == nullptr);
+ grpc_grpclb_request* request =
+ grpc_grpclb_load_report_request_create_locked(client_stats_);
+ // Skip client load report if the counters were all zero in the last
+ // report and they are still zero in this one.
+ if (LoadReportCountersAreZero(request)) {
+ if (last_client_load_report_counters_were_zero_) {
+ grpc_grpclb_request_destroy(request);
+ ScheduleNextClientLoadReportLocked();
+ return;
+ }
+ last_client_load_report_counters_were_zero_ = true;
+ } else {
+ last_client_load_report_counters_were_zero_ = false;
+ }
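+ // Serialize the request and wrap it in a byte buffer for the
+ // SEND_MESSAGE op; the slice and the request itself can then be released.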
+ grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
+ send_message_payload_ =
+ grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+ grpc_slice_unref_internal(request_payload_slice);
+ grpc_grpclb_request_destroy(request);
+ // Send the report.
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_SEND_MESSAGE;
+ op.data.send_message.send_message = send_message_payload_;
+ GRPC_CLOSURE_INIT(&client_load_report_closure_, ClientLoadReportDoneLocked,
+ this, grpc_combiner_scheduler(grpclb_policy()->combiner()));
+ grpc_call_error call_error = grpc_call_start_batch_and_execute(
+ lb_call_, &op, 1, &client_load_report_closure_);
+ if (call_error != GRPC_CALL_OK) {
+ gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", grpclb_policy_.get(),
+ call_error);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+ }
+}
+
+void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg,
+ grpc_error* error) {
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
+ GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
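+ // The load report batch has completed; release the serialized payload.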
+ grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
+ lb_calld->send_message_payload_ = nullptr;
+ if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) {
+ lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
+ return;
+ }
+ lb_calld->ScheduleNextClientLoadReportLocked();
+}
+
+void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(void* arg,
+ grpc_error* error) {
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
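+ // The initial request is now on the wire. Clearing send_message_payload_
+ // also signals that a client load report may be sent (see
+ // MaybeSendClientLoadReportLocked()).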
+ grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
+ lb_calld->send_message_payload_ = nullptr;
// If we attempted to send a client load report before the initial request was
// sent (and this lb_calld is still in use), send the load report now.
- if (lb_calld->client_load_report_is_due &&
- lb_calld == lb_calld->glb_policy->lb_calld) {
- send_client_load_report_locked(lb_calld);
- lb_calld->client_load_report_is_due = false;
+ if (lb_calld->client_load_report_is_due_ &&
+ lb_calld == lb_calld->grpclb_policy()->lb_calld_.get()) {
+ lb_calld->SendClientLoadReportLocked();
+ lb_calld->client_load_report_is_due_ = false;
}
- glb_lb_call_data_unref(lb_calld, "lb_on_sent_initial_request_locked");
+ lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent");
}
-static void lb_on_response_received_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
- glb_lb_policy* glb_policy = lb_calld->glb_policy;
+void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
+ void* arg, grpc_error* error) {
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
+ GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
// Empty payload means the LB call was cancelled.
- if (lb_calld != glb_policy->lb_calld ||
- lb_calld->recv_message_payload == nullptr) {
- glb_lb_call_data_unref(lb_calld, "lb_on_response_received_locked");
+ if (lb_calld != grpclb_policy->lb_calld_.get() ||
+ lb_calld->recv_message_payload_ == nullptr) {
+ lb_calld->Unref(DEBUG_LOCATION, "on_message_received");
return;
}
- grpc_op ops[2];
- memset(ops, 0, sizeof(ops));
- grpc_op* op = ops;
- glb_policy->lb_call_backoff->Reset();
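+ // Copy the response payload into a single slice for parsing, then release
+ // the byte buffer; recv_message_payload_ will be reused by the next
+ // RECV_MESSAGE op.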
grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, lb_calld->recv_message_payload);
+ grpc_byte_buffer_reader_init(&bbr, lb_calld->recv_message_payload_);
grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
grpc_byte_buffer_reader_destroy(&bbr);
- grpc_byte_buffer_destroy(lb_calld->recv_message_payload);
- lb_calld->recv_message_payload = nullptr;
+ grpc_byte_buffer_destroy(lb_calld->recv_message_payload_);
+ lb_calld->recv_message_payload_ = nullptr;
grpc_grpclb_initial_response* initial_response;
grpc_grpclb_serverlist* serverlist;
- if (!lb_calld->seen_initial_response &&
+ if (!lb_calld->seen_initial_response_ &&
(initial_response = grpc_grpclb_initial_response_parse(response_slice)) !=
nullptr) {
// Have NOT seen initial response, look for initial response.
if (initial_response->has_client_stats_report_interval) {
- lb_calld->client_stats_report_interval = GPR_MAX(
+ lb_calld->client_stats_report_interval_ = GPR_MAX(
GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
&initial_response->client_stats_report_interval));
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; "
"client load reporting interval = %" PRIdPTR " milliseconds",
- glb_policy, lb_calld->client_stats_report_interval);
+ grpclb_policy, lb_calld->client_stats_report_interval_);
}
} else if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; client load "
"reporting NOT enabled",
- glb_policy);
+ grpclb_policy);
}
grpc_grpclb_initial_response_destroy(initial_response);
- lb_calld->seen_initial_response = true;
+ lb_calld->seen_initial_response_ = true;
} else if ((serverlist = grpc_grpclb_response_parse_serverlist(
response_slice)) != nullptr) {
// Have seen initial response, look for serverlist.
- GPR_ASSERT(lb_calld->lb_call != nullptr);
+ GPR_ASSERT(lb_calld->lb_call_ != nullptr);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Serverlist with %" PRIuPTR " servers received",
- glb_policy, serverlist->num_servers);
+ grpclb_policy, serverlist->num_servers);
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
- parse_server(serverlist->servers[i], &addr);
+ ParseServer(serverlist->servers[i], &addr);
char* ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
- glb_policy, i, ipport);
+ grpclb_policy, i, ipport);
gpr_free(ipport);
}
}
@@ -1557,44 +809,48 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
if (serverlist->num_servers > 0) {
// Start sending client load report only after we start using the
// serverlist returned from the current LB call.
- if (lb_calld->client_stats_report_interval > 0 &&
- lb_calld->client_stats == nullptr) {
- lb_calld->client_stats = grpc_grpclb_client_stats_create();
- glb_lb_call_data_ref(lb_calld, "client_load_report");
- schedule_next_client_load_report(lb_calld);
+ if (lb_calld->client_stats_report_interval_ > 0 &&
+ lb_calld->client_stats_ == nullptr) {
+ lb_calld->client_stats_ = grpc_grpclb_client_stats_create();
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = lb_calld->Ref(DEBUG_LOCATION, "client_load_report");
+ self.release();
+ lb_calld->ScheduleNextClientLoadReportLocked();
}
- if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
+ if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_,
+ serverlist)) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Incoming server list identical to current, "
"ignoring.",
- glb_policy);
+ grpclb_policy);
}
grpc_grpclb_destroy_serverlist(serverlist);
} else { /* new serverlist */
- if (glb_policy->serverlist != nullptr) {
+ if (grpclb_policy->serverlist_ != nullptr) {
/* dispose of the old serverlist */
- grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+ grpc_grpclb_destroy_serverlist(grpclb_policy->serverlist_);
} else {
/* or dispose of the fallback */
- grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
- glb_policy->fallback_backend_addresses = nullptr;
- if (glb_policy->fallback_timer_callback_pending) {
- grpc_timer_cancel(&glb_policy->lb_fallback_timer);
- glb_policy->fallback_timer_callback_pending = false;
+ grpc_lb_addresses_destroy(grpclb_policy->fallback_backend_addresses_);
+ grpclb_policy->fallback_backend_addresses_ = nullptr;
+ if (grpclb_policy->fallback_timer_callback_pending_) {
+ grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
}
}
- /* and update the copy in the glb_lb_policy instance. This
- * serverlist instance will be destroyed either upon the next
- * update or in glb_destroy() */
- glb_policy->serverlist = serverlist;
- glb_policy->serverlist_index = 0;
- rr_handover_locked(glb_policy);
+ // Update the copy in the GrpcLb instance. This serverlist
+ // instance will be destroyed either upon the next update or
+ // when the GrpcLb instance is destroyed.
+ grpclb_policy->serverlist_ = serverlist;
+ grpclb_policy->serverlist_index_ = 0;
+ grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
}
} else {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Received empty server list, ignoring.",
- glb_policy);
+ grpclb_policy);
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@@ -1602,335 +858,1020 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
// No valid initial response or serverlist found.
gpr_log(GPR_ERROR,
"[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
- glb_policy,
+ grpclb_policy,
grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
}
grpc_slice_unref_internal(response_slice);
- if (!glb_policy->shutting_down) {
+ if (!grpclb_policy->shutting_down_) {
// Keep listening for serverlist updates.
- op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message.recv_message = &lb_calld->recv_message_payload;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // Reuse the "lb_on_response_received_locked" ref taken in
- // query_for_backends_locked().
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_RECV_MESSAGE;
+ op.data.recv_message.recv_message = &lb_calld->recv_message_payload_;
+ op.flags = 0;
+ op.reserved = nullptr;
+ // Reuse the "OnBalancerMessageReceivedLocked" ref taken in StartQuery().
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
- &lb_calld->lb_on_response_received);
+ lb_calld->lb_call_, &op, 1,
+ &lb_calld->lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
- glb_lb_call_data_unref(lb_calld,
- "lb_on_response_received_locked+glb_shutdown");
+ lb_calld->Unref(DEBUG_LOCATION, "on_message_received+grpclb_shutdown");
}
}
-static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
- glb_lb_policy* glb_policy = lb_calld->glb_policy;
- GPR_ASSERT(lb_calld->lb_call != nullptr);
+void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
+ void* arg, grpc_error* error) {
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
+ GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
+ GPR_ASSERT(lb_calld->lb_call_ != nullptr);
if (grpc_lb_glb_trace.enabled()) {
char* status_details =
- grpc_slice_to_c_string(lb_calld->lb_call_status_details);
+ grpc_slice_to_c_string(lb_calld->lb_call_status_details_);
gpr_log(GPR_INFO,
"[grpclb %p] Status from LB server received. Status = %d, details "
"= '%s', (lb_calld: %p, lb_call: %p), error '%s'",
- lb_calld->glb_policy, lb_calld->lb_call_status, status_details,
- lb_calld, lb_calld->lb_call, grpc_error_string(error));
+ grpclb_policy, lb_calld->lb_call_status_, status_details, lb_calld,
+ lb_calld->lb_call_, grpc_error_string(error));
gpr_free(status_details);
}
- grpc_lb_policy_try_reresolve(&glb_policy->base, &grpc_lb_glb_trace,
- GRPC_ERROR_NONE);
+ grpclb_policy->TryReresolutionLocked(&grpc_lb_glb_trace, GRPC_ERROR_NONE);
// If this lb_calld is still in use, this call ended because of a failure so
// we want to retry connecting. Otherwise, we have deliberately ended this
// call and no further action is required.
- if (lb_calld == glb_policy->lb_calld) {
- glb_policy->lb_calld = nullptr;
- if (lb_calld->client_load_report_timer_callback_pending) {
- grpc_timer_cancel(&lb_calld->client_load_report_timer);
- }
- GPR_ASSERT(!glb_policy->shutting_down);
- if (lb_calld->seen_initial_response) {
+ if (lb_calld == grpclb_policy->lb_calld_.get()) {
+ grpclb_policy->lb_calld_.reset();
+ GPR_ASSERT(!grpclb_policy->shutting_down_);
+ if (lb_calld->seen_initial_response_) {
// If we lose connection to the LB server, reset the backoff and restart
// the LB call immediately.
- glb_policy->lb_call_backoff->Reset();
- query_for_backends_locked(glb_policy);
+ grpclb_policy->lb_call_backoff_.Reset();
+ grpclb_policy->StartBalancerCallLocked();
} else {
// If this LB call fails establishing any connection to the LB server,
// retry later.
- start_lb_call_retry_timer_locked(glb_policy);
+ grpclb_policy->StartBalancerCallRetryTimerLocked();
}
}
- glb_lb_call_data_unref(lb_calld, "lb_call_ended");
+ lb_calld->Unref(DEBUG_LOCATION, "lb_call_ended");
}
-static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
- glb_policy->fallback_timer_callback_pending = false;
- /* If we receive a serverlist after the timer fires but before this callback
- * actually runs, don't fall back. */
- if (glb_policy->serverlist == nullptr && !glb_policy->shutting_down &&
- error == GRPC_ERROR_NONE) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] Falling back to use backends from resolver",
- glb_policy);
+//
+// helper code for creating balancer channel
+//
+
+grpc_lb_addresses* ExtractBalancerAddresses(
+ const grpc_lb_addresses* addresses) {
+ size_t num_grpclb_addrs = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+ }
+ // There must be at least one balancer address, or else the
+ // client_channel would not have chosen this LB policy.
+ GPR_ASSERT(num_grpclb_addrs > 0);
+ grpc_lb_addresses* lb_addresses =
+ grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
+ size_t lb_addresses_idx = 0;
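+ // Copy only the balancer addresses into the new list, setting
+ // is_balancer=false so that the LB channel does not wind up recursively
+ // selecting the grpclb policy for itself.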
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) continue;
+ if (addresses->addresses[i].user_data != nullptr) {
+ gpr_log(GPR_ERROR,
+ "This LB policy doesn't support user data. It will be ignored");
+ }
+ grpc_lb_addresses_set_address(
+ lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
+ addresses->addresses[i].address.len, false /* is balancer */,
+ addresses->addresses[i].balancer_name, nullptr /* user data */);
+ }
+ GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
+ return lb_addresses;
+}
+
+/* Returns the channel args for the LB channel, used to create a bidirectional
+ * stream for the reception of load balancing updates.
+ *
+ * Inputs:
+ * - \a addresses: corresponding to the balancers.
+ * - \a response_generator: in order to propagate updates from the resolver
+ * above the grpclb policy.
+ * - \a args: other args inherited from the grpclb policy. */
+grpc_channel_args* BuildBalancerChannelArgs(
+ const grpc_lb_addresses* addresses,
+ FakeResolverResponseGenerator* response_generator,
+ const grpc_channel_args* args) {
+ grpc_lb_addresses* lb_addresses = ExtractBalancerAddresses(addresses);
+ // Channel args to remove.
+ static const char* args_to_remove[] = {
+ // LB policy name, since we want to use the default (pick_first) in
+ // the LB channel.
+ GRPC_ARG_LB_POLICY_NAME,
+ // The channel arg for the server URI, since that will be different for
+ // the LB channel than for the parent channel. The client channel
+ // factory will re-add this arg with the right value.
+ GRPC_ARG_SERVER_URI,
+ // The resolved addresses, which will be generated by the name resolver
+ // used in the LB channel. Note that the LB channel will use the fake
+ // resolver, so this won't actually generate a query to DNS (or some
+ // other name service). However, the addresses returned by the fake
+ // resolver will have is_balancer=false, whereas our own addresses have
+ // is_balancer=true. We need the LB channel to return addresses with
+ // is_balancer=false so that it does not wind up recursively using the
+ // grpclb LB policy, as per the special case logic in client_channel.c.
+ GRPC_ARG_LB_ADDRESSES,
+ // The fake resolver response generator, because we are replacing it
+ // with the one from the grpclb policy, used to propagate updates to
+ // the LB channel.
+ GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
+ };
+ // Channel args to add.
+ const grpc_arg args_to_add[] = {
+ // New LB addresses.
+ // Note that we pass these in both when creating the LB channel
+ // and via the fake resolver. The latter is what actually gets used.
+ grpc_lb_addresses_create_channel_arg(lb_addresses),
+ // The fake resolver response generator, which we use to inject
+ // address updates into the LB channel.
+ grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
+ response_generator),
+ };
+ // Construct channel args.
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
+ args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), args_to_add,
+ GPR_ARRAY_SIZE(args_to_add));
+ // Make any necessary modifications for security.
+ new_args = grpc_lb_policy_grpclb_modify_lb_channel_args(new_args);
+ // Clean up.
+ grpc_lb_addresses_destroy(lb_addresses);
+ return new_args;
+}
+
+//
+// ctor and dtor
+//
+
+GrpcLb::GrpcLb(const grpc_lb_addresses* addresses,
+ const LoadBalancingPolicy::Args& args)
+ : LoadBalancingPolicy(args),
+ response_generator_(MakeRefCounted<FakeResolverResponseGenerator>()),
+ lb_call_backoff_(
+ BackOff::Options()
+ .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS *
+ 1000)
+ .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
+ .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
+ .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
+ 1000)) {
+ // Initialization.
+ grpc_subchannel_index_ref();
+ GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
+ &GrpcLb::OnBalancerChannelConnectivityChangedLocked, this,
+ grpc_combiner_scheduler(args.combiner));
+ GRPC_CLOSURE_INIT(&on_rr_connectivity_changed_,
+ &GrpcLb::OnRoundRobinConnectivityChangedLocked, this,
+ grpc_combiner_scheduler(args.combiner));
+ GRPC_CLOSURE_INIT(&on_rr_request_reresolution_,
+ &GrpcLb::OnRoundRobinRequestReresolutionLocked, this,
+ grpc_combiner_scheduler(args.combiner));
+ grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, "grpclb");
+ // Record server name.
+ const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
+ const char* server_uri = grpc_channel_arg_get_string(arg);
+ GPR_ASSERT(server_uri != nullptr);
+ grpc_uri* uri = grpc_uri_parse(server_uri, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ server_name_ = gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Will use '%s' as the server name for LB request.",
+ this, server_name_);
+ }
+ grpc_uri_destroy(uri);
+ // Record LB call timeout.
+ arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
+ lb_call_timeout_ms_ = grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
+ // Record fallback timeout.
+ arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
+ lb_fallback_timeout_ms_ = grpc_channel_arg_get_integer(
+ arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
+ // Process channel args.
+ ProcessChannelArgsLocked(*args.args);
+}
+
+GrpcLb::~GrpcLb() {
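+ // By this point, ShutdownLocked() must have drained all pending picks
+ // and pings.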
+ GPR_ASSERT(pending_picks_ == nullptr);
+ GPR_ASSERT(pending_pings_ == nullptr);
+ gpr_free((void*)server_name_);
+ grpc_channel_args_destroy(args_);
+ grpc_connectivity_state_destroy(&state_tracker_);
+ if (serverlist_ != nullptr) {
+ grpc_grpclb_destroy_serverlist(serverlist_);
+ }
+ if (fallback_backend_addresses_ != nullptr) {
+ grpc_lb_addresses_destroy(fallback_backend_addresses_);
+ }
+ grpc_subchannel_index_unref();
+}
+
+void GrpcLb::ShutdownLocked() {
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
+ shutting_down_ = true;
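+ // Releasing lb_calld_ orphans the current balancer call (if any), which
+ // cancels the call and stops client load reporting.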
+ lb_calld_.reset();
+ if (retry_timer_callback_pending_) {
+ grpc_timer_cancel(&lb_call_retry_timer_);
+ }
+ if (fallback_timer_callback_pending_) {
+ grpc_timer_cancel(&lb_fallback_timer_);
+ }
+ rr_policy_.reset();
+ TryReresolutionLocked(&grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
+ // We destroy the LB channel here instead of in our destructor because
+ // destroying the channel triggers a last callback to
+ // OnBalancerChannelConnectivityChangedLocked(), and we need to be
+ // alive when that callback is invoked.
+ if (lb_channel_ != nullptr) {
+ grpc_channel_destroy(lb_channel_);
+ lb_channel_ = nullptr;
+ }
+ grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
+ GRPC_ERROR_REF(error), "grpclb_shutdown");
+ // Clear pending picks.
+ PendingPick* pp;
+ while ((pp = pending_picks_) != nullptr) {
+ pending_picks_ = pp->next;
+ pp->pick->connected_subchannel.reset();
+ // Note: pp is deleted in this callback.
+ GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
+ }
+ // Clear pending pings.
+ PendingPing* pping;
+ while ((pping = pending_pings_) != nullptr) {
+ pending_pings_ = pping->next;
+ GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
+ Delete(pping);
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+//
+// public methods
+//
+
+void GrpcLb::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
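+ // Hand each pending pick over to the new policy, first restoring the
+ // original completion closure so that our wrapper does not run.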
+ PendingPick* pp;
+ while ((pp = pending_picks_) != nullptr) {
+ pending_picks_ = pp->next;
+ pp->pick->on_complete = pp->original_on_complete;
+ pp->pick->user_data = nullptr;
+ if (new_policy->PickLocked(pp->pick)) {
+ // Synchronous return; schedule closure.
+ GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
}
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
- rr_handover_locked(glb_policy);
+ Delete(pp);
}
- GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
}
-static void fallback_update_locked(glb_lb_policy* glb_policy,
- const grpc_lb_addresses* addresses) {
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
- grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
- glb_policy->fallback_backend_addresses =
- extract_backend_addresses_locked(addresses);
- if (glb_policy->lb_fallback_timeout_ms > 0 &&
- glb_policy->rr_policy != nullptr) {
- rr_handover_locked(glb_policy);
+// Cancel a specific pending pick.
+//
+// A grpclb pick progresses as follows:
+// - If there's a Round Robin policy (rr_policy_) available, it'll be
+// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From
+// that point onwards, it'll be RR's responsibility. For cancellations, that
+// implies the pick also needs to be cancelled by the RR instance.
+// - Otherwise, without an RR instance, picks stay pending at this policy's
+// level (grpclb), inside the pending_picks_ list. To cancel these,
+// we invoke the completion closure and set the pick's connected
+// subchannel to nullptr right here.
+void GrpcLb::CancelPickLocked(PickState* pick, grpc_error* error) {
+ PendingPick* pp = pending_picks_;
+ pending_picks_ = nullptr;
+ while (pp != nullptr) {
+ PendingPick* next = pp->next;
+ if (pp->pick == pick) {
+ pick->connected_subchannel.reset();
+ // Note: pp is deleted in this callback.
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
+ } else {
+ pp->next = pending_picks_;
+ pending_picks_ = pp;
+ }
+ pp = next;
}
+ if (rr_policy_ != nullptr) {
+ rr_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error));
+ }
+ GRPC_ERROR_UNREF(error);
}
-static void glb_update_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_args* args) {
- glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(policy);
- const grpc_arg* arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- if (glb_policy->lb_channel == nullptr) {
- // If we don't have a current channel to the LB, go into TRANSIENT
- // FAILURE.
- grpc_connectivity_state_set(
- &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
- "glb_update_missing");
+// Cancel all pending picks.
+//
+// A grpclb pick progresses as follows:
+// - If there's a Round Robin policy (rr_policy_) available, it'll be
+// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From
+// that point onwards, it'll be RR's responsibility. For cancellations, that
+// implies the pick also needs to be cancelled by the RR instance.
+// - Otherwise, without an RR instance, picks stay pending at this policy's
+// level (grpclb), inside the pending_picks_ list. To cancel these,
+// we invoke the completion closure and set the pick's connected
+// subchannel to nullptr right here.
+void GrpcLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) {
+ PendingPick* pp = pending_picks_;
+ pending_picks_ = nullptr;
+ while (pp != nullptr) {
+ PendingPick* next = pp->next;
+ if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
+ initial_metadata_flags_eq) {
+ // Note: pp is deleted in this callback.
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
} else {
- // otherwise, keep using the current LB channel (ignore this update).
- gpr_log(
- GPR_ERROR,
- "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
- glb_policy);
+ pp->next = pending_picks_;
+ pending_picks_ = pp;
}
+ pp = next;
+ }
+ if (rr_policy_ != nullptr) {
+ rr_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
+ initial_metadata_flags_eq,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+void GrpcLb::ExitIdleLocked() {
+ if (!started_picking_) {
+ StartPickingLocked();
+ }
+}
+
+bool GrpcLb::PickLocked(PickState* pick) {
+ PendingPick* pp = PendingPickCreate(pick);
+ bool pick_done = false;
+ if (rr_policy_ != nullptr) {
+ const grpc_connectivity_state rr_connectivity_state =
+ rr_policy_->CheckConnectivityLocked(nullptr);
+ // The RR policy may have transitioned to SHUTDOWN but the callback
+ // registered to capture this event (on_rr_connectivity_changed_) may not
+ // have been invoked yet. We need to make sure we aren't trying to pick
+ // from an RR policy instance that's in shutdown.
+ if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
+ this, rr_policy_.get(),
+ grpc_connectivity_state_name(rr_connectivity_state));
+ }
+ AddPendingPick(pp);
+ pick_done = false;
+ } else { // RR not in shutdown
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", this,
+ rr_policy_.get());
+ }
+ pick_done = PickFromRoundRobinPolicyLocked(false /* force_async */, pp);
+ }
+ } else { // rr_policy_ == nullptr
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
+ this);
+ }
+ AddPendingPick(pp);
+ if (!started_picking_) {
+ StartPickingLocked();
+ }
+ pick_done = false;
+ }
+ return pick_done;
+}
+
+void GrpcLb::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
+ if (rr_policy_ != nullptr) {
+ rr_policy_->PingOneLocked(on_initiate, on_ack);
+ } else {
+ AddPendingPing(on_initiate, on_ack);
+ if (!started_picking_) {
+ StartPickingLocked();
+ }
+ }
+}
+
+grpc_connectivity_state GrpcLb::CheckConnectivityLocked(
+ grpc_error** connectivity_error) {
+ return grpc_connectivity_state_get(&state_tracker_, connectivity_error);
+}
+
+void GrpcLb::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ grpc_connectivity_state_notify_on_state_change(&state_tracker_, current,
+ notify);
+}
+
+void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
+ const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
+ if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+ // Ignore this update.
+ gpr_log(
+ GPR_ERROR,
+ "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
+ this);
return;
}
const grpc_lb_addresses* addresses =
static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
- // If a non-empty serverlist hasn't been received from the balancer,
- // propagate the update to fallback_backend_addresses.
- if (glb_policy->serverlist == nullptr) {
- fallback_update_locked(glb_policy, addresses);
+ // Update fallback address list.
+ if (fallback_backend_addresses_ != nullptr) {
+ grpc_lb_addresses_destroy(fallback_backend_addresses_);
+ }
+ fallback_backend_addresses_ = ExtractBackendAddresses(addresses);
+ // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
+ // since we use this to trigger the client_load_reporting filter.
+ static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+ grpc_arg new_arg = grpc_channel_arg_string_create(
+ (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
+ grpc_channel_args_destroy(args_);
+ args_ = grpc_channel_args_copy_and_add_and_remove(
+ &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+ // Construct args for balancer channel.
+ grpc_channel_args* lb_channel_args =
+ BuildBalancerChannelArgs(addresses, response_generator_.get(), &args);
+ // Create balancer channel if needed.
+ if (lb_channel_ == nullptr) {
+ char* uri_str;
+ gpr_asprintf(&uri_str, "fake:///%s", server_name_);
+ lb_channel_ = grpc_client_channel_factory_create_channel(
+ client_channel_factory(), uri_str,
+ GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, lb_channel_args);
+ GPR_ASSERT(lb_channel_ != nullptr);
+ gpr_free(uri_str);
}
- GPR_ASSERT(glb_policy->lb_channel != nullptr);
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
- grpc_channel_args* lb_channel_args = build_lb_channel_args(
- addresses, glb_policy->response_generator.get(), args->args);
- glb_policy->response_generator->SetResponse(lb_channel_args);
+ response_generator_->SetResponse(lb_channel_args);
grpc_channel_args_destroy(lb_channel_args);
+}
+
+void GrpcLb::UpdateLocked(const grpc_channel_args& args) {
+ ProcessChannelArgsLocked(args);
+ // If fallback is configured and the RR policy already exists, update
+ // it with the new fallback addresses.
+ if (lb_fallback_timeout_ms_ > 0 && rr_policy_ != nullptr) {
+ CreateOrUpdateRoundRobinPolicyLocked();
+ }
// Start watching the LB channel connectivity for connection, if not
// already doing so.
- if (!glb_policy->watching_lb_channel) {
- glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
- glb_policy->lb_channel, true /* try to connect */);
+ if (!watching_lb_channel_) {
+ lb_channel_connectivity_ = grpc_channel_check_connectivity_state(
+ lb_channel_, true /* try to connect */);
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
- grpc_channel_get_channel_stack(glb_policy->lb_channel));
+ grpc_channel_get_channel_stack(lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
- glb_policy->watching_lb_channel = true;
- GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
+ watching_lb_channel_ = true;
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity");
+ self.release();
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
- grpc_polling_entity_create_from_pollset_set(
- glb_policy->base.interested_parties),
- &glb_policy->lb_channel_connectivity,
- &glb_policy->lb_channel_on_connectivity_changed, nullptr);
+ grpc_polling_entity_create_from_pollset_set(interested_parties()),
+ &lb_channel_connectivity_, &lb_channel_on_connectivity_changed_,
+ nullptr);
}
}
+//
+// code for balancer channel and call
+//
+
+void GrpcLb::StartPickingLocked() {
+ // Start a timer to fall back.
+ if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
+ !fallback_timer_callback_pending_) {
+ grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = Ref(DEBUG_LOCATION, "on_fallback_timer");
+ self.release();
+ GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this,
+ grpc_combiner_scheduler(combiner()));
+ fallback_timer_callback_pending_ = true;
+ grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
+ }
+ started_picking_ = true;
+ StartBalancerCallLocked();
+}
+
+void GrpcLb::StartBalancerCallLocked() {
+ GPR_ASSERT(lb_channel_ != nullptr);
+ if (shutting_down_) return;
+ // Init the LB call data.
+ GPR_ASSERT(lb_calld_ == nullptr);
+ lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p)",
+ this, lb_channel_, lb_calld_.get());
+ }
+ lb_calld_->StartQuery();
+}
+
+void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
+ grpclb_policy->fallback_timer_callback_pending_ = false;
+ // If we receive a serverlist after the timer fires but before this callback
+ // actually runs, don't fall back.
+ if (grpclb_policy->serverlist_ == nullptr && !grpclb_policy->shutting_down_ &&
+ error == GRPC_ERROR_NONE) {
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Falling back to use backends from resolver",
+ grpclb_policy);
+ }
+ GPR_ASSERT(grpclb_policy->fallback_backend_addresses_ != nullptr);
+ grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
+ }
+ grpclb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
+}
+
+void GrpcLb::StartBalancerCallRetryTimerLocked() {
+ grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...", this);
+ grpc_millis timeout = next_try - ExecCtx::Get()->Now();
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG,
+ "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.", this,
+ timeout);
+ } else {
+ gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+ this);
+ }
+ }
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
+ self.release();
+ GRPC_CLOSURE_INIT(&lb_on_call_retry_, &GrpcLb::OnBalancerCallRetryTimerLocked,
+ this, grpc_combiner_scheduler(combiner()));
+ retry_timer_callback_pending_ = true;
+ grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_);
+}
+
+void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
+ grpclb_policy->retry_timer_callback_pending_ = false;
+ if (!grpclb_policy->shutting_down_ && error == GRPC_ERROR_NONE &&
+ grpclb_policy->lb_calld_ == nullptr) {
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server",
+ grpclb_policy);
+ }
+ grpclb_policy->StartBalancerCallLocked();
+ }
+ grpclb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
+}
+
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
- grpc_error* error) {
- glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
- if (glb_policy->shutting_down) goto done;
+void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
+ grpc_error* error) {
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
+ if (grpclb_policy->shutting_down_) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received.
- switch (glb_policy->lb_channel_connectivity) {
+ switch (grpclb_policy->lb_channel_connectivity_) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
// Keep watching the LB channel.
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
- grpc_channel_get_channel_stack(glb_policy->lb_channel));
+ grpc_channel_get_channel_stack(grpclb_policy->lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
- glb_policy->base.interested_parties),
- &glb_policy->lb_channel_connectivity,
- &glb_policy->lb_channel_on_connectivity_changed, nullptr);
+ grpclb_policy->interested_parties()),
+ &grpclb_policy->lb_channel_connectivity_,
+ &grpclb_policy->lb_channel_on_connectivity_changed_, nullptr);
break;
}
// The LB channel may be IDLE because it's shut down before the update.
// Restart the LB call to kick the LB channel into gear.
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:
- if (glb_policy->lb_calld != nullptr) {
- lb_call_data_shutdown(glb_policy);
- }
- if (glb_policy->started_picking) {
- if (glb_policy->retry_timer_callback_pending) {
- grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
+ grpclb_policy->lb_calld_.reset();
+ if (grpclb_policy->started_picking_) {
+ if (grpclb_policy->retry_timer_callback_pending_) {
+ grpc_timer_cancel(&grpclb_policy->lb_call_retry_timer_);
}
- glb_policy->lb_call_backoff->Reset();
- query_for_backends_locked(glb_policy);
+ grpclb_policy->lb_call_backoff_.Reset();
+ grpclb_policy->StartBalancerCallLocked();
}
// Fall through.
case GRPC_CHANNEL_SHUTDOWN:
done:
- glb_policy->watching_lb_channel = false;
- GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ grpclb_policy->watching_lb_channel_ = false;
+ grpclb_policy->Unref(DEBUG_LOCATION,
"watch_lb_channel_connectivity_cb_shutdown");
}
}
-/* Code wiring the policy with the rest of the core */
-static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
- glb_destroy,
- glb_shutdown_locked,
- glb_pick_locked,
- glb_cancel_pick_locked,
- glb_cancel_picks_locked,
- glb_ping_one_locked,
- glb_exit_idle_locked,
- glb_check_connectivity_locked,
- glb_notify_on_state_change_locked,
- glb_update_locked};
-
-static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
- grpc_lb_policy_args* args) {
- /* Count the number of gRPC-LB addresses. There must be at least one. */
- const grpc_arg* arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- return nullptr;
+//
+// PendingPick
+//
+
+// Adds lb_token of selected subchannel (address) to the call's initial
+// metadata.
+grpc_error* AddLbTokenToInitialMetadata(
+ grpc_mdelem lb_token, grpc_linked_mdelem* lb_token_mdelem_storage,
+ grpc_metadata_batch* initial_metadata) {
+ GPR_ASSERT(lb_token_mdelem_storage != nullptr);
+ GPR_ASSERT(!GRPC_MDISNULL(lb_token));
+ return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+ lb_token);
+}
+
+// Destroy function used when embedding client stats in call context.
+void DestroyClientStats(void* arg) {
+ grpc_grpclb_client_stats_unref(static_cast<grpc_grpclb_client_stats*>(arg));
+}
+
+void GrpcLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
+ /* if connected_subchannel is nullptr, no pick has been made by the RR
+ * policy (e.g., all addresses failed to connect). There won't be any
+ * user_data/token available */
+ if (pp->pick->connected_subchannel != nullptr) {
+ if (!GRPC_MDISNULL(pp->lb_token)) {
+ AddLbTokenToInitialMetadata(GRPC_MDELEM_REF(pp->lb_token),
+ &pp->pick->lb_token_mdelem_storage,
+ pp->pick->initial_metadata);
+ } else {
+ gpr_log(GPR_ERROR,
+ "[grpclb %p] No LB token for connected subchannel pick %p",
+ pp->grpclb_policy, pp->pick);
+ abort();
+ }
+ // Pass on client stats via context. Passes ownership of the reference.
+ if (pp->client_stats != nullptr) {
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
+ pp->client_stats;
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
+ DestroyClientStats;
+ }
+ } else {
+ if (pp->client_stats != nullptr) {
+ grpc_grpclb_client_stats_unref(pp->client_stats);
+ }
}
- grpc_lb_addresses* addresses =
- static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
- size_t num_grpclb_addrs = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+}
+
+/* The \a on_complete closure passed as part of the pick is wrapped here so
+ * that, when it runs, the LB token metadata and client stats can be attached
+ * to the call before the original closure is invoked. */
+void GrpcLb::OnPendingPickComplete(void* arg, grpc_error* error) {
+ PendingPick* pp = static_cast<PendingPick*>(arg);
+ PendingPickSetMetadataAndContext(pp);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
+ Delete(pp);
+}
+
+GrpcLb::PendingPick* GrpcLb::PendingPickCreate(PickState* pick) {
+ PendingPick* pp = New<PendingPick>();
+ pp->grpclb_policy = this;
+ pp->pick = pick;
+ GRPC_CLOSURE_INIT(&pp->on_complete, &GrpcLb::OnPendingPickComplete, pp,
+ grpc_schedule_on_exec_ctx);
+ pp->original_on_complete = pick->on_complete;
+ pick->on_complete = &pp->on_complete;
+ return pp;
+}
+
+void GrpcLb::AddPendingPick(PendingPick* pp) {
+ pp->next = pending_picks_;
+ pending_picks_ = pp;
+}
+
+//
+// PendingPing
+//
+
+void GrpcLb::AddPendingPing(grpc_closure* on_initiate, grpc_closure* on_ack) {
+ PendingPing* pping = New<PendingPing>();
+ pping->on_initiate = on_initiate;
+ pping->on_ack = on_ack;
+ pping->next = pending_pings_;
+ pending_pings_ = pping;
+}
+
+//
+// code for interacting with the RR policy
+//
+
+// Performs a pick over \a rr_policy_. Given that a pick can return
+// immediately (ignoring its completion callback), we need to perform the
+// cleanups this callback would otherwise be responsible for.
+// If \a force_async is true, then we will manually schedule the
+// completion callback even if the pick is available immediately.
+bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp) {
+ // Check for drops if we are not using fallback backend addresses.
+ if (serverlist_ != nullptr) {
+ // Look at the index into the serverlist to see if we should drop this call.
+ grpc_grpclb_server* server = serverlist_->servers[serverlist_index_++];
+ if (serverlist_index_ == serverlist_->num_servers) {
+ serverlist_index_ = 0; // Wrap-around.
+ }
+ if (server->drop) {
+ // Update client load reporting stats to indicate the number of
+ // dropped calls. Note that we have to do this here instead of in
+ // the client_load_reporting filter, because we do not create a
+ // subchannel call (and therefore no client_load_reporting filter)
+ // for dropped calls.
+ if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
+ grpc_grpclb_client_stats_add_call_dropped_locked(
+ server->load_balance_token, lb_calld_->client_stats());
+ }
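+ // This pick is dropped, so the RR policy will never see it; complete it
+ // here, scheduling the original closure ourselves when async completion
+ // was requested.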
+ if (force_async) {
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ Delete(pp);
+ return false;
+ }
+ Delete(pp);
+ return true;
+ }
+ }
+ // Set client_stats and user_data.
+ if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
+ pp->client_stats = grpc_grpclb_client_stats_ref(lb_calld_->client_stats());
}
- if (num_grpclb_addrs == 0) return nullptr;
+ GPR_ASSERT(pp->pick->user_data == nullptr);
+ pp->pick->user_data = (void**)&pp->lb_token;
+ // Pick via the RR policy.
+ bool pick_done = rr_policy_->PickLocked(pp->pick);
+ if (pick_done) {
+ PendingPickSetMetadataAndContext(pp);
+ if (force_async) {
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ pick_done = false;
+ }
+ Delete(pp);
+ }
+ // else, the pending pick will be registered and taken care of by the
+ // pending pick list inside the RR policy. Eventually,
+ // OnPendingPickComplete() will be called, which will (among other
+ // things) add the LB token to the call's initial metadata.
+ return pick_done;
+}
- glb_lb_policy* glb_policy =
- static_cast<glb_lb_policy*>(gpr_zalloc(sizeof(*glb_policy)));
+void GrpcLb::CreateRoundRobinPolicyLocked(const Args& args) {
+ GPR_ASSERT(rr_policy_ == nullptr);
+ rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+ "round_robin", args);
+ if (rr_policy_ == nullptr) {
+ gpr_log(GPR_ERROR, "[grpclb %p] Failure creating a RoundRobin policy",
+ this);
+ return;
+ }
+ // TODO(roth): We currently track this ref manually. Once the new
+ // ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
+ auto self = Ref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+ self.release();
+ rr_policy_->SetReresolutionClosureLocked(&on_rr_request_reresolution_);
+ grpc_error* rr_state_error = nullptr;
+ rr_connectivity_state_ = rr_policy_->CheckConnectivityLocked(&rr_state_error);
+ // Connectivity state is a function of the RR policy updated/created.
+ UpdateConnectivityStateFromRoundRobinPolicyLocked(rr_state_error);
+ // Add the gRPC LB's interested_parties pollset_set to that of the newly
+ // created RR policy. This will make the RR policy progress upon activity on
+ // gRPC LB, which in turn is tied to the application's call.
+ grpc_pollset_set_add_pollset_set(rr_policy_->interested_parties(),
+ interested_parties());
+ // Subscribe to changes to the connectivity of the new RR.
+ // TODO(roth): We currently track this ref manually. Once the new
+ // ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
+ self = Ref(DEBUG_LOCATION, "on_rr_connectivity_changed");
+ self.release();
+ rr_policy_->NotifyOnStateChangeLocked(&rr_connectivity_state_,
+ &on_rr_connectivity_changed_);
+ rr_policy_->ExitIdleLocked();
+ // Send pending picks to RR policy.
+ PendingPick* pp;
+ while ((pp = pending_picks_)) {
+ pending_picks_ = pp->next;
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Pending pick about to (async) PICK from RR %p", this,
+ rr_policy_.get());
+ }
+ PickFromRoundRobinPolicyLocked(true /* force_async */, pp);
+ }
+ // Send pending pings to RR policy.
+ PendingPing* pping;
+ while ((pping = pending_pings_)) {
+ pending_pings_ = pping->next;
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
+ this, rr_policy_.get());
+ }
+ rr_policy_->PingOneLocked(pping->on_initiate, pping->on_ack);
+ Delete(pping);
+ }
+}
- /* Get server name. */
- arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
- const char* server_uri = grpc_channel_arg_get_string(arg);
- GPR_ASSERT(server_uri != nullptr);
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
- GPR_ASSERT(uri->path[0] != '\0');
- glb_policy->server_name =
- gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] Will use '%s' as the server name for LB request.",
- glb_policy, glb_policy->server_name);
+grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
+ grpc_lb_addresses* addresses;
+ if (serverlist_ != nullptr) {
+ GPR_ASSERT(serverlist_->num_servers > 0);
+ addresses = ProcessServerlist(serverlist_);
+ } else {
+ // If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't
+ // received any serverlist from the balancer, we use the fallback backends
+ // returned by the resolver. Note that the fallback backend list may be
+ // empty, in which case the new round_robin policy will keep the requested
+ // picks pending.
+ GPR_ASSERT(fallback_backend_addresses_ != nullptr);
+ addresses = grpc_lb_addresses_copy(fallback_backend_addresses_);
}
- grpc_uri_destroy(uri);
+ GPR_ASSERT(addresses != nullptr);
+ // Replace the LB addresses in the channel args that we pass down to
+ // the subchannel.
+ static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+ const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
+ grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove(
+ args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg, 1);
+ grpc_lb_addresses_destroy(addresses);
+ return args;
+}
- glb_policy->cc_factory = args->client_channel_factory;
- GPR_ASSERT(glb_policy->cc_factory != nullptr);
+void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
+ if (shutting_down_) return;
+ grpc_channel_args* args = CreateRoundRobinPolicyArgsLocked();
+ GPR_ASSERT(args != nullptr);
+ if (rr_policy_ != nullptr) {
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", this,
+ rr_policy_.get());
+ }
+ rr_policy_->UpdateLocked(*args);
+ } else {
+ LoadBalancingPolicy::Args lb_policy_args;
+ lb_policy_args.combiner = combiner();
+ lb_policy_args.client_channel_factory = client_channel_factory();
+ lb_policy_args.args = args;
+ CreateRoundRobinPolicyLocked(lb_policy_args);
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", this,
+ rr_policy_.get());
+ }
+ }
+ grpc_channel_args_destroy(args);
+}
- arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
- glb_policy->lb_call_timeout_ms =
- grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
+void GrpcLb::OnRoundRobinRequestReresolutionLocked(void* arg,
+ grpc_error* error) {
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
+ if (grpclb_policy->shutting_down_ || error != GRPC_ERROR_NONE) {
+ grpclb_policy->Unref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+ return;
+ }
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(
+ GPR_DEBUG,
+ "[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
+ grpclb_policy, grpclb_policy->rr_policy_.get());
+ }
+ // If we are talking to a balancer, we expect to get updated addresses from
+ // the balancer, so we can ignore the re-resolution request from the RR
+ // policy. Otherwise, handle the re-resolution request using the
+ // grpclb policy's original re-resolution closure.
+ if (grpclb_policy->lb_calld_ == nullptr ||
+ !grpclb_policy->lb_calld_->seen_initial_response()) {
+ grpclb_policy->TryReresolutionLocked(&grpc_lb_glb_trace, GRPC_ERROR_NONE);
+ }
+ // Give back the wrapper closure to the RR policy.
+ grpclb_policy->rr_policy_->SetReresolutionClosureLocked(
+ &grpclb_policy->on_rr_request_reresolution_);
+}
- arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
- glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
- arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
+void GrpcLb::UpdateConnectivityStateFromRoundRobinPolicyLocked(
+ grpc_error* rr_state_error) {
+ const grpc_connectivity_state curr_glb_state =
+ grpc_connectivity_state_check(&state_tracker_);
+ /* The new connectivity status is a function of the previous one and the new
+ * input coming from the status of the RR policy.
+ *
+ * current state (grpclb's)
+ * |
+ * v || I | C | R | TF | SD | <- new state (RR's)
+ * ===++====+=====+=====+======+======+
+ * I || I | C | R | [I] | [I] |
+ * ---++----+-----+-----+------+------+
+ * C || I | C | R | [C] | [C] |
+ * ---++----+-----+-----+------+------+
+ * R || I | C | R | [R] | [R] |
+ * ---++----+-----+-----+------+------+
+ * TF || I | C | R | [TF] | [TF] |
+ * ---++----+-----+-----+------+------+
+ * SD || NA | NA | NA | NA | NA | (*)
+ * ---++----+-----+-----+------+------+
+ *
+ * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
+ * is the current state of grpclb, which is left untouched.
+ *
+ * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
+ * the previous RR instance.
+ *
+ * Note that the status is never updated to SHUTDOWN as a result of calling
+ * this function. Only ShutdownLocked() has the power to set that state.
+ *
+ * (*) This function must not be called while shutting down. */
+ GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
+ switch (rr_connectivity_state_) {
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
+ case GRPC_CHANNEL_SHUTDOWN:
+ GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
+ break;
+ case GRPC_CHANNEL_IDLE:
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_READY:
+ GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
+ }
+ if (grpc_lb_glb_trace.enabled()) {
+ gpr_log(
+ GPR_INFO,
+ "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
+ this, grpc_connectivity_state_name(rr_connectivity_state_),
+ rr_policy_.get());
+ }
+ grpc_connectivity_state_set(&state_tracker_, rr_connectivity_state_,
+ rr_state_error,
+ "update_lb_connectivity_status_locked");
+}
- // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
- // since we use this to trigger the client_load_reporting filter.
- grpc_arg new_arg = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
- static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
- glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
- args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
-
- /* Extract the backend addresses (may be empty) from the resolver for
- * fallback. */
- glb_policy->fallback_backend_addresses =
- extract_backend_addresses_locked(addresses);
-
- /* Create a client channel over them to communicate with a LB service */
- glb_policy->response_generator =
- grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
- grpc_channel_args* lb_channel_args = build_lb_channel_args(
- addresses, glb_policy->response_generator.get(), args->args);
- char* uri_str;
- gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
- glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
- uri_str, args->client_channel_factory, lb_channel_args);
-
- /* Propagate initial resolution */
- glb_policy->response_generator->SetResponse(lb_channel_args);
- grpc_channel_args_destroy(lb_channel_args);
- gpr_free(uri_str);
- if (glb_policy->lb_channel == nullptr) {
- gpr_free((void*)glb_policy->server_name);
- grpc_channel_args_destroy(glb_policy->args);
- gpr_free(glb_policy);
- return nullptr;
+void GrpcLb::OnRoundRobinConnectivityChangedLocked(void* arg,
+ grpc_error* error) {
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
+ if (grpclb_policy->shutting_down_) {
+ grpclb_policy->Unref(DEBUG_LOCATION, "on_rr_connectivity_changed");
+ return;
}
- grpc_subchannel_index_ref();
- GRPC_CLOSURE_INIT(&glb_policy->rr_on_connectivity_changed,
- rr_on_connectivity_changed_locked, glb_policy,
- grpc_combiner_scheduler(args->combiner));
- GRPC_CLOSURE_INIT(&glb_policy->rr_on_reresolution_requested,
- rr_on_reresolution_requested_locked, glb_policy,
- grpc_combiner_scheduler(args->combiner));
- GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
- glb_lb_channel_on_connectivity_changed_cb, glb_policy,
- grpc_combiner_scheduler(args->combiner));
- grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
- grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
- "grpclb");
- // Init LB call backoff option.
- grpc_core::BackOff::Options backoff_options;
- backoff_options
- .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
- .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
- .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
- .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- glb_policy->lb_call_backoff.Init(backoff_options);
- return &glb_policy->base;
+ grpclb_policy->UpdateConnectivityStateFromRoundRobinPolicyLocked(
+ GRPC_ERROR_REF(error));
+ // Resubscribe. Reuse the "on_rr_connectivity_changed" ref.
+ grpclb_policy->rr_policy_->NotifyOnStateChangeLocked(
+ &grpclb_policy->rr_connectivity_state_,
+ &grpclb_policy->on_rr_connectivity_changed_);
}
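
Connectivity notifications are one-shot, so the callback must re-arm itself after each delivery, reusing the ref it was invoked with. The same re-subscription shape in isolation (Watcher and its fields are hypothetical; only NotifyOnStateChangeLocked comes from this diff):

    void OnStateChanged(void* arg, grpc_error* error) {
      Watcher* w = static_cast<Watcher*>(arg);  // hypothetical holder type
      w->HandleNewState();                      // consume this notification
      w->policy->NotifyOnStateChangeLocked(     // re-arm the one-shot watch
          &w->state, &w->closure);
    }
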
-static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
+//
+// factory
+//
-static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
+class GrpcLbFactory : public LoadBalancingPolicyFactory {
+ public:
+ OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+ const LoadBalancingPolicy::Args& args) const override {
+ /* Count the number of gRPC-LB addresses. There must be at least one. */
+ const grpc_arg* arg =
+ grpc_channel_args_find(args.args, GRPC_ARG_LB_ADDRESSES);
+ if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+ return nullptr;
+ }
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
+ size_t num_grpclb_addrs = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+ }
+ if (num_grpclb_addrs == 0) return nullptr;
+ return OrphanablePtr<LoadBalancingPolicy>(New<GrpcLb>(addresses, args));
+ }
-static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
- glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
+ const char* name() const override { return "grpclb"; }
+};
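
Once registered (see grpc_lb_policy_grpclb_init() below), a factory like this is presumably resolved by its name() string at channel-construction time. A hedged sketch of instantiation through the registry; the exact CreateLoadBalancingPolicy signature is assumed from lb_policy_registry.h in this change, not verified:

    grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> policy =
        grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
            "grpclb", lb_policy_args);  // lb_policy_args: caller-supplied Args
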
-static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
+} // namespace
-grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
- return &glb_lb_policy_factory;
-}
+} // namespace grpc_core
-/* Plugin registration */
+//
+// Plugin registration
+//
+
+namespace {
// Only add client_load_reporting filter if the grpclb LB policy is used.
-static bool maybe_add_client_load_reporting_filter(
- grpc_channel_stack_builder* builder, void* arg) {
+bool maybe_add_client_load_reporting_filter(grpc_channel_stack_builder* builder,
+ void* arg) {
const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
const grpc_arg* channel_arg =
@@ -1938,14 +1879,18 @@ static bool maybe_add_client_load_reporting_filter(
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, static_cast<const grpc_channel_filter*>(arg), nullptr,
- nullptr);
+ builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
}
return true;
}
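
The filter is gated purely on the GRPC_ARG_LB_POLICY_NAME string arg, which the grpclb policy injects into its own channel args (the removed glb_create() code above shows the producer side). A minimal sketch of constructing the arg this check looks for:

    // Sketch mirroring the removed glb_create() lines; 'args' is assumed to
    // be the existing const grpc_channel_args* being extended.
    grpc_arg lb_name_arg = grpc_channel_arg_string_create(
        const_cast<char*>(GRPC_ARG_LB_POLICY_NAME),
        const_cast<char*>("grpclb"));
    grpc_channel_args* new_args =
        grpc_channel_args_copy_and_add(args, &lb_name_arg, 1);
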
+} // namespace
+
void grpc_lb_policy_grpclb_init() {
- grpc_register_lb_policy(grpc_glb_lb_factory_create());
+ grpc_core::LoadBalancingPolicyRegistry::Builder::
+ RegisterLoadBalancingPolicyFactory(
+ grpc_core::UniquePtr<grpc_core::LoadBalancingPolicyFactory>(
+ grpc_core::New<grpc_core::GrpcLbFactory>()));
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
deleted file mode 100644
index 0a2edb0e3d..0000000000
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H
-
-#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
-
-/** Returns a load balancing factory for the glb policy, which tries to connect
- * to a load balancing server to decide the next successfully connected
- * subchannel to pick. */
-grpc_lb_policy_factory* grpc_glb_lb_factory_create();
-
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index 013fb12aea..fd873f096d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
@@ -16,57 +16,11 @@
*
*/
-#include <grpc/support/alloc.h>
-#include <grpc/support/string_util.h>
+#include <grpc/support/port_platform.h>
-#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/gpr/string.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
-grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
- const char* lb_service_target_addresses,
- grpc_client_channel_factory* client_channel_factory,
+grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args(
grpc_channel_args* args) {
- grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
- client_channel_factory, lb_service_target_addresses,
- GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, args);
- return lb_channel;
-}
-
-grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_slice_hash_table* targets_info,
- grpc_core::FakeResolverResponseGenerator* response_generator,
- const grpc_channel_args* args) {
- const grpc_arg to_add[] = {
- grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
- response_generator)};
- /* We remove:
- *
- * - The channel arg for the LB policy name, since we want to use the default
- * (pick_first) in this case.
- *
- * - The channel arg for the resolved addresses, since that will be generated
- * by the name resolver used in the LB channel. Note that the LB channel
- * will use the fake resolver, so this won't actually generate a query
- * to DNS (or some other name service). However, the addresses returned by
- * the fake resolver will have is_balancer=false, whereas our own
- * addresses have is_balancer=true. We need the LB channel to return
- * addresses with is_balancer=false so that it does not wind up recursively
- * using the grpclb LB policy, as per the special case logic in
- * client_channel.c.
- *
- * - The channel arg for the server URI, since that will be different for the
- * LB channel than for the parent channel (the client channel factory will
- * re-add this arg with the right value).
- *
- * - The fake resolver generator, because we are replacing it with the one
- * from the grpclb policy, used to propagate updates to the LB channel. */
- static const char* keys_to_remove[] = {
- GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
- GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
- return grpc_channel_args_copy_and_add_and_remove(
- args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), to_add,
- GPR_ARRAY_SIZE(to_add));
+ return args;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index 2e34e3cab5..825065a9c3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -19,26 +19,18 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
-#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
-#include "src/core/lib/slice/slice_hash_table.h"
-/** Create the channel used for communicating with an LB service.
- * Note that an LB *service* may be comprised of several LB *servers*.
- *
- * \a lb_service_target_addresses is the target URI containing the addresses
- * from resolving the LB service's name (eg, ipv4:10.0.0.1:1234,10.2.3.4:9876).
- * \a client_channel_factory will be used for the creation of the LB channel,
- * alongside the channel args passed in \a args. */
-grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
- const char* lb_service_target_addresses,
- grpc_client_channel_factory* client_channel_factory,
+/// Makes any necessary modifications to \a args for use in the grpclb
+/// balancer channel.
+///
+/// Takes ownership of \a args.
+///
+/// Caller takes ownership of the returned args.
+grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args(
grpc_channel_args* args);
-grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_slice_hash_table* targets_info,
- grpc_core::FakeResolverResponseGenerator* response_generator,
- const grpc_channel_args* args);
-
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
*/
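
The ownership contract in the new doc comment is double-sided: the callee consumes \a args, and the caller owns the result. A minimal usage sketch under that contract:

    grpc_channel_args* lb_args =
        grpc_lb_policy_grpclb_modify_lb_channel_args(args);
    // 'args' must not be used after this point: the call took ownership.
    // ... build the balancer channel from lb_args ...
    grpc_channel_args_destroy(lb_args);  // caller owns the returned args
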
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 5e615addbf..441efd5e23 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -16,85 +16,93 @@
*
*/
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
+
+#include <string.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
-#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/security/credentials/credentials.h"
-#include "src/core/lib/security/transport/lb_targets_info.h"
+#include "src/core/lib/security/transport/target_authority_table.h"
#include "src/core/lib/slice/slice_internal.h"
-grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
- const char* lb_service_target_addresses,
- grpc_client_channel_factory* client_channel_factory,
+namespace grpc_core {
+namespace {
+
+int BalancerNameCmp(const grpc_core::UniquePtr<char>& a,
+ const grpc_core::UniquePtr<char>& b) {
+ return strcmp(a.get(), b.get());
+}
+
+RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
+ grpc_lb_addresses* addresses) {
+ TargetAuthorityTable::Entry* target_authority_entries =
+ static_cast<TargetAuthorityTable::Entry*>(gpr_zalloc(
+ sizeof(*target_authority_entries) * addresses->num_addresses));
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ char* addr_str;
+ GPR_ASSERT(grpc_sockaddr_to_string(
+ &addr_str, &addresses->addresses[i].address, true) > 0);
+ target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
+ target_authority_entries[i].value.reset(
+ gpr_strdup(addresses->addresses[i].balancer_name));
+ gpr_free(addr_str);
+ }
+ RefCountedPtr<TargetAuthorityTable> target_authority_table =
+ TargetAuthorityTable::Create(addresses->num_addresses,
+ target_authority_entries, BalancerNameCmp);
+ gpr_free(target_authority_entries);
+ return target_authority_table;
+}
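
Conceptually the table maps each balancer address, rendered as a string, to the authority to present when connecting to it. A stand-in illustration with ordinary containers (not the gRPC API; sample data assumed):

    #include <map>
    #include <string>
    // Real keys/values come from grpc_lb_addresses entries.
    std::map<std::string, std::string> target_authority = {
        {"10.0.0.1:2000", "balancer.example.com"},
        {"10.0.0.2:2000", "balancer.example.com"},
    };
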
+
+} // namespace
+} // namespace grpc_core
+
+grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args(
grpc_channel_args* args) {
- grpc_channel_args* new_args = args;
+ const char* args_to_remove[1];
+ size_t num_args_to_remove = 0;
+ grpc_arg args_to_add[2];
+ size_t num_args_to_add = 0;
+ // Add arg for targets info table.
+ const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != nullptr);
+ GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
+ grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable>
+ target_authority_table = grpc_core::CreateTargetAuthorityTable(addresses);
+ args_to_add[num_args_to_add++] =
+ grpc_core::CreateTargetAuthorityTableChannelArg(
+ target_authority_table.get());
+ // Substitute the channel credentials with a version without call
+ // credentials: the load balancer is not necessarily trusted to handle
+ // bearer token credentials.
grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
+ grpc_channel_credentials* creds_sans_call_creds = nullptr;
if (channel_credentials != nullptr) {
- /* Substitute the channel credentials with a version without call
- * credentials: the load balancer is not necessarily trusted to handle
- * bearer token credentials */
- static const char* keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
- grpc_channel_credentials* creds_sans_call_creds =
+ creds_sans_call_creds =
grpc_channel_credentials_duplicate_without_call_credentials(
channel_credentials);
GPR_ASSERT(creds_sans_call_creds != nullptr);
- grpc_arg args_to_add[] = {
- grpc_channel_credentials_to_arg(creds_sans_call_creds)};
- /* Create the new set of channel args */
- new_args = grpc_channel_args_copy_and_add_and_remove(
- args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
- GPR_ARRAY_SIZE(args_to_add));
- grpc_channel_credentials_unref(creds_sans_call_creds);
+ args_to_remove[num_args_to_remove++] = GRPC_ARG_CHANNEL_CREDENTIALS;
+ args_to_add[num_args_to_add++] =
+ grpc_channel_credentials_to_arg(creds_sans_call_creds);
}
- grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
- client_channel_factory, lb_service_target_addresses,
- GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
- if (channel_credentials != nullptr) {
- grpc_channel_args_destroy(new_args);
+ grpc_channel_args* result = grpc_channel_args_copy_and_add_and_remove(
+ args, args_to_remove, num_args_to_remove, args_to_add, num_args_to_add);
+ // Clean up.
+ grpc_channel_args_destroy(args);
+ if (creds_sans_call_creds != nullptr) {
+ grpc_channel_credentials_unref(creds_sans_call_creds);
}
- return lb_channel;
-}
-
-grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_slice_hash_table* targets_info,
- grpc_core::FakeResolverResponseGenerator* response_generator,
- const grpc_channel_args* args) {
- const grpc_arg to_add[] = {
- grpc_lb_targets_info_create_channel_arg(targets_info),
- grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
- response_generator)};
- /* We remove:
- *
- * - The channel arg for the LB policy name, since we want to use the default
- * (pick_first) in this case.
- *
- * - The channel arg for the resolved addresses, since that will be generated
- * by the name resolver used in the LB channel. Note that the LB channel
- * will use the fake resolver, so this won't actually generate a query
- * to DNS (or some other name service). However, the addresses returned by
- * the fake resolver will have is_balancer=false, whereas our own
- * addresses have is_balancer=true. We need the LB channel to return
- * addresses with is_balancer=false so that it does not wind up recursively
- * using the grpclb LB policy, as per the special case logic in
- * client_channel.c.
- *
- * - The channel arg for the server URI, since that will be different for the
- * LB channel than for the parent channel (the client channel factory will
- * re-add this arg with the right value).
- *
- * - The fake resolver generator, because we are replacing it with the one
- * from the grpclb policy, used to propagate updates to the LB channel. */
- static const char* keys_to_remove[] = {
- GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
- GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
- /* Add the targets info table to be used for secure naming */
- return grpc_channel_args_copy_and_add_and_remove(
- args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), to_add,
- GPR_ARRAY_SIZE(to_add));
+ return result;
}
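
The fixed-capacity to-remove/to-add arrays work because at most one key is dropped and at most two args are appended; the running counts passed to grpc_channel_args_copy_and_add_and_remove reflect how many slots were actually filled. The same editing pattern in generic form (the keys and value here are illustrative, not real gRPC args):

    static const char* to_remove[] = {"example.key.to.drop"};
    grpc_arg to_add[] = {grpc_channel_arg_integer_create(
        const_cast<char*>("example.key.to.add"), 42)};
    grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
        old_args, to_remove, GPR_ARRAY_SIZE(to_remove), to_add,
        GPR_ARRAY_SIZE(to_add));
    grpc_channel_args_destroy(old_args);  // assuming old_args was owned
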
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
index 0b5a798be3..dfbaead7d5 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include <string.h>
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
index d4b9d06848..c971e56883 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H
+#include <grpc/support/port_platform.h>
+
#include <stdbool.h>
#include <grpc/impl/codegen/grpc_types.h>
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index c388b6ba77..7ef3bcf24f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
#include "third_party/nanopb/pb_decode.h"
#include "third_party/nanopb/pb_encode.h"
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index ccb0212643..d4270f2536 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H
+#include <grpc/support/port_platform.h>
+
#include <grpc/slice_buffer.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 296bdcb247..9090c34412 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include <string.h>
#include <grpc/support/alloc.h>
@@ -29,194 +31,225 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
-grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
+namespace grpc_core {
+
+TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
+
+namespace {
+
+//
+// pick_first LB policy
+//
+
+class PickFirst : public LoadBalancingPolicy {
+ public:
+ explicit PickFirst(const Args& args);
+
+ void UpdateLocked(const grpc_channel_args& args) override;
+ bool PickLocked(PickState* pick) override;
+ void CancelPickLocked(PickState* pick, grpc_error* error) override;
+ void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) override;
+ void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
+ grpc_closure* closure) override;
+ grpc_connectivity_state CheckConnectivityLocked(
+ grpc_error** connectivity_error) override;
+ void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
+ void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
+ void ExitIdleLocked() override;
+
+ private:
+ ~PickFirst();
+
+ void ShutdownLocked() override;
+
+ void StartPickingLocked();
+ void DestroyUnselectedSubchannelsLocked();
+
+ static void OnConnectivityChangedLocked(void* arg, grpc_error* error);
+
+ void SubchannelListRefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason);
+ void SubchannelListUnrefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason);
-typedef struct {
- /** base policy: must be first */
- grpc_lb_policy base;
/** all our subchannels */
- grpc_lb_subchannel_list* subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list_ = nullptr;
/** latest pending subchannel list */
- grpc_lb_subchannel_list* latest_pending_subchannel_list;
+ grpc_lb_subchannel_list* latest_pending_subchannel_list_ = nullptr;
/** selected subchannel in \a subchannel_list */
- grpc_lb_subchannel_data* selected;
+ grpc_lb_subchannel_data* selected_ = nullptr;
/** have we started picking? */
- bool started_picking;
+ bool started_picking_ = false;
/** are we shut down? */
- bool shutdown;
+ bool shutdown_ = false;
/** list of picks that are waiting on connectivity */
- grpc_lb_policy_pick_state* pending_picks;
+ PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
- grpc_connectivity_state_tracker state_tracker;
-} pick_first_lb_policy;
-
-static void pf_destroy(grpc_lb_policy* pol) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- GPR_ASSERT(p->subchannel_list == nullptr);
- GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
- GPR_ASSERT(p->pending_picks == nullptr);
- grpc_connectivity_state_destroy(&p->state_tracker);
- gpr_free(p);
- grpc_subchannel_index_unref();
+ grpc_connectivity_state_tracker state_tracker_;
+};
+
+PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
+ GPR_ASSERT(args.client_channel_factory != nullptr);
+ grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
+ "pick_first");
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void*)p);
+ gpr_log(GPR_DEBUG, "Pick First %p created.", this);
}
+ UpdateLocked(*args.args);
+ grpc_subchannel_index_ref();
}
-static void pf_shutdown_locked(grpc_lb_policy* pol,
- grpc_lb_policy* new_policy) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
+PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
+ gpr_log(GPR_DEBUG, "Destroying Pick First %p", this);
}
- p->shutdown = true;
- grpc_lb_policy_pick_state* pick;
- while ((pick = p->pending_picks) != nullptr) {
- p->pending_picks = pick->next;
- if (new_policy != nullptr) {
- // Hand off to new LB policy.
- if (grpc_lb_policy_pick_locked(new_policy, pick)) {
- // Synchronous return, schedule closure.
- GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
- }
- } else {
- pick->connected_subchannel.reset();
- GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
+ GPR_ASSERT(subchannel_list_ == nullptr);
+ GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
+ GPR_ASSERT(pending_picks_ == nullptr);
+ grpc_connectivity_state_destroy(&state_tracker_);
+ grpc_subchannel_index_unref();
+}
+
+void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
+ PickState* pick;
+ while ((pick = pending_picks_) != nullptr) {
+ pending_picks_ = pick->next;
+ if (new_policy->PickLocked(pick)) {
+ // Synchronous return, schedule closure.
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
}
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+}
+
+void PickFirst::ShutdownLocked() {
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
+ if (grpc_lb_pick_first_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "Pick First %p Shutting down", this);
+ }
+ shutdown_ = true;
+ PickState* pick;
+ while ((pick = pending_picks_) != nullptr) {
+ pending_picks_ = pick->next;
+ pick->connected_subchannel.reset();
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
+ }
+ grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
- if (p->subchannel_list != nullptr) {
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
- "pf_shutdown");
- p->subchannel_list = nullptr;
+ if (subchannel_list_ != nullptr) {
+ grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_, "pf_shutdown");
+ subchannel_list_ = nullptr;
}
- if (p->latest_pending_subchannel_list != nullptr) {
- grpc_lb_subchannel_list_shutdown_and_unref(
- p->latest_pending_subchannel_list, "pf_shutdown");
- p->latest_pending_subchannel_list = nullptr;
+ if (latest_pending_subchannel_list_ != nullptr) {
+ grpc_lb_subchannel_list_shutdown_and_unref(latest_pending_subchannel_list_,
+ "pf_shutdown");
+ latest_pending_subchannel_list_ = nullptr;
}
- grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
- GRPC_ERROR_CANCELLED);
+ TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_CANCELLED);
GRPC_ERROR_UNREF(error);
}
-static void pf_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_lb_policy_pick_state* pick,
- grpc_error* error) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- grpc_lb_policy_pick_state* pp = p->pending_picks;
- p->pending_picks = nullptr;
+void PickFirst::CancelPickLocked(PickState* pick, grpc_error* error) {
+ PickState* pp = pending_picks_;
+ pending_picks_ = nullptr;
while (pp != nullptr) {
- grpc_lb_policy_pick_state* next = pp->next;
+ PickState* next = pp->next;
if (pp == pick) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
+ pp->next = pending_picks_;
+ pending_picks_ = pp;
}
pp = next;
}
GRPC_ERROR_UNREF(error);
}
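
CancelPickLocked() uses the detach-and-refilter idiom: take the whole pending list, then push every survivor back onto the head. Survivors therefore come back in reverse order, which is harmless since pending picks carry no ordering guarantee. The idiom in generic form (sketch, not gRPC code):

    template <typename T>
    T* RemoveFromList(T* head, T* target) {
      T* kept = nullptr;
      while (head != nullptr) {
        T* next = head->next;
        if (head != target) {  // survivor: push onto the rebuilt list
          head->next = kept;
          kept = head;
        }
        head = next;
      }
      return kept;  // survivors are returned in reverse order
    }
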
-static void pf_cancel_picks_locked(grpc_lb_policy* pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error* error) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- grpc_lb_policy_pick_state* pick = p->pending_picks;
- p->pending_picks = nullptr;
+void PickFirst::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) {
+ PickState* pick = pending_picks_;
+ pending_picks_ = nullptr;
while (pick != nullptr) {
- grpc_lb_policy_pick_state* next = pick->next;
+ PickState* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
- pick->next = p->pending_picks;
- p->pending_picks = pick;
+ pick->next = pending_picks_;
+ pending_picks_ = pick;
}
pick = next;
}
GRPC_ERROR_UNREF(error);
}
-static void start_picking_locked(pick_first_lb_policy* p) {
- p->started_picking = true;
- if (p->subchannel_list != nullptr &&
- p->subchannel_list->num_subchannels > 0) {
- p->subchannel_list->checking_subchannel = 0;
- for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
- if (p->subchannel_list->subchannels[i].subchannel != nullptr) {
- grpc_lb_subchannel_list_ref_for_connectivity_watch(
- p->subchannel_list, "connectivity_watch+start_picking");
+void PickFirst::StartPickingLocked() {
+ started_picking_ = true;
+ if (subchannel_list_ != nullptr && subchannel_list_->num_subchannels > 0) {
+ subchannel_list_->checking_subchannel = 0;
+ for (size_t i = 0; i < subchannel_list_->num_subchannels; ++i) {
+ if (subchannel_list_->subchannels[i].subchannel != nullptr) {
+ SubchannelListRefForConnectivityWatch(
+ subchannel_list_, "connectivity_watch+start_picking");
grpc_lb_subchannel_data_start_connectivity_watch(
- &p->subchannel_list->subchannels[i]);
+ &subchannel_list_->subchannels[i]);
break;
}
}
}
}
-static void pf_exit_idle_locked(grpc_lb_policy* pol) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- if (!p->started_picking) {
- start_picking_locked(p);
+void PickFirst::ExitIdleLocked() {
+ if (!started_picking_) {
+ StartPickingLocked();
}
}
-static int pf_pick_locked(grpc_lb_policy* pol,
- grpc_lb_policy_pick_state* pick) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
+bool PickFirst::PickLocked(PickState* pick) {
// If we have a selected subchannel already, return synchronously.
- if (p->selected != nullptr) {
- pick->connected_subchannel = p->selected->connected_subchannel;
- return 1;
+ if (selected_ != nullptr) {
+ pick->connected_subchannel = selected_->connected_subchannel;
+ return true;
}
// No subchannel selected yet, so handle asynchronously.
- if (!p->started_picking) {
- start_picking_locked(p);
+ if (!started_picking_) {
+ StartPickingLocked();
}
- pick->next = p->pending_picks;
- p->pending_picks = pick;
- return 0;
+ pick->next = pending_picks_;
+ pending_picks_ = pick;
+ return false;
}
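
The bool return encodes the synchronous/asynchronous split: true means the pick completed inline and connected_subchannel is already populated; false means it was queued and on_complete fires later. A hypothetical caller sketch:

    if (policy->PickLocked(pick)) {
      // Completed synchronously; pick->connected_subchannel is usable now.
    } else {
      // Queued; the result arrives via pick->on_complete once a
      // subchannel is selected (or the pick is cancelled).
    }
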
-static void destroy_unselected_subchannels_locked(pick_first_lb_policy* p) {
- for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
- grpc_lb_subchannel_data* sd = &p->subchannel_list->subchannels[i];
- if (p->selected != sd) {
+void PickFirst::DestroyUnselectedSubchannelsLocked() {
+ for (size_t i = 0; i < subchannel_list_->num_subchannels; ++i) {
+ grpc_lb_subchannel_data* sd = &subchannel_list_->subchannels[i];
+ if (selected_ != sd) {
grpc_lb_subchannel_data_unref_subchannel(sd,
"selected_different_subchannel");
}
}
}
-static grpc_connectivity_state pf_check_connectivity_locked(
- grpc_lb_policy* pol, grpc_error** error) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- return grpc_connectivity_state_get(&p->state_tracker, error);
+grpc_connectivity_state PickFirst::CheckConnectivityLocked(grpc_error** error) {
+ return grpc_connectivity_state_get(&state_tracker_, error);
}
-static void pf_notify_on_state_change_locked(grpc_lb_policy* pol,
- grpc_connectivity_state* current,
- grpc_closure* notify) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
+void PickFirst::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ grpc_connectivity_state_notify_on_state_change(&state_tracker_, current,
notify);
}
-static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
- grpc_closure* on_ack) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
- if (p->selected) {
- p->selected->connected_subchannel->Ping(on_initiate, on_ack);
+void PickFirst::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) {
+ if (selected_ != nullptr) {
+ selected_->connected_subchannel->Ping(on_initiate, on_ack);
} else {
GRPC_CLOSURE_SCHED(on_initiate,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
@@ -225,18 +258,31 @@ static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
}
}
-static void pf_connectivity_changed_locked(void* arg, grpc_error* error);
+void PickFirst::SubchannelListRefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason) {
+ // TODO(roth): We currently track this ref manually. Once the new
+ // ClosureRef API is ready and the subchannel_list code has been
+ // converted to a C++ API, find a way to hold the RefCountedPtr<>
+ // somewhere (maybe in the subchannel_data object) instead of doing
+ // this manually.
+ auto self = Ref(DEBUG_LOCATION, reason);
+ self.release();
+ grpc_lb_subchannel_list_ref(subchannel_list, reason);
+}
+
+void PickFirst::SubchannelListUnrefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason) {
+ Unref(DEBUG_LOCATION, reason);
+ grpc_lb_subchannel_list_unref(subchannel_list, reason);
+}
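
The Ref().release() pair deliberately detaches a strong ref from its RAII owner so the in-flight connectivity watch keeps the policy alive; the Unref() in the companion helper reclaims it. The pairing in isolation (sketch over the RefCounted-style base assumed by this diff):

    RefCountedPtr<PickFirst> self = Ref(DEBUG_LOCATION, reason);
    self.release();  // ownership now rests with the pending callback
    // ... later, exactly once, on the callback path:
    Unref(DEBUG_LOCATION, reason);  // pairs with the release() above
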
-static void pf_update_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_args* args) {
- pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(policy);
- const grpc_arg* arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+void PickFirst::UpdateLocked(const grpc_channel_args& args) {
+ const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- if (p->subchannel_list == nullptr) {
+ if (subchannel_list_ == nullptr) {
      // If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
grpc_connectivity_state_set(
- &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
"pf_update_missing");
} else {
@@ -244,77 +290,78 @@ static void pf_update_locked(grpc_lb_policy* policy,
gpr_log(GPR_ERROR,
"No valid LB addresses channel arg for Pick First %p update, "
"ignoring.",
- (void*)p);
+ this);
}
return;
}
const grpc_lb_addresses* addresses =
- static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
+ (const grpc_lb_addresses*)arg->value.pointer.p;
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void*)p, static_cast<unsigned long>(addresses->num_addresses));
+ gpr_log(GPR_INFO,
+ "Pick First %p received update with %" PRIuPTR " addresses", this,
+ addresses->num_addresses);
}
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
- &p->base, &grpc_lb_pick_first_trace, addresses, args,
- pf_connectivity_changed_locked);
+ this, &grpc_lb_pick_first_trace, addresses, combiner(),
+ client_channel_factory(), args, &PickFirst::OnConnectivityChangedLocked);
if (subchannel_list->num_subchannels == 0) {
// Empty update or no valid subchannels. Unsubscribe from all current
// subchannels and put the channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
- &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
- if (p->subchannel_list != nullptr) {
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+ if (subchannel_list_ != nullptr) {
+ grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"sl_shutdown_empty_update");
}
- p->subchannel_list = subchannel_list; // Empty list.
- p->selected = nullptr;
+ subchannel_list_ = subchannel_list; // Empty list.
+ selected_ = nullptr;
return;
}
- if (p->selected == nullptr) {
+ if (selected_ == nullptr) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
- if (p->subchannel_list != nullptr) {
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+ if (subchannel_list_ != nullptr) {
+ grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"pf_update_before_selected");
}
- p->subchannel_list = subchannel_list;
+ subchannel_list_ = subchannel_list;
} else {
// We do have a selected subchannel.
// Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
- if (sd->subchannel == p->selected->subchannel) {
+ if (sd->subchannel == selected_->subchannel) {
// The currently selected subchannel is in the update: we are done.
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Pick First %p found already selected subchannel %p "
"at update index %" PRIuPTR " of %" PRIuPTR "; update done",
- p, p->selected->subchannel, i,
+ this, selected_->subchannel, i,
subchannel_list->num_subchannels);
}
- if (p->selected->connected_subchannel != nullptr) {
- sd->connected_subchannel = p->selected->connected_subchannel;
+ if (selected_->connected_subchannel != nullptr) {
+ sd->connected_subchannel = selected_->connected_subchannel;
}
- p->selected = sd;
- if (p->subchannel_list != nullptr) {
+ selected_ = sd;
+ if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
- p->subchannel_list, "pf_update_includes_selected");
+ subchannel_list_, "pf_update_includes_selected");
}
- p->subchannel_list = subchannel_list;
- destroy_unselected_subchannels_locked(p);
- grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ subchannel_list_ = subchannel_list;
+ DestroyUnselectedSubchannelsLocked();
+ SubchannelListRefForConnectivityWatch(
subchannel_list, "connectivity_watch+replace_selected");
grpc_lb_subchannel_data_start_connectivity_watch(sd);
// If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here.
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (latest_pending_subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
- p->latest_pending_subchannel_list,
+ latest_pending_subchannel_list_,
"pf_update_includes_selected+outdated");
- p->latest_pending_subchannel_list = nullptr;
+ latest_pending_subchannel_list_ = nullptr;
}
return;
}
@@ -323,84 +370,81 @@ static void pf_update_locked(grpc_lb_policy* policy,
// pending subchannel list to the new subchannel list. We will wait
// for it to report READY before swapping it into the current
// subchannel list.
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
- (void*)p, (void*)p->latest_pending_subchannel_list,
- (void*)subchannel_list);
+ this, latest_pending_subchannel_list_, subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
- p->latest_pending_subchannel_list, "sl_outdated_dont_smash");
+ latest_pending_subchannel_list_, "sl_outdated_dont_smash");
}
- p->latest_pending_subchannel_list = subchannel_list;
+ latest_pending_subchannel_list_ = subchannel_list;
}
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
- if (p->started_picking) {
- grpc_lb_subchannel_list_ref_for_connectivity_watch(
- subchannel_list, "connectivity_watch+update");
+ if (started_picking_) {
+ SubchannelListRefForConnectivityWatch(subchannel_list,
+ "connectivity_watch+update");
grpc_lb_subchannel_data_start_connectivity_watch(
&subchannel_list->subchannels[0]);
}
}
-static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
+void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
- pick_first_lb_policy* p =
- reinterpret_cast<pick_first_lb_policy*>(sd->subchannel_list->policy);
+ PickFirst* p = static_cast<PickFirst*>(sd->subchannel_list->policy);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
- "), subchannel_list %p: state=%s p->shutdown=%d "
+ "), subchannel_list %p: state=%s p->shutdown_=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
- (void*)p, (void*)sd->subchannel,
- sd->subchannel_list->checking_subchannel,
- sd->subchannel_list->num_subchannels, (void*)sd->subchannel_list,
+ p, sd->subchannel, sd->subchannel_list->checking_subchannel,
+ sd->subchannel_list->num_subchannels, sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
- p->shutdown, sd->subchannel_list->shutting_down,
+ p->shutdown_, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
// If the policy is shutting down, unref and return.
- if (p->shutdown) {
+ if (p->shutdown_) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_data_unref_subchannel(sd, "pf_shutdown");
- grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
- "pf_shutdown");
+ p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
+ "pf_shutdown");
return;
}
// If the subchannel list is shutting down, stop watching.
if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_data_unref_subchannel(sd, "pf_sl_shutdown");
- grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
- "pf_sl_shutdown");
+ p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
+ "pf_sl_shutdown");
return;
}
   // If we're still here, the notification must be for a subchannel in
   // either the current or the latest pending subchannel list.
- GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
- sd->subchannel_list == p->latest_pending_subchannel_list);
+ GPR_ASSERT(sd->subchannel_list == p->subchannel_list_ ||
+ sd->subchannel_list == p->latest_pending_subchannel_list_);
// Update state.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Handle updates for the currently selected subchannel.
- if (p->selected == sd) {
+ if (p->selected_ == sd) {
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
- p->latest_pending_subchannel_list != nullptr) {
- p->selected = nullptr;
+ p->latest_pending_subchannel_list_ != nullptr) {
+ p->selected_ = nullptr;
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
- grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ p->SubchannelListUnrefForConnectivityWatch(
sd->subchannel_list, "selected_not_ready+switch_to_update");
grpc_lb_subchannel_list_shutdown_and_unref(
- p->subchannel_list, "selected_not_ready+switch_to_update");
- p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = nullptr;
+ p->subchannel_list_, "selected_not_ready+switch_to_update");
+ p->subchannel_list_ = p->latest_pending_subchannel_list_;
+ p->latest_pending_subchannel_list_ = nullptr;
grpc_connectivity_state_set(
- &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ &p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
} else {
// TODO(juanlishen): we re-resolve when the selected subchannel goes to
@@ -411,21 +455,20 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// If the selected channel goes bad, request a re-resolution.
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
+ grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE,
"selected_changed+reresolve");
- p->started_picking = false;
- grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
- GRPC_ERROR_NONE);
- // in transient failure. Rely on re-resolution to recover.
- p->selected = nullptr;
+ p->started_picking_ = false;
+ p->TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_NONE);
+ // In transient failure. Rely on re-resolution to recover.
+ p->selected_ = nullptr;
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
- grpc_lb_subchannel_list_unref_for_connectivity_watch(
- sd->subchannel_list, "pf_selected_shutdown");
+ p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
+ "pf_selected_shutdown");
grpc_lb_subchannel_data_unref_subchannel(
sd, "pf_selected_shutdown"); // Unrefs connected subchannel
} else {
- grpc_connectivity_state_set(&p->state_tracker,
+ grpc_connectivity_state_set(&p->state_tracker_,
sd->curr_connectivity_state,
GRPC_ERROR_REF(error), "selected_changed");
// Renew notification.
@@ -436,45 +479,45 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
}
// If we get here, there are two possible cases:
// 1. We do not currently have a selected subchannel, and the update is
- // for a subchannel in p->subchannel_list that we're trying to
+ // for a subchannel in p->subchannel_list_ that we're trying to
// connect to. The goal here is to find a subchannel that we can
// select.
// 2. We do currently have a selected subchannel, and the update is
- // for a subchannel in p->latest_pending_subchannel_list. The
+ // for a subchannel in p->latest_pending_subchannel_list_. The
// goal here is to find a subchannel from the update that we can
// select in place of the current one.
switch (sd->curr_connectivity_state) {
case GRPC_CHANNEL_READY: {
- // Case 2. Promote p->latest_pending_subchannel_list to
- // p->subchannel_list.
+ // Case 2. Promote p->latest_pending_subchannel_list_ to
+ // p->subchannel_list_.
sd->connected_subchannel =
grpc_subchannel_get_connected_subchannel(sd->subchannel);
- if (sd->subchannel_list == p->latest_pending_subchannel_list) {
- GPR_ASSERT(p->subchannel_list != nullptr);
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+ if (sd->subchannel_list == p->latest_pending_subchannel_list_) {
+ GPR_ASSERT(p->subchannel_list_ != nullptr);
+ grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list_,
"finish_update");
- p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = nullptr;
+ p->subchannel_list_ = p->latest_pending_subchannel_list_;
+ p->latest_pending_subchannel_list_ = nullptr;
}
// Cases 1 and 2.
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
+ grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "connecting_ready");
- p->selected = sd;
+ p->selected_ = sd;
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
- (void*)sd->subchannel);
+ gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p,
+ sd->subchannel);
}
// Drop all other subchannels, since we are now connected.
- destroy_unselected_subchannels_locked(p);
+ p->DestroyUnselectedSubchannelsLocked();
// Update any calls that were waiting for a pick.
- grpc_lb_policy_pick_state* pick;
- while ((pick = p->pending_picks)) {
- p->pending_picks = pick->next;
- pick->connected_subchannel = p->selected->connected_subchannel;
+ PickState* pick;
+ while ((pick = p->pending_picks_)) {
+ p->pending_picks_ = pick->next;
+ pick->connected_subchannel = p->selected_->connected_subchannel;
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
- (void*)p->selected);
+ p->selected_);
}
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
@@ -494,9 +537,9 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->subchannel_list->checking_subchannel == 0 &&
- sd->subchannel_list == p->subchannel_list) {
+ sd->subchannel_list == p->subchannel_list_) {
grpc_connectivity_state_set(
- &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ &p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
}
// Reuses the connectivity refs from the previous watch.
@@ -506,8 +549,8 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE: {
// Only update connectivity state in case 1.
- if (sd->subchannel_list == p->subchannel_list) {
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_CONNECTING,
+ if (sd->subchannel_list == p->subchannel_list_) {
+ grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_REF(error),
"connecting_changed");
}
@@ -520,51 +563,29 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
}
}
-static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
- pf_destroy,
- pf_shutdown_locked,
- pf_pick_locked,
- pf_cancel_pick_locked,
- pf_cancel_picks_locked,
- pf_ping_one_locked,
- pf_exit_idle_locked,
- pf_check_connectivity_locked,
- pf_notify_on_state_change_locked,
- pf_update_locked};
-
-static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
-
-static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
-
-static grpc_lb_policy* create_pick_first(grpc_lb_policy_factory* factory,
- grpc_lb_policy_args* args) {
- GPR_ASSERT(args->client_channel_factory != nullptr);
- pick_first_lb_policy* p =
- static_cast<pick_first_lb_policy*>(gpr_zalloc(sizeof(*p)));
- if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
- }
- pf_update_locked(&p->base, args);
- grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
- grpc_subchannel_index_ref();
- return &p->base;
-}
+//
+// factory
+//
-static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
- pick_first_factory_ref, pick_first_factory_unref, create_pick_first,
- "pick_first"};
+class PickFirstFactory : public LoadBalancingPolicyFactory {
+ public:
+ OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+ const LoadBalancingPolicy::Args& args) const override {
+ return OrphanablePtr<LoadBalancingPolicy>(New<PickFirst>(args));
+ }
-static grpc_lb_policy_factory pick_first_lb_policy_factory = {
- &pick_first_factory_vtable};
+ const char* name() const override { return "pick_first"; }
+};
-static grpc_lb_policy_factory* pick_first_lb_factory_create() {
- return &pick_first_lb_policy_factory;
-}
+} // namespace
-/* Plugin registration */
+} // namespace grpc_core
void grpc_lb_policy_pick_first_init() {
- grpc_register_lb_policy(pick_first_lb_factory_create());
+ grpc_core::LoadBalancingPolicyRegistry::Builder::
+ RegisterLoadBalancingPolicyFactory(
+ grpc_core::UniquePtr<grpc_core::LoadBalancingPolicyFactory>(
+ grpc_core::New<grpc_core::PickFirstFactory>()));
}
void grpc_lb_policy_pick_first_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index b5b4c44ef1..e534131c02 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -24,6 +24,8 @@
* updates. Note however that updates will start picking from the beginning of
* the updated list. */
+#include <grpc/support/port_platform.h>
+
#include <string.h>
#include <grpc/support/alloc.h>
@@ -40,34 +42,94 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
-grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
+namespace grpc_core {
+
+TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
+
+namespace {
+
+//
+// round_robin LB policy
+//
+
+class RoundRobin : public LoadBalancingPolicy {
+ public:
+ explicit RoundRobin(const Args& args);
+
+ void UpdateLocked(const grpc_channel_args& args) override;
+ bool PickLocked(PickState* pick) override;
+ void CancelPickLocked(PickState* pick, grpc_error* error) override;
+ void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) override;
+ void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
+ grpc_closure* closure) override;
+ grpc_connectivity_state CheckConnectivityLocked(
+ grpc_error** connectivity_error) override;
+ void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
+ void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override;
+ void ExitIdleLocked() override;
-typedef struct round_robin_lb_policy {
- /** base policy: must be first */
- grpc_lb_policy base;
+ private:
+ ~RoundRobin();
- grpc_lb_subchannel_list* subchannel_list;
+ void ShutdownLocked() override;
+ void StartPickingLocked();
+ size_t GetNextReadySubchannelIndexLocked();
+ void UpdateLastReadySubchannelIndexLocked(size_t last_ready_index);
+ void UpdateConnectivityStatusLocked(grpc_lb_subchannel_data* sd,
+ grpc_error* error);
+
+ static void OnConnectivityChangedLocked(void* arg, grpc_error* error);
+
+ void SubchannelListRefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason);
+ void SubchannelListUnrefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason);
+
+ /** list of subchannels */
+ grpc_lb_subchannel_list* subchannel_list_ = nullptr;
/** have we started picking? */
- bool started_picking;
+ bool started_picking_ = false;
/** are we shutting down? */
- bool shutdown;
+ bool shutdown_ = false;
/** List of picks that are waiting on connectivity */
- grpc_lb_policy_pick_state* pending_picks;
-
+ PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
- grpc_connectivity_state_tracker state_tracker;
-
+ grpc_connectivity_state_tracker state_tracker_;
/** Index into subchannels for last pick. */
- size_t last_ready_subchannel_index;
-
+ size_t last_ready_subchannel_index_ = 0;
/** Latest version of the subchannel list.
* Subchannel connectivity callbacks will only promote updated subchannel
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
- grpc_lb_subchannel_list* latest_pending_subchannel_list;
-} round_robin_lb_policy;
+ grpc_lb_subchannel_list* latest_pending_subchannel_list_ = nullptr;
+};
+
+RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
+ GPR_ASSERT(args.client_channel_factory != nullptr);
+ grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
+ "round_robin");
+ UpdateLocked(*args.args);
+ if (grpc_lb_round_robin_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "[RR %p] Created with %" PRIuPTR " subchannels", this,
+ subchannel_list_->num_subchannels);
+ }
+ grpc_subchannel_index_ref();
+}
+
+RoundRobin::~RoundRobin() {
+ if (grpc_lb_round_robin_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy", this);
+ }
+ GPR_ASSERT(subchannel_list_ == nullptr);
+ GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
+ GPR_ASSERT(pending_picks_ == nullptr);
+ grpc_connectivity_state_destroy(&state_tracker_);
+ grpc_subchannel_index_unref();
+}
/** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no
@@ -75,195 +137,190 @@ typedef struct round_robin_lb_policy {
*
* Note that this function does *not* update p->last_ready_subchannel_index.
* The caller must do that if it returns a pick. */
-static size_t get_next_ready_subchannel_index_locked(
- const round_robin_lb_policy* p) {
- GPR_ASSERT(p->subchannel_list != nullptr);
+size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
+ GPR_ASSERT(subchannel_list_ != nullptr);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
- "[RR %p] getting next ready subchannel (out of %lu), "
- "last_ready_subchannel_index=%lu",
- (void*)p,
- static_cast<unsigned long>(p->subchannel_list->num_subchannels),
- static_cast<unsigned long>(p->last_ready_subchannel_index));
- }
- for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
- const size_t index = (i + p->last_ready_subchannel_index + 1) %
- p->subchannel_list->num_subchannels;
+ "[RR %p] getting next ready subchannel (out of %" PRIuPTR
+ "), "
+ "last_ready_subchannel_index=%" PRIuPTR,
+ this, subchannel_list_->num_subchannels,
+ last_ready_subchannel_index_);
+ }
+ for (size_t i = 0; i < subchannel_list_->num_subchannels; ++i) {
+ const size_t index = (i + last_ready_subchannel_index_ + 1) %
+ subchannel_list_->num_subchannels;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
- "[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
- "state=%s",
- (void*)p, (void*)p->subchannel_list->subchannels[index].subchannel,
- (void*)p->subchannel_list, static_cast<unsigned long>(index),
+ "[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR
+ ": state=%s",
+ this, subchannel_list_->subchannels[index].subchannel,
+ subchannel_list_, index,
grpc_connectivity_state_name(
- p->subchannel_list->subchannels[index].curr_connectivity_state));
+ subchannel_list_->subchannels[index].curr_connectivity_state));
}
- if (p->subchannel_list->subchannels[index].curr_connectivity_state ==
+ if (subchannel_list_->subchannels[index].curr_connectivity_state ==
GRPC_CHANNEL_READY) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
- "[RR %p] found next ready subchannel (%p) at index %lu of "
- "subchannel_list %p",
- (void*)p,
- (void*)p->subchannel_list->subchannels[index].subchannel,
- static_cast<unsigned long>(index), (void*)p->subchannel_list);
+ "[RR %p] found next ready subchannel (%p) at index %" PRIuPTR
+ " of subchannel_list %p",
+ this, subchannel_list_->subchannels[index].subchannel, index,
+ subchannel_list_);
}
return index;
}
}
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void*)p);
+ gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", this);
}
- return p->subchannel_list->num_subchannels;
+ return subchannel_list_->num_subchannels;
}
-// Sets p->last_ready_subchannel_index to last_ready_index.
-static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
- size_t last_ready_index) {
- GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
- p->last_ready_subchannel_index = last_ready_index;
+// Sets last_ready_subchannel_index_ to last_ready_index.
+void RoundRobin::UpdateLastReadySubchannelIndexLocked(size_t last_ready_index) {
+ GPR_ASSERT(last_ready_index < subchannel_list_->num_subchannels);
+ last_ready_subchannel_index_ = last_ready_index;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
- "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
- (void*)p, static_cast<unsigned long>(last_ready_index),
- (void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
- (void*)p->subchannel_list->subchannels[last_ready_index]
+ "[RR %p] setting last_ready_subchannel_index=%" PRIuPTR
+ " (SC %p, CSC %p)",
+ this, last_ready_index,
+ subchannel_list_->subchannels[last_ready_index].subchannel,
+ subchannel_list_->subchannels[last_ready_index]
.connected_subchannel.get());
}
}
-static void rr_destroy(grpc_lb_policy* pol) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
- (void*)pol, (void*)pol);
+void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
+ PickState* pick;
+ while ((pick = pending_picks_) != nullptr) {
+ pending_picks_ = pick->next;
+ if (new_policy->PickLocked(pick)) {
+ // Synchronous return, schedule closure.
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+ }
}
- GPR_ASSERT(p->subchannel_list == nullptr);
- GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
- grpc_connectivity_state_destroy(&p->state_tracker);
- grpc_subchannel_index_unref();
- gpr_free(p);
}
-static void rr_shutdown_locked(grpc_lb_policy* pol,
- grpc_lb_policy* new_policy) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
+void RoundRobin::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
- }
- p->shutdown = true;
- grpc_lb_policy_pick_state* pick;
- while ((pick = p->pending_picks) != nullptr) {
- p->pending_picks = pick->next;
- if (new_policy != nullptr) {
- // Hand off to new LB policy.
- if (grpc_lb_policy_pick_locked(new_policy, pick)) {
- // Synchronous return; schedule callback.
- GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
- }
- } else {
- pick->connected_subchannel.reset();
- GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
- }
+ gpr_log(GPR_DEBUG, "[RR %p] Shutting down", this);
+ }
+ shutdown_ = true;
+ PickState* pick;
+ while ((pick = pending_picks_) != nullptr) {
+ pending_picks_ = pick->next;
+ pick->connected_subchannel.reset();
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+ grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
- if (p->subchannel_list != nullptr) {
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+ if (subchannel_list_ != nullptr) {
+ grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"sl_shutdown_rr_shutdown");
- p->subchannel_list = nullptr;
+ subchannel_list_ = nullptr;
}
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (latest_pending_subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
- p->latest_pending_subchannel_list, "sl_shutdown_pending_rr_shutdown");
- p->latest_pending_subchannel_list = nullptr;
+ latest_pending_subchannel_list_, "sl_shutdown_pending_rr_shutdown");
+ latest_pending_subchannel_list_ = nullptr;
}
- grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
- GRPC_ERROR_CANCELLED);
+ TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_CANCELLED);
GRPC_ERROR_UNREF(error);
}
-static void rr_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_lb_policy_pick_state* pick,
- grpc_error* error) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- grpc_lb_policy_pick_state* pp = p->pending_picks;
- p->pending_picks = nullptr;
+void RoundRobin::CancelPickLocked(PickState* pick, grpc_error* error) {
+ PickState* pp = pending_picks_;
+ pending_picks_ = nullptr;
while (pp != nullptr) {
- grpc_lb_policy_pick_state* next = pp->next;
+ PickState* next = pp->next;
if (pp == pick) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ "Pick Cancelled", &error, 1));
} else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
+ pp->next = pending_picks_;
+ pending_picks_ = pp;
}
pp = next;
}
GRPC_ERROR_UNREF(error);
}
-static void rr_cancel_picks_locked(grpc_lb_policy* pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error* error) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- grpc_lb_policy_pick_state* pick = p->pending_picks;
- p->pending_picks = nullptr;
+void RoundRobin::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error* error) {
+ PickState* pick = pending_picks_;
+ pending_picks_ = nullptr;
while (pick != nullptr) {
- grpc_lb_policy_pick_state* next = pick->next;
+ PickState* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ "Pick Cancelled", &error, 1));
} else {
- pick->next = p->pending_picks;
- p->pending_picks = pick;
+ pick->next = pending_picks_;
+ pending_picks_ = pick;
}
pick = next;
}
GRPC_ERROR_UNREF(error);
}
-static void start_picking_locked(round_robin_lb_policy* p) {
- p->started_picking = true;
- for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
- if (p->subchannel_list->subchannels[i].subchannel != nullptr) {
- grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
- "connectivity_watch");
+void RoundRobin::SubchannelListRefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason) {
+ // TODO(roth): We currently track this ref manually. Once the new
+ // ClosureRef API is ready and the subchannel_list code has been
+ // converted to a C++ API, find a way to hold the RefCountedPtr<>
+ // somewhere (maybe in the subchannel_data object) instead of doing
+ // this manually.
+ auto self = Ref(DEBUG_LOCATION, reason);
+ self.release();
+ grpc_lb_subchannel_list_ref(subchannel_list, reason);
+}
+
+void RoundRobin::SubchannelListUnrefForConnectivityWatch(
+ grpc_lb_subchannel_list* subchannel_list, const char* reason) {
+ Unref(DEBUG_LOCATION, reason);
+ grpc_lb_subchannel_list_unref(subchannel_list, reason);
+}
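
[Editor's note] The pair of helpers above takes a strong ref with Ref() and immediately release()s it so that ownership is tracked manually and balanced by the Unref() in the companion function. A minimal sketch of the shape the TODO suggests instead, holding the RefCountedPtr<> in the subchannel data; the SubchannelData struct and policy_ref field here are hypothetical, not part of this patch:

    // Hypothetical sketch of the TODO: store the strong ref instead of
    // leaking it to a manual Ref()/Unref() pairing.
    struct SubchannelData {
      // Held for the duration of the connectivity watch; reset() drops it.
      grpc_core::RefCountedPtr<grpc_core::LoadBalancingPolicy> policy_ref;
      // ... existing fields ...
    };
    // Start of watch: sd->policy_ref = Ref(DEBUG_LOCATION, "connectivity_watch");
    // End of watch:   sd->policy_ref.reset();
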
+
+void RoundRobin::StartPickingLocked() {
+ started_picking_ = true;
+ for (size_t i = 0; i < subchannel_list_->num_subchannels; i++) {
+ if (subchannel_list_->subchannels[i].subchannel != nullptr) {
+ SubchannelListRefForConnectivityWatch(subchannel_list_,
+ "connectivity_watch");
grpc_lb_subchannel_data_start_connectivity_watch(
- &p->subchannel_list->subchannels[i]);
+ &subchannel_list_->subchannels[i]);
}
}
}
-static void rr_exit_idle_locked(grpc_lb_policy* pol) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- if (!p->started_picking) {
- start_picking_locked(p);
+void RoundRobin::ExitIdleLocked() {
+ if (!started_picking_) {
+ StartPickingLocked();
}
}
-static int rr_pick_locked(grpc_lb_policy* pol,
- grpc_lb_policy_pick_state* pick) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
+bool RoundRobin::PickLocked(PickState* pick) {
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
- p->shutdown);
+ gpr_log(GPR_DEBUG, "[RR %p] Trying to pick (shutdown: %d)", this,
+ shutdown_);
}
- GPR_ASSERT(!p->shutdown);
- if (p->subchannel_list != nullptr) {
- const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
- if (next_ready_index < p->subchannel_list->num_subchannels) {
+ GPR_ASSERT(!shutdown_);
+ if (subchannel_list_ != nullptr) {
+ const size_t next_ready_index = GetNextReadySubchannelIndexLocked();
+ if (next_ready_index < subchannel_list_->num_subchannels) {
/* readily available, report right away */
grpc_lb_subchannel_data* sd =
- &p->subchannel_list->subchannels[next_ready_index];
+ &subchannel_list_->subchannels[next_ready_index];
pick->connected_subchannel = sd->connected_subchannel;
if (pick->user_data != nullptr) {
*pick->user_data = sd->user_data;
@@ -273,24 +330,24 @@ static int rr_pick_locked(grpc_lb_policy* pol,
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %" PRIuPTR ")",
- p, sd->subchannel, pick->connected_subchannel.get(),
+ this, sd->subchannel, pick->connected_subchannel.get(),
sd->subchannel_list, next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
- update_last_ready_subchannel_index_locked(p, next_ready_index);
- return 1;
+ UpdateLastReadySubchannelIndexLocked(next_ready_index);
+ return true;
}
}
/* no pick currently available. Save for later in list of pending picks */
- if (!p->started_picking) {
- start_picking_locked(p);
+ if (!started_picking_) {
+ StartPickingLocked();
}
- pick->next = p->pending_picks;
- p->pending_picks = pick;
- return 0;
+ pick->next = pending_picks_;
+ pending_picks_ = pick;
+ return false;
}
-static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
+void UpdateStateCountersLocked(grpc_lb_subchannel_data* sd) {
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
GPR_ASSERT(sd->prev_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN);
@@ -318,8 +375,8 @@ static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
* (the grpc_lb_subchannel_data associated with the updated subchannel) and the
* subchannel list \a sd belongs to (sd->subchannel_list). \a error will be used
* only if the policy transitions to state TRANSIENT_FAILURE. */
-static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
- grpc_error* error) {
+void RoundRobin::UpdateConnectivityStatusLocked(grpc_lb_subchannel_data* sd,
+ grpc_error* error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@@ -335,64 +392,61 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
* subchannel_list->num_subchannels.
*/
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
- round_robin_lb_policy* p =
- reinterpret_cast<round_robin_lb_policy*>(subchannel_list->policy);
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_IDLE);
if (subchannel_list->num_ready > 0) {
/* 1) READY */
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
+ grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_CONNECTING) {
/* 2) CONNECTING */
- grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_CONNECTING,
+ grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_CONNECTING,
GRPC_ERROR_NONE, "rr_connecting");
} else if (subchannel_list->num_transient_failures ==
subchannel_list->num_subchannels) {
/* 3) TRANSIENT_FAILURE */
- grpc_connectivity_state_set(&p->state_tracker,
- GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_REF(error), "rr_transient_failure");
+ grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(error),
+ "rr_exhausted_subchannels");
}
GRPC_ERROR_UNREF(error);
}
-static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
+void RoundRobin::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
- round_robin_lb_policy* p =
- reinterpret_cast<round_robin_lb_policy*>(sd->subchannel_list->policy);
+ RoundRobin* p = static_cast<RoundRobin*>(sd->subchannel_list->policy);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
"prev_state=%s new_state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
- (void*)p, (void*)sd->subchannel, (void*)sd->subchannel_list,
+ p, sd->subchannel, sd->subchannel_list,
grpc_connectivity_state_name(sd->prev_connectivity_state),
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
- p->shutdown, sd->subchannel_list->shutting_down,
+ p->shutdown_, sd->subchannel_list->shutting_down,
grpc_error_string(error));
}
GPR_ASSERT(sd->subchannel != nullptr);
// If the policy is shutting down, unref and return.
- if (p->shutdown) {
+ if (p->shutdown_) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_data_unref_subchannel(sd, "rr_shutdown");
- grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
- "rr_shutdown");
+ p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
+ "rr_shutdown");
return;
}
// If the subchannel list is shutting down, stop watching.
if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
grpc_lb_subchannel_data_stop_connectivity_watch(sd);
grpc_lb_subchannel_data_unref_subchannel(sd, "rr_sl_shutdown");
- grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
- "rr_sl_shutdown");
+ p->SubchannelListUnrefForConnectivityWatch(sd->subchannel_list,
+ "rr_sl_shutdown");
return;
}
// If we're still here, the notification must be for a subchannel in
// either the current or latest pending subchannel lists.
- GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
- sd->subchannel_list == p->latest_pending_subchannel_list);
+ GPR_ASSERT(sd->subchannel_list == p->subchannel_list_ ||
+ sd->subchannel_list == p->latest_pending_subchannel_list_);
GPR_ASSERT(sd->pending_connectivity_state_unsafe != GRPC_CHANNEL_SHUTDOWN);
// Now that we're inside the combiner, copy the pending connectivity
// state (which was set by the connectivity state watcher) to
@@ -409,8 +463,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
"Requesting re-resolution",
p, sd->subchannel);
}
- grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
- GRPC_ERROR_NONE);
+ p->TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_NONE);
break;
}
case GRPC_CHANNEL_READY: {
@@ -418,49 +471,47 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
sd->connected_subchannel =
grpc_subchannel_get_connected_subchannel(sd->subchannel);
}
- if (sd->subchannel_list != p->subchannel_list) {
- // promote sd->subchannel_list to p->subchannel_list.
+ if (sd->subchannel_list != p->subchannel_list_) {
+ // promote sd->subchannel_list to p->subchannel_list_.
// sd->subchannel_list must be equal to
- // p->latest_pending_subchannel_list because we have already filtered
+ // p->latest_pending_subchannel_list_ because we have already filtered
// for sds belonging to outdated subchannel lists.
- GPR_ASSERT(sd->subchannel_list == p->latest_pending_subchannel_list);
+ GPR_ASSERT(sd->subchannel_list == p->latest_pending_subchannel_list_);
GPR_ASSERT(!sd->subchannel_list->shutting_down);
if (grpc_lb_round_robin_trace.enabled()) {
- const unsigned long num_subchannels =
- p->subchannel_list != nullptr
- ? static_cast<unsigned long>(
- p->subchannel_list->num_subchannels)
+ const size_t num_subchannels =
+ p->subchannel_list_ != nullptr
+ ? p->subchannel_list_->num_subchannels
: 0;
gpr_log(GPR_DEBUG,
- "[RR %p] phasing out subchannel list %p (size %lu) in favor "
- "of %p (size %lu)",
- p, p->subchannel_list, num_subchannels, sd->subchannel_list,
+ "[RR %p] phasing out subchannel list %p (size %" PRIuPTR
+ ") in favor of %p (size %" PRIuPTR ")",
+ p, p->subchannel_list_, num_subchannels, sd->subchannel_list,
+                sd->subchannel_list->num_subchannels);
}
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list_ != nullptr) {
// dispose of the current subchannel_list
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+ grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list_,
"sl_phase_out_shutdown");
}
- p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = nullptr;
+ p->subchannel_list_ = p->latest_pending_subchannel_list_;
+ p->latest_pending_subchannel_list_ = nullptr;
}
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
- * p->pending_picks. This preemptively replicates rr_pick()'s actions.
- */
- const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
- GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
+     * p->pending_picks_. This preemptively replicates PickLocked()'s actions. */
+ const size_t next_ready_index = p->GetNextReadySubchannelIndexLocked();
+ GPR_ASSERT(next_ready_index < p->subchannel_list_->num_subchannels);
grpc_lb_subchannel_data* selected =
- &p->subchannel_list->subchannels[next_ready_index];
- if (p->pending_picks != nullptr) {
+ &p->subchannel_list_->subchannels[next_ready_index];
+ if (p->pending_picks_ != nullptr) {
// if the selected subchannel is going to be used for the pending
// picks, update the last picked pointer
- update_last_ready_subchannel_index_locked(p, next_ready_index);
+ p->UpdateLastReadySubchannelIndexLocked(next_ready_index);
}
- grpc_lb_policy_pick_state* pick;
- while ((pick = p->pending_picks)) {
- p->pending_picks = pick->next;
+ PickState* pick;
+ while ((pick = p->pending_picks_)) {
+ p->pending_picks_ = pick->next;
pick->connected_subchannel = selected->connected_subchannel;
if (pick->user_data != nullptr) {
*pick->user_data = selected->user_data;
@@ -468,10 +519,9 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
- "(subchannel_list %p, index %lu)",
- (void*)p, (void*)selected->subchannel,
- (void*)p->subchannel_list,
- static_cast<unsigned long>(next_ready_index));
+ "(subchannel_list %p, index %" PRIuPTR ")",
+ p, selected->subchannel, p->subchannel_list_,
+ next_ready_index);
}
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
@@ -482,40 +532,34 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:; // fallthrough
}
- // Update state counters and new overall state.
- update_state_counters_locked(sd);
+ // Update state counters.
+ UpdateStateCountersLocked(sd);
// Only update connectivity based on the selected subchannel list.
- if (sd->subchannel_list == p->subchannel_list) {
- update_lb_connectivity_status_locked(sd, GRPC_ERROR_REF(error));
+ if (sd->subchannel_list == p->subchannel_list_) {
+ p->UpdateConnectivityStatusLocked(sd, GRPC_ERROR_REF(error));
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
}
-static grpc_connectivity_state rr_check_connectivity_locked(
- grpc_lb_policy* pol, grpc_error** error) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- return grpc_connectivity_state_get(&p->state_tracker, error);
+grpc_connectivity_state RoundRobin::CheckConnectivityLocked(
+ grpc_error** error) {
+ return grpc_connectivity_state_get(&state_tracker_, error);
}
-static void rr_notify_on_state_change_locked(grpc_lb_policy* pol,
- grpc_connectivity_state* current,
- grpc_closure* notify) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
+void RoundRobin::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ grpc_connectivity_state_notify_on_state_change(&state_tracker_, current,
notify);
}
-static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
+void RoundRobin::PingOneLocked(grpc_closure* on_initiate,
grpc_closure* on_ack) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
- const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
- if (next_ready_index < p->subchannel_list->num_subchannels) {
+ const size_t next_ready_index = GetNextReadySubchannelIndexLocked();
+ if (next_ready_index < subchannel_list_->num_subchannels) {
grpc_lb_subchannel_data* selected =
- &p->subchannel_list->subchannels[next_ready_index];
- grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> target =
- selected->connected_subchannel;
- target->Ping(on_initiate, on_ack);
+ &subchannel_list_->subchannels[next_ready_index];
+ selected->connected_subchannel->Ping(on_initiate, on_ack);
} else {
GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
@@ -524,45 +568,41 @@ static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
}
}
-static void rr_update_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_args* args) {
- round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(policy);
- const grpc_arg* arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
+ const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
+ gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", this);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
// Otherwise, keep using the current subchannel list (ignore this update).
- if (p->subchannel_list == nullptr) {
+ if (subchannel_list_ == nullptr) {
grpc_connectivity_state_set(
- &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
"rr_update_missing");
}
return;
}
- grpc_lb_addresses* addresses =
- static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
+ grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
- addresses->num_addresses);
+ gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses",
+ this, addresses->num_addresses);
}
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
- &p->base, &grpc_lb_round_robin_trace, addresses, args,
- rr_connectivity_changed_locked);
+ this, &grpc_lb_round_robin_trace, addresses, combiner(),
+ client_channel_factory(), args, &RoundRobin::OnConnectivityChangedLocked);
if (subchannel_list->num_subchannels == 0) {
grpc_connectivity_state_set(
- &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty");
- if (p->subchannel_list != nullptr) {
- grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+ if (subchannel_list_ != nullptr) {
+ grpc_lb_subchannel_list_shutdown_and_unref(subchannel_list_,
"sl_shutdown_empty_update");
}
- p->subchannel_list = subchannel_list; // empty list
+ subchannel_list_ = subchannel_list; // empty list
return;
}
- if (p->started_picking) {
+ if (started_picking_) {
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
const grpc_connectivity_state subchannel_state =
grpc_subchannel_check_connectivity(
@@ -587,87 +627,61 @@ static void rr_update_locked(grpc_lb_policy* policy,
++subchannel_list->num_transient_failures;
}
}
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
- (void*)p, (void*)p->latest_pending_subchannel_list,
- (void*)subchannel_list);
+ this, latest_pending_subchannel_list_, subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
- p->latest_pending_subchannel_list, "sl_outdated");
+ latest_pending_subchannel_list_, "sl_outdated");
}
- p->latest_pending_subchannel_list = subchannel_list;
+ latest_pending_subchannel_list_ = subchannel_list;
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
/* Watch every new subchannel. A subchannel list becomes active the
* moment one of its subchannels is READY. At that moment, we swap
* p->subchannel_list for sd->subchannel_list, provided the subchannel
* list is still valid (ie, isn't shutting down) */
- grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
- "connectivity_watch");
+ SubchannelListRefForConnectivityWatch(subchannel_list,
+ "connectivity_watch");
grpc_lb_subchannel_data_start_connectivity_watch(
&subchannel_list->subchannels[i]);
}
} else {
// The policy isn't picking yet. Save the update for later, disposing of
// previous version if any.
- if (p->subchannel_list != nullptr) {
+ if (subchannel_list_ != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
- p->subchannel_list, "rr_update_before_started_picking");
+ subchannel_list_, "rr_update_before_started_picking");
}
- p->subchannel_list = subchannel_list;
+ subchannel_list_ = subchannel_list;
}
}
-static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
- rr_destroy,
- rr_shutdown_locked,
- rr_pick_locked,
- rr_cancel_pick_locked,
- rr_cancel_picks_locked,
- rr_ping_one_locked,
- rr_exit_idle_locked,
- rr_check_connectivity_locked,
- rr_notify_on_state_change_locked,
- rr_update_locked};
-
-static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
-
-static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
-
-static grpc_lb_policy* round_robin_create(grpc_lb_policy_factory* factory,
- grpc_lb_policy_args* args) {
- GPR_ASSERT(args->client_channel_factory != nullptr);
- round_robin_lb_policy* p =
- static_cast<round_robin_lb_policy*>(gpr_zalloc(sizeof(*p)));
- grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
- grpc_subchannel_index_ref();
- grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
- "round_robin");
- rr_update_locked(&p->base, args);
- if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
- static_cast<unsigned long>(p->subchannel_list->num_subchannels));
- }
- return &p->base;
-}
+//
+// factory
+//
-static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
- round_robin_factory_ref, round_robin_factory_unref, round_robin_create,
- "round_robin"};
+class RoundRobinFactory : public LoadBalancingPolicyFactory {
+ public:
+ OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+ const LoadBalancingPolicy::Args& args) const override {
+ return OrphanablePtr<LoadBalancingPolicy>(New<RoundRobin>(args));
+ }
-static grpc_lb_policy_factory round_robin_lb_policy_factory = {
- &round_robin_factory_vtable};
+ const char* name() const override { return "round_robin"; }
+};
-static grpc_lb_policy_factory* round_robin_lb_factory_create() {
- return &round_robin_lb_policy_factory;
-}
+} // namespace
-/* Plugin registration */
+} // namespace grpc_core
void grpc_lb_policy_round_robin_init() {
- grpc_register_lb_policy(round_robin_lb_factory_create());
+ grpc_core::LoadBalancingPolicyRegistry::Builder::
+ RegisterLoadBalancingPolicyFactory(
+ grpc_core::UniquePtr<grpc_core::LoadBalancingPolicyFactory>(
+ grpc_core::New<grpc_core::RoundRobinFactory>()));
}
void grpc_lb_policy_round_robin_shutdown() {}
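
[Editor's note] For orientation, this is roughly how a caller obtains the converted policy through the new registry API. A minimal sketch, not part of the patch; the Args field names (combiner, client_channel_factory, args) are inferred from their uses in this diff:

    // Hypothetical caller-side sketch of the new C++ LB policy API.
    grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> CreateRoundRobin(
        grpc_combiner* combiner, grpc_client_channel_factory* ccf,
        grpc_channel_args* channel_args) {
      grpc_core::LoadBalancingPolicy::Args lb_args;
      lb_args.combiner = combiner;
      lb_args.client_channel_factory = ccf;
      lb_args.args = channel_args;
      // Returns nullptr if no factory named "round_robin" is registered.
      return grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
          "round_robin", lb_args);
    }
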
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index e35c5e8db3..79cb64c6c6 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include <string.h>
#include <grpc/support/alloc.h>
@@ -67,7 +69,7 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
}
sd->connectivity_notification_pending = true;
grpc_subchannel_notify_on_state_change(
- sd->subchannel, sd->subchannel_list->policy->interested_parties,
+ sd->subchannel, sd->subchannel_list->policy->interested_parties(),
&sd->pending_connectivity_state_unsafe,
&sd->connectivity_changed_closure);
}
@@ -88,9 +90,10 @@ void grpc_lb_subchannel_data_stop_connectivity_watch(
}
grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
- grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
- const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
- grpc_iomgr_cb_func connectivity_changed_cb) {
+ grpc_core::LoadBalancingPolicy* p, grpc_core::TraceFlag* tracer,
+ const grpc_lb_addresses* addresses, grpc_combiner* combiner,
+ grpc_client_channel_factory* client_channel_factory,
+ const grpc_channel_args& args, grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list* subchannel_list =
static_cast<grpc_lb_subchannel_list*>(
gpr_zalloc(sizeof(*subchannel_list)));
@@ -118,12 +121,11 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
- 1);
+ &args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
- args->client_channel_factory, &sc_args);
+ client_channel_factory, &sc_args);
grpc_channel_args_destroy(new_args);
if (subchannel == nullptr) {
// Subchannel could not be created.
@@ -154,7 +156,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
sd->subchannel = subchannel;
GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
connectivity_changed_cb, sd,
- grpc_combiner_scheduler(args->combiner));
+ grpc_combiner_scheduler(combiner));
// We assume that the current state is IDLE. If not, we'll get a
// callback telling us that.
sd->prev_connectivity_state = GRPC_CHANNEL_IDLE;
@@ -212,18 +214,6 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
}
}
-void grpc_lb_subchannel_list_ref_for_connectivity_watch(
- grpc_lb_subchannel_list* subchannel_list, const char* reason) {
- GRPC_LB_POLICY_REF(subchannel_list->policy, reason);
- grpc_lb_subchannel_list_ref(subchannel_list, reason);
-}
-
-void grpc_lb_subchannel_list_unref_for_connectivity_watch(
- grpc_lb_subchannel_list* subchannel_list, const char* reason) {
- GRPC_LB_POLICY_UNREF(subchannel_list->policy, reason);
- grpc_lb_subchannel_list_unref(subchannel_list, reason);
-}
-
static void subchannel_data_cancel_connectivity_watch(
grpc_lb_subchannel_data* sd, const char* reason) {
if (sd->subchannel_list->tracer->enabled()) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 91537f3afe..6889d596ac 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/debug/trace.h"
@@ -82,7 +84,7 @@ void grpc_lb_subchannel_data_stop_connectivity_watch(
struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
- grpc_lb_policy* policy;
+ grpc_core::LoadBalancingPolicy* policy;
grpc_core::TraceFlag* tracer;
@@ -115,9 +117,10 @@ struct grpc_lb_subchannel_list {
};
grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
- grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
- const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
- grpc_iomgr_cb_func connectivity_changed_cb);
+ grpc_core::LoadBalancingPolicy* p, grpc_core::TraceFlag* tracer,
+ const grpc_lb_addresses* addresses, grpc_combiner* combiner,
+ grpc_client_channel_factory* client_channel_factory,
+ const grpc_channel_args& args, grpc_iomgr_cb_func connectivity_changed_cb);
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const char* reason);
@@ -125,13 +128,6 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
const char* reason);
-/// Takes and releases refs needed for a connectivity notification.
-/// This includes a ref to subchannel_list and a weak ref to the LB policy.
-void grpc_lb_subchannel_list_ref_for_connectivity_watch(
- grpc_lb_subchannel_list* subchannel_list, const char* reason);
-void grpc_lb_subchannel_list_unref_for_connectivity_watch(
- grpc_lb_subchannel_list* subchannel_list, const char* reason);
-
/// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
/// connectivity state notification callback will ultimately unref it.
void grpc_lb_subchannel_list_shutdown_and_unref(
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.cc b/src/core/ext/filters/client_channel/lb_policy_factory.cc
index 4c367ce3c5..7c8cba55b7 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include <string.h>
#include <grpc/support/alloc.h>
@@ -151,17 +153,3 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
return nullptr;
return static_cast<grpc_lb_addresses*>(lb_addresses_arg->value.pointer.p);
}
-
-void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
- factory->vtable->ref(factory);
-}
-
-void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory) {
- factory->vtable->unref(factory);
-}
-
-grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
- grpc_lb_policy_factory* factory, grpc_lb_policy_args* args) {
- if (factory == nullptr) return nullptr;
- return factory->vtable->create_lb_policy(factory, args);
-}
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 9da231b657..b8bbd32072 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resolve_address.h"
@@ -26,21 +28,20 @@
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
+//
+// representation of an LB address
+//
+
// Channel arg key for grpc_lb_addresses.
#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
-typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
-typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
-
-struct grpc_lb_policy_factory {
- const grpc_lb_policy_factory_vtable* vtable;
-};
-
/** A resolved address alongside any LB related information associated with it.
* \a user_data, if not NULL, contains opaque data meant to be consumed by the
 * gRPC LB policy. Note that not all LB policies support \a user_data as input.
 * Those that don't will simply ignore it and will correspondingly return NULL in
* their namesake pick() output argument. */
+// TODO(roth): Once we figure out a better way of handling user_data in
+// LB policies, convert these structs to C++ classes.
typedef struct grpc_lb_address {
grpc_resolved_address address;
bool is_balancer;
@@ -101,30 +102,27 @@ grpc_arg grpc_lb_addresses_create_channel_arg(
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
const grpc_channel_args* channel_args);
-/** Arguments passed to LB policies. */
-struct grpc_lb_policy_args {
- grpc_client_channel_factory* client_channel_factory;
- grpc_channel_args* args;
- grpc_combiner* combiner;
-};
+//
+// LB policy factory
+//
-struct grpc_lb_policy_factory_vtable {
- void (*ref)(grpc_lb_policy_factory* factory);
- void (*unref)(grpc_lb_policy_factory* factory);
+namespace grpc_core {
- /** Implementation of grpc_lb_policy_factory_create_lb_policy */
- grpc_lb_policy* (*create_lb_policy)(grpc_lb_policy_factory* factory,
- grpc_lb_policy_args* args);
+class LoadBalancingPolicyFactory {
+ public:
+ /// Returns a new LB policy instance.
+ virtual OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+ const LoadBalancingPolicy::Args& args) const GRPC_ABSTRACT;
- /** Name for the LB policy this factory implements */
- const char* name;
-};
+ /// Returns the LB policy name that this factory provides.
+ /// Caller does NOT take ownership of result.
+ virtual const char* name() const GRPC_ABSTRACT;
-void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory);
-void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory);
+ virtual ~LoadBalancingPolicyFactory() {}
+
+ GRPC_ABSTRACT_BASE_CLASS
+};
-/** Create a lb_policy instance. */
-grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
- grpc_lb_policy_factory* factory, grpc_lb_policy_args* args);
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index 8414504e8f..d651b1120d 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
@@ -16,55 +16,82 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include <string.h>
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
-#define MAX_POLICIES 10
+namespace grpc_core {
-static grpc_lb_policy_factory* g_all_of_the_lb_policies[MAX_POLICIES];
-static int g_number_of_lb_policies = 0;
+namespace {
-void grpc_lb_policy_registry_init(void) { g_number_of_lb_policies = 0; }
+class RegistryState {
+ public:
+ RegistryState() {}
-void grpc_lb_policy_registry_shutdown(void) {
- int i;
- for (i = 0; i < g_number_of_lb_policies; i++) {
- grpc_lb_policy_factory_unref(g_all_of_the_lb_policies[i]);
+ void RegisterLoadBalancingPolicyFactory(
+ UniquePtr<LoadBalancingPolicyFactory> factory) {
+ for (size_t i = 0; i < factories_.size(); ++i) {
+ GPR_ASSERT(strcmp(factories_[i]->name(), factory->name()) != 0);
+ }
+ factories_.push_back(std::move(factory));
}
-}
-void grpc_register_lb_policy(grpc_lb_policy_factory* factory) {
- int i;
- for (i = 0; i < g_number_of_lb_policies; i++) {
- GPR_ASSERT(0 != gpr_stricmp(factory->vtable->name,
- g_all_of_the_lb_policies[i]->vtable->name));
+ LoadBalancingPolicyFactory* GetLoadBalancingPolicyFactory(
+ const char* name) const {
+ for (size_t i = 0; i < factories_.size(); ++i) {
+ if (strcmp(name, factories_[i]->name()) == 0) {
+ return factories_[i].get();
+ }
+ }
+ return nullptr;
}
- GPR_ASSERT(g_number_of_lb_policies != MAX_POLICIES);
- grpc_lb_policy_factory_ref(factory);
- g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory;
-}
-static grpc_lb_policy_factory* lookup_factory(const char* name) {
- int i;
+ private:
+ InlinedVector<UniquePtr<LoadBalancingPolicyFactory>, 10> factories_;
+};
- if (name == nullptr) return nullptr;
+RegistryState* g_state = nullptr;
- for (i = 0; i < g_number_of_lb_policies; i++) {
- if (0 == gpr_stricmp(name, g_all_of_the_lb_policies[i]->vtable->name)) {
- return g_all_of_the_lb_policies[i];
- }
- }
+} // namespace
- return nullptr;
+//
+// LoadBalancingPolicyRegistry::Builder
+//
+
+void LoadBalancingPolicyRegistry::Builder::InitRegistry() {
+ if (g_state == nullptr) g_state = New<RegistryState>();
+}
+
+void LoadBalancingPolicyRegistry::Builder::ShutdownRegistry() {
+ Delete(g_state);
+ g_state = nullptr;
}
-grpc_lb_policy* grpc_lb_policy_create(const char* name,
- grpc_lb_policy_args* args) {
- grpc_lb_policy_factory* factory = lookup_factory(name);
- grpc_lb_policy* lb_policy =
- grpc_lb_policy_factory_create_lb_policy(factory, args);
- return lb_policy;
+void LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
+ UniquePtr<LoadBalancingPolicyFactory> factory) {
+ InitRegistry();
+ g_state->RegisterLoadBalancingPolicyFactory(std::move(factory));
}
+
+//
+// LoadBalancingPolicyRegistry
+//
+
+OrphanablePtr<LoadBalancingPolicy>
+LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+ const char* name, const LoadBalancingPolicy::Args& args) {
+ GPR_ASSERT(g_state != nullptr);
+ // Find factory.
+ LoadBalancingPolicyFactory* factory =
+ g_state->GetLoadBalancingPolicyFactory(name);
+ if (factory == nullptr) return nullptr; // Specified name not found.
+ // Create policy via factory.
+ return factory->CreateLoadBalancingPolicy(args);
+}
+
+} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index 5aff79376b..2283d848bd 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -19,22 +19,37 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/exec_ctx.h"
-/** Initialize the registry and set \a default_factory as the factory to be
- * returned when no name is provided in a lookup */
-void grpc_lb_policy_registry_init(void);
-void grpc_lb_policy_registry_shutdown(void);
+namespace grpc_core {
-/** Register a LB policy factory. */
-void grpc_register_lb_policy(grpc_lb_policy_factory* factory);
+class LoadBalancingPolicyRegistry {
+ public:
+ /// Methods used to create and populate the LoadBalancingPolicyRegistry.
+ /// NOT THREAD SAFE -- to be used only during global gRPC
+ /// initialization and shutdown.
+ class Builder {
+ public:
+ /// Global initialization and shutdown hooks.
+ static void InitRegistry();
+ static void ShutdownRegistry();
-/** Create a \a grpc_lb_policy instance.
- *
- * If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init
- * will be returned. */
-grpc_lb_policy* grpc_lb_policy_create(const char* name,
- grpc_lb_policy_args* args);
+ /// Registers an LB policy factory. The factory will be used to create an
+ /// LB policy whose name matches that of the factory.
+ static void RegisterLoadBalancingPolicyFactory(
+ UniquePtr<LoadBalancingPolicyFactory> factory);
+ };
+
+ /// Creates an LB policy of the type specified by \a name.
+ static OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+ const char* name, const LoadBalancingPolicy::Args& args);
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
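
[Editor's note] Taken together, the factory and registry classes replace the old vtable-based registration. A hypothetical out-of-tree policy would register itself during global init the same way grpc_lb_policy_round_robin_init() does above; MyLbPolicy and my_lb_policy_init are illustrative names only:

    // Hypothetical plugin registration sketch against the new registry API.
    class MyLbFactory : public grpc_core::LoadBalancingPolicyFactory {
     public:
      grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy>
      CreateLoadBalancingPolicy(
          const grpc_core::LoadBalancingPolicy::Args& args) const override {
        return grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy>(
            grpc_core::New<MyLbPolicy>(args));  // MyLbPolicy: assumed subclass.
      }
      const char* name() const override { return "my_policy"; }
    };

    void my_lb_policy_init() {
      grpc_core::LoadBalancingPolicyRegistry::Builder::
          RegisterLoadBalancingPolicyFactory(
              grpc_core::UniquePtr<grpc_core::LoadBalancingPolicyFactory>(
                  grpc_core::New<MyLbFactory>()));
    }
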
diff --git a/src/core/ext/filters/client_channel/method_params.cc b/src/core/ext/filters/client_channel/method_params.cc
new file mode 100644
index 0000000000..374b87e170
--- /dev/null
+++ b/src/core/ext/filters/client_channel/method_params.cc
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/filters/client_channel/method_params.h"
+#include "src/core/ext/filters/client_channel/status_util.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
+
+// As per the retry design, we do not allow more than 5 retry attempts.
+#define MAX_MAX_RETRY_ATTEMPTS 5
+
+namespace grpc_core {
+namespace internal {
+
+namespace {
+
+bool ParseWaitForReady(
+ grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
+ if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
+ return false;
+ }
+ *wait_for_ready = field->type == GRPC_JSON_TRUE
+ ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
+ : ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
+ return true;
+}
+
+// Parses a JSON field of the form generated for a google.proto.Duration
+// proto message, as per:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+bool ParseDuration(grpc_json* field, grpc_millis* duration) {
+ if (field->type != GRPC_JSON_STRING) return false;
+ size_t len = strlen(field->value);
+ if (field->value[len - 1] != 's') return false;
+ UniquePtr<char> buf(gpr_strdup(field->value));
+ *(buf.get() + len - 1) = '\0'; // Remove trailing 's'.
+ char* decimal_point = strchr(buf.get(), '.');
+ int nanos = 0;
+ if (decimal_point != nullptr) {
+ *decimal_point = '\0';
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
+ return false;
+ }
+ int num_digits = static_cast<int>(strlen(decimal_point + 1));
+ if (num_digits > 9) { // We don't accept greater precision than nanos.
+ return false;
+ }
+ for (int i = 0; i < (9 - num_digits); ++i) {
+ nanos *= 10;
+ }
+ }
+ int seconds =
+ decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get());
+ if (seconds == -1) return false;
+ *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
+ return true;
+}
+
+UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
+ grpc_json* field) {
+ auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
+ if (field->type != GRPC_JSON_OBJECT) return nullptr;
+ for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ sub_field = sub_field->next) {
+ if (sub_field->key == nullptr) return nullptr;
+ if (strcmp(sub_field->key, "maxAttempts") == 0) {
+ if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
+ if (retry_policy->max_attempts <= 1) return nullptr;
+ if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
+ gpr_log(GPR_ERROR,
+ "service config: clamped retryPolicy.maxAttempts at %d",
+ MAX_MAX_RETRY_ATTEMPTS);
+ retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
+ }
+ } else if (strcmp(sub_field->key, "initialBackoff") == 0) {
+ if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->initial_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "maxBackoff") == 0) {
+ if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->max_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
+ if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
+ 1) {
+ return nullptr;
+ }
+ if (retry_policy->backoff_multiplier <= 0) return nullptr;
+ } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
+ if (!retry_policy->retryable_status_codes.Empty()) {
+ return nullptr; // Duplicate.
+ }
+ if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
+ for (grpc_json* element = sub_field->child; element != nullptr;
+ element = element->next) {
+ if (element->type != GRPC_JSON_STRING) return nullptr;
+ grpc_status_code status;
+ if (!grpc_status_code_from_string(element->value, &status)) {
+ return nullptr;
+ }
+ retry_policy->retryable_status_codes.Add(status);
+ }
+ if (retry_policy->retryable_status_codes.Empty()) return nullptr;
+ }
+ }
+ // Make sure required fields are set.
+ if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
+ retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
+ retry_policy->retryable_status_codes.Empty()) {
+ return nullptr;
+ }
+ return retry_policy;
+}
+
+} // namespace
+
+RefCountedPtr<ClientChannelMethodParams>
+ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
+ RefCountedPtr<ClientChannelMethodParams> method_params =
+ MakeRefCounted<ClientChannelMethodParams>();
+ for (grpc_json* field = json->child; field != nullptr; field = field->next) {
+ if (field->key == nullptr) continue;
+ if (strcmp(field->key, "waitForReady") == 0) {
+ if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
+ return nullptr; // Duplicate.
+ }
+ if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
+ return nullptr;
+ }
+ } else if (strcmp(field->key, "timeout") == 0) {
+ if (method_params->timeout_ > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
+ } else if (strcmp(field->key, "retryPolicy") == 0) {
+ if (method_params->retry_policy_ != nullptr) {
+ return nullptr; // Duplicate.
+ }
+ method_params->retry_policy_ = ParseRetryPolicy(field);
+ if (method_params->retry_policy_ == nullptr) return nullptr;
+ }
+ }
+ return method_params;
+}
+
+} // namespace internal
+} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/method_params.h b/src/core/ext/filters/client_channel/method_params.h
new file mode 100644
index 0000000000..48ece29867
--- /dev/null
+++ b/src/core/ext/filters/client_channel/method_params.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/status_util.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
+#include "src/core/lib/json/json.h"
+
+namespace grpc_core {
+namespace internal {
+
+class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
+ public:
+ enum WaitForReady {
+ WAIT_FOR_READY_UNSET = 0,
+ WAIT_FOR_READY_FALSE,
+ WAIT_FOR_READY_TRUE
+ };
+
+ struct RetryPolicy {
+ int max_attempts = 0;
+ grpc_millis initial_backoff = 0;
+ grpc_millis max_backoff = 0;
+ float backoff_multiplier = 0;
+ StatusCodeSet retryable_status_codes;
+ };
+
+ /// Creates a method_parameters object from \a json.
+ /// Intended for use with ServiceConfig::CreateMethodConfigTable().
+ static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
+ const grpc_json* json);
+
+ grpc_millis timeout() const { return timeout_; }
+ WaitForReady wait_for_ready() const { return wait_for_ready_; }
+ const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T, typename... Args>
+ friend T* grpc_core::New(Args&&... args);
+
+ ClientChannelMethodParams() {}
+ virtual ~ClientChannelMethodParams() {}
+
+ grpc_millis timeout_ = 0;
+ WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
+ UniquePtr<RetryPolicy> retry_policy_;
+};
+
+} // namespace internal
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H */
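
[Editor's note] To make the parsing rules in method_params.cc concrete, a hedged sketch of feeding one method-config entry through CreateFromJson(). The JSON shape follows the fields this patch handles; grpc_json_parse_string()/grpc_json_destroy() are assumed from src/core/lib/json/json.h (grpc_json_parse_string mutates its input, hence the strdup):

    // Hypothetical usage sketch of ClientChannelMethodParams::CreateFromJson().
    char* text = gpr_strdup(
        "{\"waitForReady\": true, \"timeout\": \"1.5s\", "
        "\"retryPolicy\": {\"maxAttempts\": 3, \"initialBackoff\": \"0.1s\", "
        "\"maxBackoff\": \"1s\", \"backoffMultiplier\": 2.0, "
        "\"retryableStatusCodes\": [\"UNAVAILABLE\"]}}");
    grpc_json* json = grpc_json_parse_string(text);
    grpc_core::RefCountedPtr<grpc_core::internal::ClientChannelMethodParams>
        params =
            grpc_core::internal::ClientChannelMethodParams::CreateFromJson(json);
    // On success: params->timeout() == 1500, since ParseDuration() turns
    // "1.5s" into 1 * GPR_MS_PER_SEC + 500000000ns / GPR_NS_PER_MS;
    // params->wait_for_ready() == WAIT_FOR_READY_TRUE; and
    // params->retry_policy()->max_attempts == 3.
    grpc_json_destroy(json);
    gpr_free(text);
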
diff --git a/src/core/ext/filters/client_channel/parse_address.cc b/src/core/ext/filters/client_channel/parse_address.cc
index 473c7542df..e78dc99e0b 100644
--- a/src/core/ext/filters/client_channel/parse_address.cc
+++ b/src/core/ext/filters/client_channel/parse_address.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index ca0a0d18f0..9a88b66edc 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H
+#include <grpc/support/port_platform.h>
+
#include <stddef.h>
#include "src/core/ext/filters/client_channel/uri_parser.h"
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.cc b/src/core/ext/filters/client_channel/proxy_mapper.cc
index be85cfcced..c4da06778d 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.cc
+++ b/src/core/ext/filters/client_channel/proxy_mapper.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/proxy_mapper.h"
void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable,
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.h b/src/core/ext/filters/client_channel/proxy_mapper.h
index ce3e65ee46..634b0ed7bf 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H
+#include <grpc/support/port_platform.h>
+
#include <stdbool.h>
#include <grpc/impl/codegen/grpc_types.h>
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
index b42597e363..a02a5f5e2c 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include <string.h>
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.h b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
index 2ad6c04e1d..326b582b99 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/proxy_mapper.h"
void grpc_proxy_mapper_registry_init();
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index 860c2eea1e..cd11eeb9e4 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index 62fcb49a41..1685a6c803 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H
+#include <grpc/support/port_platform.h>
+
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/gprpp/abstract.h"
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 0442b1e496..aa93e5d8de 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -17,6 +17,7 @@
*/
#include <grpc/support/port_platform.h>
+
#if GRPC_ARES == 1 && !defined(GRPC_UV)
#include <limits.h>
@@ -294,7 +295,7 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
size_t num_args_to_add = 0;
new_args[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses_);
- grpc_service_config* service_config = nullptr;
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config;
char* service_config_string = nullptr;
if (r->service_config_json_ != nullptr) {
service_config_string = ChooseServiceConfig(r->service_config_json_);
@@ -305,10 +306,11 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
(char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
- service_config = grpc_service_config_create(service_config_string);
+ service_config =
+ grpc_core::ServiceConfig::Create(service_config_string);
if (service_config != nullptr) {
const char* lb_policy_name =
- grpc_service_config_get_lb_policy_name(service_config);
+ service_config->GetLoadBalancingPolicyName();
if (lb_policy_name != nullptr) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
@@ -321,7 +323,6 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
result = grpc_channel_args_copy_and_add_and_remove(
r->channel_args_, args_to_remove, num_args_to_remove, new_args,
num_args_to_add);
- if (service_config != nullptr) grpc_service_config_destroy(service_config);
gpr_free(service_config_string);
grpc_lb_addresses_destroy(r->lb_addresses_);
// Reset backoff state so that we start from the beginning when the
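
The hunk above replaces a raw grpc_service_config* plus a manual destroy call with a smart pointer, so the config is released on every exit path. A self-contained sketch of the same move, using std::unique_ptr and a hypothetical ServiceConfig stand-in rather than the real gRPC types:

#include <memory>

// Hypothetical stand-in for grpc_core::ServiceConfig.
class ServiceConfig {
 public:
  static std::unique_ptr<ServiceConfig> Create(const char* json) {
    if (json == nullptr) return nullptr;
    return std::unique_ptr<ServiceConfig>(new ServiceConfig());
  }
  const char* GetLoadBalancingPolicyName() const { return "round_robin"; }
};

void OnResolved(const char* service_config_json) {
  // Before this change: a raw pointer plus an explicit
  // grpc_service_config_destroy() at the end of the function. Now the
  // unique_ptr releases the config automatically.
  std::unique_ptr<ServiceConfig> service_config =
      ServiceConfig::Create(service_config_json);
  if (service_config != nullptr) {
    const char* lb_policy_name = service_config->GetLoadBalancingPolicyName();
    (void)lb_policy_name;  // would be copied into the channel args here
  }
}  // service_config destroyed here; no manual destroy call needed.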
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index ba7dad63cf..0bc13e35f4 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
+#include <grpc/support/port_platform.h>
+
#include <ares.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index 10bc8f6074..b604f2bf14 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -16,6 +16,7 @@
*
*/
#include <grpc/support/port_platform.h>
+
#include "src/core/lib/iomgr/port.h"
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 82b5545601..71b06eb87e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -17,6 +17,7 @@
*/
#include <grpc/support/port_platform.h>
+
#if GRPC_ARES == 1 && !defined(GRPC_UV)
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 86d870e0a6..bda9cd1729 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index a184cf2d57..5096e480bc 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -17,6 +17,7 @@
*/
#include <grpc/support/port_platform.h>
+
#if GRPC_ARES != 1 || defined(GRPC_UV)
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index b01e608c3f..4d8958f519 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -17,6 +17,8 @@
// This is similar to the sockaddr resolver, except that it supports a
// bunch of query args that are useful for dependency injection in tests.
+#include <grpc/support/port_platform.h>
+
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
@@ -24,7 +26,6 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index d42811d913..858f35851d 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -17,6 +17,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index 966b9fd3f2..f74ac5aebe 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -16,13 +16,14 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
-#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index f9b9501236..ee3cfeeb9b 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H
+#include <grpc/support/port_platform.h>
+
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/resolver.h"
diff --git a/src/core/ext/filters/client_channel/resolver_registry.cc b/src/core/ext/filters/client_channel/resolver_registry.cc
index 036e81d0ae..91c0267f95 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.cc
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include <string.h>
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 260336de83..d6ec6811bd 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/resolver_factory.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/memory.h"
diff --git a/src/core/ext/filters/client_channel/retry_throttle.cc b/src/core/ext/filters/client_channel/retry_throttle.cc
index a98e27860a..45de6667c8 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.cc
+++ b/src/core/ext/filters/client_channel/retry_throttle.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include <limits.h>
@@ -38,7 +40,7 @@ struct grpc_server_retry_throttle_data {
int milli_token_ratio;
gpr_atm milli_tokens;
// A pointer to the replacement for this grpc_server_retry_throttle_data
- // entry. If non-NULL, then this entry is stale and must not be used.
+ // entry. If non-nullptr, then this entry is stale and must not be used.
// We hold a reference to the replacement.
gpr_atm replacement;
};
@@ -56,6 +58,7 @@ static void get_replacement_throttle_data_if_needed(
bool grpc_server_retry_throttle_data_record_failure(
grpc_server_retry_throttle_data* throttle_data) {
+ if (throttle_data == nullptr) return true;
// First, check if we are stale and need to be replaced.
get_replacement_throttle_data_if_needed(&throttle_data);
// We decrement milli_tokens by 1000 (1 token) for each failure.
@@ -70,6 +73,7 @@ bool grpc_server_retry_throttle_data_record_failure(
void grpc_server_retry_throttle_data_record_success(
grpc_server_retry_throttle_data* throttle_data) {
+ if (throttle_data == nullptr) return;
// First, check if we are stale and need to be replaced.
get_replacement_throttle_data_if_needed(&throttle_data);
// We increment milli_tokens by milli_token_ratio for each success.
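
The new null checks make both entry points safe no-ops when no throttle data is configured. Underneath, the counters form a token bucket kept in milli-tokens, as the comments note. A self-contained sketch of that arithmetic; the real code is lock-free on gpr_atm and its exact retry threshold is not shown in this hunk, so the mutex and the half-full threshold here are illustrative assumptions:

#include <algorithm>
#include <mutex>

class RetryThrottle {
 public:
  RetryThrottle(int max_milli_tokens, int milli_token_ratio)
      : max_milli_tokens_(max_milli_tokens),
        milli_token_ratio_(milli_token_ratio),
        milli_tokens_(max_milli_tokens) {}

  // A failure costs 1000 milli-tokens (one whole token); retries stay
  // allowed only while the bucket is above half full (assumed threshold).
  bool RecordFailure() {
    std::lock_guard<std::mutex> lock(mu_);
    milli_tokens_ = std::max(0, milli_tokens_ - 1000);
    return milli_tokens_ > max_milli_tokens_ / 2;
  }

  // A success refills milli_token_ratio_ milli-tokens, capped at the max.
  void RecordSuccess() {
    std::lock_guard<std::mutex> lock(mu_);
    milli_tokens_ =
        std::min(max_milli_tokens_, milli_tokens_ + milli_token_ratio_);
  }

 private:
  std::mutex mu_;
  const int max_milli_tokens_;
  const int milli_token_ratio_;
  int milli_tokens_;
};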
diff --git a/src/core/ext/filters/client_channel/retry_throttle.h b/src/core/ext/filters/client_channel/retry_throttle.h
index bf99297e98..0505fc27f2 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.h
+++ b/src/core/ext/filters/client_channel/retry_throttle.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H
+#include <grpc/support/port_platform.h>
+
#include <stdbool.h>
/// Tracks retry throttling data for an individual server name.
diff --git a/src/core/ext/filters/client_channel/status_util.cc b/src/core/ext/filters/client_channel/status_util.cc
new file mode 100644
index 0000000000..11f732ab44
--- /dev/null
+++ b/src/core/ext/filters/client_channel/status_util.cc
@@ -0,0 +1,100 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/status_util.h"
+
+#include "src/core/lib/gpr/useful.h"
+
+typedef struct {
+ const char* str;
+ grpc_status_code status;
+} status_string_entry;
+
+static const status_string_entry g_status_string_entries[] = {
+ {"OK", GRPC_STATUS_OK},
+ {"CANCELLED", GRPC_STATUS_CANCELLED},
+ {"UNKNOWN", GRPC_STATUS_UNKNOWN},
+ {"INVALID_ARGUMENT", GRPC_STATUS_INVALID_ARGUMENT},
+ {"DEADLINE_EXCEEDED", GRPC_STATUS_DEADLINE_EXCEEDED},
+ {"NOT_FOUND", GRPC_STATUS_NOT_FOUND},
+ {"ALREADY_EXISTS", GRPC_STATUS_ALREADY_EXISTS},
+ {"PERMISSION_DENIED", GRPC_STATUS_PERMISSION_DENIED},
+ {"UNAUTHENTICATED", GRPC_STATUS_UNAUTHENTICATED},
+ {"RESOURCE_EXHAUSTED", GRPC_STATUS_RESOURCE_EXHAUSTED},
+ {"FAILED_PRECONDITION", GRPC_STATUS_FAILED_PRECONDITION},
+ {"ABORTED", GRPC_STATUS_ABORTED},
+ {"OUT_OF_RANGE", GRPC_STATUS_OUT_OF_RANGE},
+ {"UNIMPLEMENTED", GRPC_STATUS_UNIMPLEMENTED},
+ {"INTERNAL", GRPC_STATUS_INTERNAL},
+ {"UNAVAILABLE", GRPC_STATUS_UNAVAILABLE},
+ {"DATA_LOSS", GRPC_STATUS_DATA_LOSS},
+};
+
+bool grpc_status_code_from_string(const char* status_str,
+ grpc_status_code* status) {
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(g_status_string_entries); ++i) {
+ if (strcmp(status_str, g_status_string_entries[i].str) == 0) {
+ *status = g_status_string_entries[i].status;
+ return true;
+ }
+ }
+ return false;
+}
+
+const char* grpc_status_code_to_string(grpc_status_code status) {
+ switch (status) {
+ case GRPC_STATUS_OK:
+ return "OK";
+ case GRPC_STATUS_CANCELLED:
+ return "CANCELLED";
+ case GRPC_STATUS_UNKNOWN:
+ return "UNKNOWN";
+ case GRPC_STATUS_INVALID_ARGUMENT:
+ return "INVALID_ARGUMENT";
+ case GRPC_STATUS_DEADLINE_EXCEEDED:
+ return "DEADLINE_EXCEEDED";
+ case GRPC_STATUS_NOT_FOUND:
+ return "NOT_FOUND";
+ case GRPC_STATUS_ALREADY_EXISTS:
+ return "ALREADY_EXISTS";
+ case GRPC_STATUS_PERMISSION_DENIED:
+ return "PERMISSION_DENIED";
+ case GRPC_STATUS_UNAUTHENTICATED:
+ return "UNAUTHENTICATED";
+ case GRPC_STATUS_RESOURCE_EXHAUSTED:
+ return "RESOURCE_EXHAUSTED";
+ case GRPC_STATUS_FAILED_PRECONDITION:
+ return "FAILED_PRECONDITION";
+ case GRPC_STATUS_ABORTED:
+ return "ABORTED";
+ case GRPC_STATUS_OUT_OF_RANGE:
+ return "OUT_OF_RANGE";
+ case GRPC_STATUS_UNIMPLEMENTED:
+ return "UNIMPLEMENTED";
+ case GRPC_STATUS_INTERNAL:
+ return "INTERNAL";
+ case GRPC_STATUS_UNAVAILABLE:
+ return "UNAVAILABLE";
+ case GRPC_STATUS_DATA_LOSS:
+ return "DATA_LOSS";
+ default:
+ return "UNKNOWN";
+ }
+}
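
A quick usage sketch for the two helpers above (assuming it is built inside the tree so the header resolves):

#include <stdio.h>

#include "src/core/ext/filters/client_channel/status_util.h"

int main() {
  grpc_status_code code;
  // Parse a status name, e.g. from a retry-policy config string.
  if (grpc_status_code_from_string("UNAVAILABLE", &code)) {
    printf("parsed %d -> %s\n", code, grpc_status_code_to_string(code));
  }
  // Unknown names are rejected rather than mapped to a default.
  if (!grpc_status_code_from_string("NOT_A_STATUS", &code)) {
    printf("rejected invalid status name\n");
  }
  return 0;
}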
diff --git a/src/core/ext/filters/client_channel/status_util.h b/src/core/ext/filters/client_channel/status_util.h
new file mode 100644
index 0000000000..e018709730
--- /dev/null
+++ b/src/core/ext/filters/client_channel/status_util.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_STATUS_UTIL_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_STATUS_UTIL_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/status.h>
+
+#include <stdbool.h>
+#include <string.h>
+
+/// If \a status_str is a valid status string, sets \a status to the
+/// corresponding status value and returns true; otherwise returns false.
+bool grpc_status_code_from_string(const char* status_str,
+ grpc_status_code* status);
+
+/// Returns the string form of \a status, or "UNKNOWN" if invalid.
+const char* grpc_status_code_to_string(grpc_status_code status);
+
+namespace grpc_core {
+namespace internal {
+
+/// A set of grpc_status_code values.
+class StatusCodeSet {
+ public:
+ bool Empty() const { return status_code_mask_ == 0; }
+
+ void Add(grpc_status_code status) { status_code_mask_ |= (1 << status); }
+
+ bool Contains(grpc_status_code status) const {
+ return status_code_mask_ & (1 << status);
+ }
+
+ private:
+ int status_code_mask_ = 0; // A bitfield of status codes in the set.
+};
+
+} // namespace internal
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_STATUS_UTIL_H */
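
StatusCodeSet packs one bit per status code into an int. A short usage sketch; framing it as a retry policy's retryable-code set is an assumption, not something this hunk shows:

#include <assert.h>

#include "src/core/ext/filters/client_channel/status_util.h"

int main() {
  // Hypothetical use: the codes a retry policy treats as retryable.
  grpc_core::internal::StatusCodeSet retryable;
  assert(retryable.Empty());
  retryable.Add(GRPC_STATUS_UNAVAILABLE);
  retryable.Add(GRPC_STATUS_ABORTED);
  assert(retryable.Contains(GRPC_STATUS_UNAVAILABLE));
  assert(!retryable.Contains(GRPC_STATUS_INTERNAL));
  return 0;
}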
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index fbe07c58f7..cae7cc35e3 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/subchannel.h"
#include <inttypes.h>
@@ -657,7 +659,6 @@ static void on_subchannel_connected(void* arg, grpc_error* error) {
static void subchannel_call_destroy(void* call, grpc_error* error) {
GPR_TIMER_SCOPE("grpc_subchannel_call_unref.destroy", 0);
grpc_subchannel_call* c = static_cast<grpc_subchannel_call*>(call);
- GPR_ASSERT(c->schedule_closure_after_destroy != nullptr);
grpc_core::ConnectedSubchannel* connection = c->connection;
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
c->schedule_closure_after_destroy);
@@ -671,9 +672,10 @@ void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
call->schedule_closure_after_destroy = closure;
}
-void grpc_subchannel_call_ref(
+grpc_subchannel_call* grpc_subchannel_call_ref(
grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
+ return c;
}
void grpc_subchannel_call_unref(
@@ -703,6 +705,13 @@ const grpc_subchannel_key* grpc_subchannel_get_key(
return subchannel->key;
}
+void* grpc_connected_subchannel_call_get_parent_data(
+ grpc_subchannel_call* subchannel_call) {
+ grpc_channel_stack* chanstk = subchannel_call->connection->channel_stack();
+ return (char*)subchannel_call + sizeof(grpc_subchannel_call) +
+ chanstk->call_stack_size;
+}
+
grpc_call_stack* grpc_subchannel_call_get_call_stack(
grpc_subchannel_call* subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
@@ -774,8 +783,8 @@ void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
*call = static_cast<grpc_subchannel_call*>(gpr_arena_alloc(
- args.arena,
- sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size));
+ args.arena, sizeof(grpc_subchannel_call) +
+ channel_stack_->call_stack_size + args.parent_data_size));
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
RefCountedPtr<ConnectedSubchannel> connection =
Ref(DEBUG_LOCATION, "subchannel_call");
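
Taken together, the two hunks above lay each call out as a single arena block: the grpc_subchannel_call header, then the call stack, then parent_data_size bytes owned by the caller. A freestanding sketch of that sizing and pointer arithmetic, with illustrative names and plain malloc in place of the arena:

#include <stddef.h>
#include <stdlib.h>

// Illustrative stand-in for grpc_subchannel_call's header fields.
struct CallHeader {
  size_t call_stack_size;
};

// One block holds header + call stack + caller-owned parent data, mirroring
// the gpr_arena_alloc() sizing in CreateCall() above.
CallHeader* CreateCall(size_t call_stack_size, size_t parent_data_size) {
  CallHeader* call = static_cast<CallHeader*>(
      malloc(sizeof(CallHeader) + call_stack_size + parent_data_size));
  call->call_stack_size = call_stack_size;
  return call;
}

// Parent data starts immediately after the header and the call stack --
// the same arithmetic as grpc_connected_subchannel_call_get_parent_data().
void* GetParentData(CallHeader* call) {
  return reinterpret_cast<char*>(call) + sizeof(CallHeader) +
         call->call_stack_size;
}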
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index d2b45ae9c8..e23aec12df 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gpr/arena.h"
@@ -79,6 +81,7 @@ class ConnectedSubchannel : public RefCountedWithTracing<ConnectedSubchannel> {
gpr_arena* arena;
grpc_call_context_element* context;
grpc_call_combiner* call_combiner;
+ size_t parent_data_size;
};
explicit ConnectedSubchannel(grpc_channel_stack* channel_stack);
@@ -107,11 +110,17 @@ grpc_subchannel* grpc_subchannel_weak_ref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_ref(
+grpc_subchannel_call* grpc_subchannel_call_ref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+/** Returns a pointer to the parent data associated with \a subchannel_call.
The data will be of the size specified in the \a parent_data_size
field of the args passed to \a ConnectedSubchannel::CreateCall(). */
+void* grpc_connected_subchannel_call_get_parent_data(
+ grpc_subchannel_call* subchannel_call);
+
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel* channel, grpc_error** error);
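
From the caller's side, the new field plus accessor act as a per-call scratch area. A hedged sketch of the intended flow; CallStats and the callback are hypothetical:

#include <new>

// Hypothetical per-call state an LB policy might want to attach.
struct CallStats {
  int messages_sent = 0;
};

// Declarations standing in for the real API above; not defined here.
struct grpc_subchannel_call;
void* grpc_connected_subchannel_call_get_parent_data(
    grpc_subchannel_call* subchannel_call);

void OnSubchannelCallStarted(grpc_subchannel_call* call) {
  // The creator passed args.parent_data_size = sizeof(CallStats), so these
  // bytes belong to us; construct in place and update per call.
  CallStats* stats = new (grpc_connected_subchannel_call_get_parent_data(
      call)) CallStats();
  ++stats->messages_sent;
}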
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index d1dc5ee970..cb02b1a748 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -16,6 +16,8 @@
//
//
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include <stdbool.h>
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index bd160a3b13..a7dae9d47d 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/subchannel.h"
/** \file Provides an index of active subchannels so that they can be
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index cd07a6fbf5..0572034a9c 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -16,6 +16,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include <string.h>
@@ -23,7 +25,6 @@
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gpr/string.h"
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
index 24ff06c0b5..1966da932b 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/ext/filters/client_channel/uri_parser.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H
+#include <grpc/support/port_platform.h>
+
#include <stddef.h>
#include "src/core/lib/iomgr/exec_ctx.h"