Diffstat (limited to 'src/core/ext/filters')
 src/core/ext/filters/client_channel/client_channel.cc | 490
 src/core/ext/filters/client_channel/health/health_check_client.cc | 18
 src/core/ext/filters/client_channel/health/health_check_client.h | 7
 src/core/ext/filters/client_channel/http_connect_handshaker.cc | 2
 src/core/ext/filters/client_channel/http_proxy.cc | 2
 src/core/ext/filters/client_channel/lb_policy.cc | 2
 src/core/ext/filters/client_channel/lb_policy.h | 24
 src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc | 26
 src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 103
 src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 82
 src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 8
 src/core/ext/filters/client_channel/lb_policy/subchannel_list.h | 8
 src/core/ext/filters/client_channel/lb_policy/xds/xds.cc | 458
 src/core/ext/filters/client_channel/lb_policy_factory.h | 2
 src/core/ext/filters/client_channel/lb_policy_registry.cc | 5
 src/core/ext/filters/client_channel/lb_policy_registry.h | 4
 src/core/ext/filters/client_channel/method_params.cc | 178
 src/core/ext/filters/client_channel/method_params.h | 78
 src/core/ext/filters/client_channel/parse_address.h | 2
 src/core/ext/filters/client_channel/resolver.cc | 2
 src/core/ext/filters/client_channel/resolver.h | 2
 src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 44
 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc | 36
 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h | 1
 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc | 12
 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h | 4
 src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc | 3
 src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc | 17
 src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h | 6
 src/core/ext/filters/client_channel/resolver_factory.h | 2
 src/core/ext/filters/client_channel/resolver_result_parsing.cc | 369
 src/core/ext/filters/client_channel/resolver_result_parsing.h | 146
 src/core/ext/filters/client_channel/subchannel.cc | 32
 src/core/ext/filters/client_channel/subchannel.h | 2
 src/core/ext/filters/client_channel/subchannel_index.cc | 47
 src/core/ext/filters/client_channel/uri_parser.cc | 314
 src/core/ext/filters/client_channel/uri_parser.h | 50
 src/core/ext/filters/deadline/deadline_filter.cc | 42
 src/core/ext/filters/deadline/deadline_filter.h | 22
 src/core/ext/filters/http/client/http_client_filter.cc | 48
 src/core/ext/filters/http/message_compress/message_compress_filter.cc | 44
 src/core/ext/filters/http/server/http_server_filter.cc | 51
 src/core/ext/filters/load_reporting/server_load_reporting_filter.cc | 2
 src/core/ext/filters/message_size/message_size_filter.cc | 95
 44 files changed, 1378 insertions(+), 1514 deletions(-)
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index daf1b89b09..ebc412b468 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -34,9 +34,9 @@
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
-#include "src/core/ext/filters/client_channel/method_params.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
@@ -63,6 +63,8 @@
#include "src/core/lib/transport/status_metadata.h"
using grpc_core::internal::ClientChannelMethodParams;
+using grpc_core::internal::ClientChannelMethodParamsTable;
+using grpc_core::internal::ProcessedResolverResult;
using grpc_core::internal::ServerRetryThrottleData;
/* Client channel implementation */
@@ -83,10 +85,6 @@ grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
struct external_connectivity_watcher;
-typedef grpc_core::SliceHashTable<
- grpc_core::RefCountedPtr<ClientChannelMethodParams>>
- MethodParamsTable;
-
typedef struct client_channel_channel_data {
grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
bool started_resolving;
@@ -102,7 +100,7 @@ typedef struct client_channel_channel_data {
/** retry throttle data */
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
/** maps method names to method_parameters structs */
- grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
@@ -251,66 +249,6 @@ static void start_resolving_locked(channel_data* chand) {
&chand->on_resolver_result_changed);
}
-typedef struct {
- char* server_name;
- grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
-} service_config_parsing_state;
-
-static void parse_retry_throttle_params(
- const grpc_json* field, service_config_parsing_state* parsing_state) {
- if (strcmp(field->key, "retryThrottling") == 0) {
- if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
- if (field->type != GRPC_JSON_OBJECT) return;
- int max_milli_tokens = 0;
- int milli_token_ratio = 0;
- for (grpc_json* sub_field = field->child; sub_field != nullptr;
- sub_field = sub_field->next) {
- if (sub_field->key == nullptr) return;
- if (strcmp(sub_field->key, "maxTokens") == 0) {
- if (max_milli_tokens != 0) return; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return;
- max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
- if (max_milli_tokens == -1) return;
- max_milli_tokens *= 1000;
- } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
- if (milli_token_ratio != 0) return; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return;
- // We support up to 3 decimal digits.
- size_t whole_len = strlen(sub_field->value);
- uint32_t multiplier = 1;
- uint32_t decimal_value = 0;
- const char* decimal_point = strchr(sub_field->value, '.');
- if (decimal_point != nullptr) {
- whole_len = static_cast<size_t>(decimal_point - sub_field->value);
- multiplier = 1000;
- size_t decimal_len = strlen(decimal_point + 1);
- if (decimal_len > 3) decimal_len = 3;
- if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
- &decimal_value)) {
- return;
- }
- uint32_t decimal_multiplier = 1;
- for (size_t i = 0; i < (3 - decimal_len); ++i) {
- decimal_multiplier *= 10;
- }
- decimal_value *= decimal_multiplier;
- }
- uint32_t whole_value;
- if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
- &whole_value)) {
- return;
- }
- milli_token_ratio =
- static_cast<int>((whole_value * multiplier) + decimal_value);
- if (milli_token_ratio <= 0) return;
- }
- }
- parsing_state->retry_throttle_data =
- grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
- parsing_state->server_name, max_milli_tokens, milli_token_ratio);
- }
-}
-
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
static void on_resolver_shutdown_locked(channel_data* chand,
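The retry-throttling parser deleted above moves into the new resolver_result_parsing.cc (see the ProcessedResolverResult usage later in this file). For reference, the tokenRatio math it implements is plain fixed-point conversion: maxTokens is scaled by 1000 and tokenRatio keeps at most three decimal digits, so e.g. "0.25" becomes 250 milli-tokens per token. Below is a minimal standalone sketch of that conversion using only the standard library; ParseTokenRatioMillis is a hypothetical helper name, not part of the gRPC tree.

#include <cstdio>
#include <cstdlib>

// Converts a tokenRatio string to an integer milli-token ratio,
// keeping at most three decimal digits ("0.25" -> 250, "1.5" -> 1500).
// Returns -1 if the value is malformed or not strictly positive.
static int ParseTokenRatioMillis(const char* value) {
  char* end;
  long whole = strtol(value, &end, 10);
  if (end == value || whole < 0) return -1;
  long millis = whole * 1000;
  if (*end == '.') {
    const char* frac = end + 1;
    long frac_val = 0;
    int digits = 0;
    while (*frac >= '0' && *frac <= '9' && digits < 3) {
      frac_val = frac_val * 10 + (*frac - '0');  // accumulate up to 3 digits
      ++frac;
      ++digits;
    }
    while (digits < 3) {  // scale shorter fractions ("0.5" -> 500)
      frac_val *= 10;
      ++digits;
    }
    millis += frac_val;
  }
  return millis > 0 ? static_cast<int>(millis) : -1;
}

int main() {
  printf("%d %d %d\n", ParseTokenRatioMillis("0.25"),
         ParseTokenRatioMillis("1.5"),
         ParseTokenRatioMillis("2"));  // prints: 250 1500 2000
}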
@@ -352,37 +290,6 @@ static void on_resolver_shutdown_locked(channel_data* chand,
GRPC_ERROR_UNREF(error);
}
-// Returns the LB policy name from the resolver result.
-static grpc_core::UniquePtr<char>
-get_lb_policy_name_from_resolver_result_locked(channel_data* chand) {
- // Find LB policy name in channel args.
- const grpc_arg* channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
- const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
- // Special case: If at least one balancer address is present, we use
- // the grpclb policy, regardless of what the resolver actually specified.
- channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses* addresses =
- static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
- if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
- if (lb_policy_name != nullptr &&
- gpr_stricmp(lb_policy_name, "grpclb") != 0) {
- gpr_log(GPR_INFO,
- "resolver requested LB policy %s but provided at least one "
- "balancer address -- forcing use of grpclb LB policy",
- lb_policy_name);
- }
- lb_policy_name = "grpclb";
- }
- }
- // Use pick_first if nothing was specified and we didn't select grpclb
- // above.
- if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
- return grpc_core::UniquePtr<char>(gpr_strdup(lb_policy_name));
-}
-
static void request_reresolution_locked(void* arg, grpc_error* error) {
reresolution_request_args* args =
static_cast<reresolution_request_args*>(arg);
@@ -410,13 +317,14 @@ using TraceStringVector = grpc_core::InlinedVector<char*, 3>;
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
static void create_new_lb_policy_locked(
- channel_data* chand, char* lb_policy_name,
+ channel_data* chand, char* lb_policy_name, grpc_json* lb_config,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error, TraceStringVector* trace_strings) {
grpc_core::LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = chand->combiner;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.args = chand->resolver_result;
+ lb_policy_args.lb_config = lb_config;
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
lb_policy_name, lb_policy_args);
@@ -473,44 +381,6 @@ static void create_new_lb_policy_locked(
}
}
-// Returns the service config (as a JSON string) from the resolver result.
-// Also updates state in chand.
-static grpc_core::UniquePtr<char>
-get_service_config_from_resolver_result_locked(channel_data* chand) {
- const grpc_arg* channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
- const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
- if (service_config_json != nullptr) {
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
- chand, service_config_json);
- }
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
- grpc_core::ServiceConfig::Create(service_config_json);
- if (service_config != nullptr) {
- if (chand->enable_retries) {
- channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
- GPR_ASSERT(server_uri != nullptr);
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
- GPR_ASSERT(uri->path[0] != '\0');
- service_config_parsing_state parsing_state;
- parsing_state.server_name =
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
- service_config->ParseGlobalParams(parse_retry_throttle_params,
- &parsing_state);
- grpc_uri_destroy(uri);
- chand->retry_throttle_data =
- std::move(parsing_state.retry_throttle_data);
- }
- chand->method_params_table = service_config->CreateMethodConfigTable(
- ClientChannelMethodParams::CreateFromJson);
- }
- }
- return grpc_core::UniquePtr<char>(gpr_strdup(service_config_json));
-}
-
static void maybe_add_trace_message_for_address_changes_locked(
channel_data* chand, TraceStringVector* trace_strings) {
int resolution_contains_addresses = false;
@@ -597,36 +467,47 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
}
+ // Don't override connectivity state if we already have an LB policy.
+ if (chand->lb_policy != nullptr) set_connectivity_state = false;
} else {
+ // Parse the resolver result.
+ ProcessedResolverResult resolver_result(chand->resolver_result,
+ chand->enable_retries);
+ chand->retry_throttle_data = resolver_result.retry_throttle_data();
+ chand->method_params_table = resolver_result.method_params_table();
+ grpc_core::UniquePtr<char> service_config_json =
+ resolver_result.service_config_json();
+ if (service_config_json != nullptr && grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
+ chand, service_config_json.get());
+ }
grpc_core::UniquePtr<char> lb_policy_name =
- get_lb_policy_name_from_resolver_result_locked(chand);
+ resolver_result.lb_policy_name();
+ grpc_json* lb_policy_config = resolver_result.lb_policy_config();
// Check to see if we're already using the right LB policy.
// Note: It's safe to use chand->info_lb_policy_name here without
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
// once at any given time.
- bool lb_policy_name_changed = chand->info_lb_policy_name == nullptr ||
- gpr_stricmp(chand->info_lb_policy_name.get(),
- lb_policy_name.get()) != 0;
+ bool lb_policy_name_changed =
+ chand->info_lb_policy_name == nullptr ||
+ strcmp(chand->info_lb_policy_name.get(), lb_policy_name.get()) != 0;
if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
chand, lb_policy_name.get(), chand->lb_policy.get());
}
- chand->lb_policy->UpdateLocked(*chand->resolver_result);
+ chand->lb_policy->UpdateLocked(*chand->resolver_result, lb_policy_config);
// No need to set the channel's connectivity state; the existing
// watch on the LB policy will take care of that.
set_connectivity_state = false;
} else {
// Instantiate new LB policy.
- create_new_lb_policy_locked(chand, lb_policy_name.get(),
+ create_new_lb_policy_locked(chand, lb_policy_name.get(), lb_policy_config,
&connectivity_state, &connectivity_error,
&trace_strings);
}
- // Find service config.
- grpc_core::UniquePtr<char> service_config_json =
- get_service_config_from_resolver_result_locked(chand);
// Note: It's safe to use chand->info_service_config_json here without
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
@@ -689,12 +570,6 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
} else {
grpc_error* error = GRPC_ERROR_NONE;
grpc_core::LoadBalancingPolicy::PickState pick_state;
- pick_state.initial_metadata = nullptr;
- pick_state.initial_metadata_flags = 0;
- pick_state.on_complete = nullptr;
- memset(&pick_state.subchannel_call_context, 0,
- sizeof(pick_state.subchannel_call_context));
- pick_state.user_data = nullptr;
// Pick must return synchronously, because pick_state.on_complete is null.
GPR_ASSERT(chand->lb_policy->PickLocked(&pick_state, &error));
if (pick_state.connected_subchannel != nullptr) {
@@ -938,12 +813,26 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
// (census filter is on top of this one)
// - add census stats for retries
+namespace {
+struct call_data;
+
// State used for starting a retryable batch on a subchannel call.
// This provides its own grpc_transport_stream_op_batch and other data
// structures needed to populate the ops in the batch.
// We allocate one struct on the arena for each attempt at starting a
// batch on a given subchannel call.
-typedef struct {
+struct subchannel_batch_data {
+ subchannel_batch_data(grpc_call_element* elem, call_data* calld, int refcount,
+ bool set_on_complete);
+ // All dtor code must be added in `destroy`. This is because we may
+ // call closures in `subchannel_batch_data` after they are unrefed by
+ // `batch_data_unref`, and msan would complain about accessing this class
+ // after calling dtor. As a result we cannot call the `dtor` in
+ // `batch_data_unref`.
+ // TODO(soheil): We should try to call the dtor in `batch_data_unref`.
+ ~subchannel_batch_data() { destroy(); }
+ void destroy();
+
gpr_refcount refs;
grpc_call_element* elem;
grpc_subchannel_call* subchannel_call; // Holds a ref.
@@ -952,11 +841,23 @@ typedef struct {
grpc_transport_stream_op_batch batch;
// For intercepting on_complete.
grpc_closure on_complete;
-} subchannel_batch_data;
+};
// Retry state associated with a subchannel call.
// Stored in the parent_data of the subchannel call object.
-typedef struct {
+struct subchannel_call_retry_state {
+ explicit subchannel_call_retry_state(grpc_call_context_element* context)
+ : batch_payload(context),
+ started_send_initial_metadata(false),
+ completed_send_initial_metadata(false),
+ started_send_trailing_metadata(false),
+ completed_send_trailing_metadata(false),
+ started_recv_initial_metadata(false),
+ completed_recv_initial_metadata(false),
+ started_recv_trailing_metadata(false),
+ completed_recv_trailing_metadata(false),
+ retry_dispatched(false) {}
+
// subchannel_batch_data.batch.payload points to this.
grpc_transport_stream_op_batch_payload batch_payload;
// For send_initial_metadata.
@@ -975,7 +876,7 @@ typedef struct {
// For intercepting recv_initial_metadata.
grpc_metadata_batch recv_initial_metadata;
grpc_closure recv_initial_metadata_ready;
- bool trailing_metadata_available;
+ bool trailing_metadata_available = false;
// For intercepting recv_message.
grpc_closure recv_message_ready;
grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
@@ -985,10 +886,10 @@ typedef struct {
grpc_closure recv_trailing_metadata_ready;
// These fields indicate which ops have been started and completed on
// this subchannel call.
- size_t started_send_message_count;
- size_t completed_send_message_count;
- size_t started_recv_message_count;
- size_t completed_recv_message_count;
+ size_t started_send_message_count = 0;
+ size_t completed_send_message_count = 0;
+ size_t started_recv_message_count = 0;
+ size_t completed_recv_message_count = 0;
bool started_send_initial_metadata : 1;
bool completed_send_initial_metadata : 1;
bool started_send_trailing_metadata : 1;
@@ -997,14 +898,18 @@ typedef struct {
bool completed_recv_initial_metadata : 1;
bool started_recv_trailing_metadata : 1;
bool completed_recv_trailing_metadata : 1;
+ subchannel_batch_data* recv_initial_metadata_ready_deferred_batch = nullptr;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
+ subchannel_batch_data* recv_message_ready_deferred_batch = nullptr;
+ grpc_error* recv_message_error = GRPC_ERROR_NONE;
+ subchannel_batch_data* recv_trailing_metadata_internal_batch = nullptr;
// State for callback processing.
+ // NOTE: Do not move this next to the metadata bitfields above. That
+ // would save space but would also result in a data race, because the
+ // compiler would generate a two-byte store that overwrites the
+ // metadata bitfields when setting this field.
bool retry_dispatched : 1;
- subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
- grpc_error* recv_initial_metadata_error;
- subchannel_batch_data* recv_message_ready_deferred_batch;
- grpc_error* recv_message_error;
- subchannel_batch_data* recv_trailing_metadata_internal_batch;
-} subchannel_call_retry_state;
+};
// Pending batches stored in call data.
typedef struct {
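The NOTE above about retry_dispatched is worth unpacking: under the C++11 memory model, adjacent bitfields share a single memory location, so a store to any one of them is a read-modify-write of the whole unit, and two threads touching different bits of that unit race. Keeping a non-bitfield member in between gives retry_dispatched its own memory location. A minimal illustration with hypothetical types (not gRPC code):

struct Racy {
  bool started : 1;     // written by one thread
  bool dispatched : 1;  // shares a memory location with `started`:
                        // storing it rewrites the containing byte,
                        // racing with concurrent writes to `started`
};

struct Safe {
  bool started : 1;
  void* separator;      // any non-bitfield member ends the bitfield
                        // sequence and starts a new memory location
  bool dispatched : 1;  // now in its own memory location; a store
                        // here cannot clobber `started`
};

int main() { Racy r{}; Safe s{}; (void)r; (void)s; }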
@@ -1019,7 +924,44 @@ typedef struct {
Handles queueing of stream ops until a call object is ready, waiting
for initial metadata before trying to create a call object,
and handling cancellation gracefully. */
-typedef struct client_channel_call_data {
+struct call_data {
+ call_data(grpc_call_element* elem, const channel_data& chand,
+ const grpc_call_element_args& args)
+ : deadline_state(elem, args.call_stack, args.call_combiner,
+ GPR_LIKELY(chand.deadline_checking_enabled)
+ ? args.deadline
+ : GRPC_MILLIS_INF_FUTURE),
+ path(grpc_slice_ref_internal(args.path)),
+ call_start_time(args.start_time),
+ deadline(args.deadline),
+ arena(args.arena),
+ owning_call(args.call_stack),
+ call_combiner(args.call_combiner),
+ pending_send_initial_metadata(false),
+ pending_send_message(false),
+ pending_send_trailing_metadata(false),
+ enable_retries(chand.enable_retries),
+ retry_committed(false),
+ last_attempt_got_server_pushback(false) {}
+
+ ~call_data() {
+ if (GPR_LIKELY(subchannel_call != nullptr)) {
+ GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call,
+ "client_channel_destroy_call");
+ }
+ grpc_slice_unref_internal(path);
+ GRPC_ERROR_UNREF(cancel_error);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
+ GPR_ASSERT(pending_batches[i].batch == nullptr);
+ }
+ for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
+ if (pick.subchannel_call_context[i].value != nullptr) {
+ pick.subchannel_call_context[i].destroy(
+ pick.subchannel_call_context[i].value);
+ }
+ }
+ }
+
// State for handling deadlines.
// The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
@@ -1038,24 +980,24 @@ typedef struct client_channel_call_data {
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
- grpc_subchannel_call* subchannel_call;
+ grpc_subchannel_call* subchannel_call = nullptr;
// Set when we get a cancel_stream op.
- grpc_error* cancel_error;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
grpc_core::LoadBalancingPolicy::PickState pick;
grpc_closure pick_closure;
grpc_closure pick_cancel_closure;
- grpc_polling_entity* pollent;
- bool pollent_added_to_interested_parties;
+ grpc_polling_entity* pollent = nullptr;
+ bool pollent_added_to_interested_parties = false;
// Batches are added to this list when received from above.
// They are removed when we are done handling the batch (i.e., when
// either we have invoked all of the batch's callbacks or we have
// passed the batch down to the subchannel call and are not
// intercepting any of its callbacks).
- pending_batch pending_batches[MAX_PENDING_BATCHES];
+ pending_batch pending_batches[MAX_PENDING_BATCHES] = {};
bool pending_send_initial_metadata : 1;
bool pending_send_message : 1;
bool pending_send_trailing_metadata : 1;
@@ -1064,8 +1006,8 @@ typedef struct client_channel_call_data {
bool enable_retries : 1;
bool retry_committed : 1;
bool last_attempt_got_server_pushback : 1;
- int num_attempts_completed;
- size_t bytes_buffered_for_retry;
+ int num_attempts_completed = 0;
+ size_t bytes_buffered_for_retry = 0;
grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
grpc_timer retry_timer;
@@ -1076,12 +1018,12 @@ typedef struct client_channel_call_data {
// until all of these batches have completed.
// Note that we actually only need to track replay batches, but it's
// easier to track all batches with send ops.
- int num_pending_retriable_subchannel_send_batches;
+ int num_pending_retriable_subchannel_send_batches = 0;
// Cached data for retrying send ops.
// send_initial_metadata
- bool seen_send_initial_metadata;
- grpc_linked_mdelem* send_initial_metadata_storage;
+ bool seen_send_initial_metadata = false;
+ grpc_linked_mdelem* send_initial_metadata_storage = nullptr;
grpc_metadata_batch send_initial_metadata;
uint32_t send_initial_metadata_flags;
gpr_atm* peer_string;
@@ -1092,14 +1034,13 @@ typedef struct client_channel_call_data {
// Note: We inline the cache for the first 3 send_message ops and use
// dynamic allocation after that. This number was essentially picked
// at random; it could be changed in the future to tune performance.
- grpc_core::ManualConstructor<
- grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3>>
- send_messages;
+ grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
// send_trailing_metadata
- bool seen_send_trailing_metadata;
- grpc_linked_mdelem* send_trailing_metadata_storage;
+ bool seen_send_trailing_metadata = false;
+ grpc_linked_mdelem* send_trailing_metadata_storage = nullptr;
grpc_metadata_batch send_trailing_metadata;
-} call_data;
+};
+} // namespace
// Forward declarations.
static void retry_commit(grpc_call_element* elem,
@@ -1143,7 +1084,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
new (cache) grpc_core::ByteStreamCache(
std::move(batch->payload->send_message.send_message));
- calld->send_messages->push_back(cache);
+ calld->send_messages.push_back(cache);
}
// Save metadata batch for send_trailing_metadata ops.
if (batch->send_trailing_metadata) {
@@ -1180,7 +1121,7 @@ static void free_cached_send_message(channel_data* chand, call_data* calld,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
chand, calld, idx);
}
- (*calld->send_messages)[idx]->Destroy();
+ calld->send_messages[idx]->Destroy();
}
// Frees cached send_trailing_metadata.
@@ -1650,55 +1591,66 @@ static bool maybe_retry(grpc_call_element* elem,
// subchannel_batch_data
//
-// Creates a subchannel_batch_data object on the call's arena with the
-// specified refcount. If set_on_complete is true, the batch's
-// on_complete callback will be set to point to on_complete();
-// otherwise, the batch's on_complete callback will be null.
-static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
- int refcount,
- bool set_on_complete) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
+namespace {
+subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
+ call_data* calld, int refcount,
+ bool set_on_complete)
+ : elem(elem),
+ subchannel_call(GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call,
+ "batch_data_create")) {
subchannel_call_retry_state* retry_state =
static_cast<subchannel_call_retry_state*>(
grpc_connected_subchannel_call_get_parent_data(
calld->subchannel_call));
- subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
- gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
- batch_data->elem = elem;
- batch_data->subchannel_call =
- GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
- batch_data->batch.payload = &retry_state->batch_payload;
- gpr_ref_init(&batch_data->refs, refcount);
+ batch.payload = &retry_state->batch_payload;
+ gpr_ref_init(&refs, refcount);
if (set_on_complete) {
- GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
+ GRPC_CLOSURE_INIT(&on_complete, ::on_complete, this,
grpc_schedule_on_exec_ctx);
- batch_data->batch.on_complete = &batch_data->on_complete;
+ batch.on_complete = &on_complete;
}
GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
+}
+
+void subchannel_batch_data::destroy() {
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(subchannel_call));
+ if (batch.send_initial_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
+ }
+ if (batch.send_trailing_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
+ }
+ if (batch.recv_initial_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
+ }
+ if (batch.recv_trailing_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "batch_data_unref");
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+}
+} // namespace
+
+// Creates a subchannel_batch_data object on the call's arena with the
+// specified refcount. If set_on_complete is true, the batch's
+// on_complete callback will be set to point to on_complete();
+// otherwise, the batch's on_complete callback will be null.
+static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
+ int refcount,
+ bool set_on_complete) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ subchannel_batch_data* batch_data =
+ new (gpr_arena_alloc(calld->arena, sizeof(*batch_data)))
+ subchannel_batch_data(elem, calld, refcount, set_on_complete);
return batch_data;
}
static void batch_data_unref(subchannel_batch_data* batch_data) {
if (gpr_unref(&batch_data->refs)) {
- subchannel_call_retry_state* retry_state =
- static_cast<subchannel_call_retry_state*>(
- grpc_connected_subchannel_call_get_parent_data(
- batch_data->subchannel_call));
- if (batch_data->batch.send_initial_metadata) {
- grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
- }
- if (batch_data->batch.send_trailing_metadata) {
- grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
- }
- if (batch_data->batch.recv_initial_metadata) {
- grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
- }
- if (batch_data->batch.recv_trailing_metadata) {
- grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
- }
- GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
- call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
- GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+ batch_data->destroy();
}
}
@@ -1996,7 +1948,7 @@ static bool pending_batch_is_unstarted(
return true;
}
if (pending->batch->send_message &&
- retry_state->started_send_message_count < calld->send_messages->size()) {
+ retry_state->started_send_message_count < calld->send_messages.size()) {
return true;
}
if (pending->batch->send_trailing_metadata &&
@@ -2152,7 +2104,7 @@ static void add_closures_for_replay_or_pending_send_ops(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
bool have_pending_send_message_ops =
- retry_state->started_send_message_count < calld->send_messages->size();
+ retry_state->started_send_message_count < calld->send_messages.size();
bool have_pending_send_trailing_metadata_op =
calld->seen_send_trailing_metadata &&
!retry_state->started_send_trailing_metadata;
@@ -2344,7 +2296,7 @@ static void add_retriable_send_message_op(
chand, calld, retry_state->started_send_message_count);
}
grpc_core::ByteStreamCache* cache =
- (*calld->send_messages)[retry_state->started_send_message_count];
+ calld->send_messages[retry_state->started_send_message_count];
++retry_state->started_send_message_count;
retry_state->send_message.Init(cache);
batch_data->batch.send_message = true;
@@ -2476,7 +2428,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
}
// send_message.
// Note that we can only have one send_message op in flight at a time.
- if (retry_state->started_send_message_count < calld->send_messages->size() &&
+ if (retry_state->started_send_message_count < calld->send_messages.size() &&
retry_state->started_send_message_count ==
retry_state->completed_send_message_count &&
!calld->pending_send_message) {
@@ -2497,7 +2449,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
// to start, since we can't send down any more send_message ops after
// send_trailing_metadata.
if (calld->seen_send_trailing_metadata &&
- retry_state->started_send_message_count == calld->send_messages->size() &&
+ retry_state->started_send_message_count == calld->send_messages.size() &&
!retry_state->started_send_trailing_metadata &&
!calld->pending_send_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
@@ -2549,7 +2501,7 @@ static void add_subchannel_batches_for_pending_batches(
// send_message ops after send_trailing_metadata.
if (batch->send_trailing_metadata &&
(retry_state->started_send_message_count + batch->send_message <
- calld->send_messages->size() ||
+ calld->send_messages.size() ||
retry_state->started_send_trailing_metadata)) {
continue;
}
@@ -2715,17 +2667,10 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
new_error = grpc_error_add_child(new_error, error);
pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
} else {
- grpc_core::channelz::SubchannelNode* channelz_subchannel =
- calld->pick.connected_subchannel->channelz_subchannel();
- if (channelz_subchannel != nullptr) {
- channelz_subchannel->RecordCallStarted();
- }
if (parent_data_size > 0) {
- subchannel_call_retry_state* retry_state =
- static_cast<subchannel_call_retry_state*>(
- grpc_connected_subchannel_call_get_parent_data(
- calld->subchannel_call));
- retry_state->batch_payload.context = calld->pick.subchannel_call_context;
+ new (grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call))
+ subchannel_call_retry_state(calld->pick.subchannel_call_context);
}
pending_batches_resume(elem);
}
@@ -2951,6 +2896,27 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
}
}
+// If the channel is in TRANSIENT_FAILURE and the call is not
+// wait_for_ready=true, fails the call and returns true.
+static bool fail_call_if_in_transient_failure(grpc_call_element* elem) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[0].batch;
+ if (grpc_connectivity_state_check(&chand->state_tracker) ==
+ GRPC_CHANNEL_TRANSIENT_FAILURE &&
+ (batch->payload->send_initial_metadata.send_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
+ pending_batches_fail(
+ elem,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "channel is in state TRANSIENT_FAILURE"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+ true /* yield_call_combiner */);
+ return true;
+ }
+ return false;
+}
+
// Invoked once resolver results are available.
static void process_service_config_and_start_lb_pick_locked(
grpc_call_element* elem) {
@@ -2958,6 +2924,9 @@ static void process_service_config_and_start_lb_pick_locked(
// Only get service config data on the first attempt.
if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
apply_service_config_to_call_locked(elem);
+ // Check this after applying service config, since it may have
+ // affected the call's wait_for_ready value.
+ if (fail_call_if_in_transient_failure(elem)) return;
}
// Start LB pick.
grpc_core::LbPicker::StartLocked(elem);
@@ -3127,6 +3096,16 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
// We do not yet have an LB policy, so wait for a resolver result.
if (GPR_UNLIKELY(!chand->started_resolving)) {
start_resolving_locked(chand);
+ } else {
+ // Normally, we want to do this check in
+ // process_service_config_and_start_lb_pick_locked(), so that we
+ // can honor the wait_for_ready setting in the service config.
+ // However, if the channel is in TRANSIENT_FAILURE at this point, that
+ // means that the resolver has returned a failure, so we're not going
+ // to get a service config right away. In that case, we fail the
+ // call now based on the wait_for_ready value passed in from the
+ // application.
+ if (fail_call_if_in_transient_failure(elem)) return;
}
// Create a new waiter, which will delete itself when done.
grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
@@ -3231,21 +3210,8 @@ static void cc_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* cc_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- // Initialize data members.
- calld->path = grpc_slice_ref_internal(args->path);
- calld->call_start_time = args->start_time;
- calld->deadline = args->deadline;
- calld->arena = args->arena;
- calld->owning_call = args->call_stack;
- calld->call_combiner = args->call_combiner;
- if (GPR_LIKELY(chand->deadline_checking_enabled)) {
- grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
- calld->deadline);
- }
- calld->enable_retries = chand->enable_retries;
- calld->send_messages.Init();
+ new (elem->call_data) call_data(elem, *chand, *args);
return GRPC_ERROR_NONE;
}
@@ -3254,34 +3220,12 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* then_schedule_closure) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- if (GPR_LIKELY(chand->deadline_checking_enabled)) {
- grpc_deadline_state_destroy(elem);
- }
- grpc_slice_unref_internal(calld->path);
- calld->retry_throttle_data.reset();
- calld->method_params.reset();
- GRPC_ERROR_UNREF(calld->cancel_error);
if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
then_schedule_closure = nullptr;
- GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
- "client_channel_destroy_call");
- }
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
- GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
- }
- if (GPR_LIKELY(calld->pick.connected_subchannel != nullptr)) {
- calld->pick.connected_subchannel.reset();
- }
- for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
- if (calld->pick.subchannel_call_context[i].value != nullptr) {
- calld->pick.subchannel_call_context[i].destroy(
- calld->pick.subchannel_call_context[i].value);
- }
}
- calld->send_messages.Destroy();
+ calld->~call_data();
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}
diff --git a/src/core/ext/filters/client_channel/health/health_check_client.cc b/src/core/ext/filters/client_channel/health/health_check_client.cc
index 400d99b07c..2232c57120 100644
--- a/src/core/ext/filters/client_channel/health/health_check_client.cc
+++ b/src/core/ext/filters/client_channel/health/health_check_client.cc
@@ -19,6 +19,7 @@
#include <grpc/support/port_platform.h>
#include <stdint.h>
+#include <stdio.h>
#include "src/core/ext/filters/client_channel/health/health_check_client.h"
@@ -50,8 +51,7 @@ HealthCheckClient::HealthCheckClient(
RefCountedPtr<ConnectedSubchannel> connected_subchannel,
grpc_pollset_set* interested_parties,
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode> channelz_node)
- : InternallyRefCountedWithTracing<HealthCheckClient>(
- &grpc_health_check_client_trace),
+ : InternallyRefCounted<HealthCheckClient>(&grpc_health_check_client_trace),
service_name_(service_name),
connected_subchannel_(std::move(connected_subchannel)),
interested_parties_(interested_parties),
@@ -280,15 +280,13 @@ bool DecodeResponse(grpc_slice_buffer* slice_buffer, grpc_error** error) {
HealthCheckClient::CallState::CallState(
RefCountedPtr<HealthCheckClient> health_check_client,
grpc_pollset_set* interested_parties)
- : InternallyRefCountedWithTracing<CallState>(
- &grpc_health_check_client_trace),
+ : InternallyRefCounted<CallState>(&grpc_health_check_client_trace),
health_check_client_(std::move(health_check_client)),
pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
arena_(gpr_arena_create(health_check_client_->connected_subchannel_
- ->GetInitialCallSizeEstimate(0))) {
- memset(&call_combiner_, 0, sizeof(call_combiner_));
+ ->GetInitialCallSizeEstimate(0))),
+ payload_(context_) {
grpc_call_combiner_init(&call_combiner_);
- memset(context_, 0, sizeof(context_));
gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(0));
}
@@ -298,6 +296,11 @@ HealthCheckClient::CallState::~CallState() {
health_check_client_.get(), this);
}
if (call_ != nullptr) GRPC_SUBCHANNEL_CALL_UNREF(call_, "call_ended");
+ for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) {
+ if (context_[i].destroy != nullptr) {
+ context_[i].destroy(context_[i].value);
+ }
+ }
// Unset the call combiner cancellation closure. This has the
// effect of scheduling the previously set cancellation closure, if
// any, so that it can release any internal references it may be
@@ -345,6 +348,7 @@ void HealthCheckClient::CallState::StartCall() {
}
// Initialize payload and batch.
memset(&batch_, 0, sizeof(batch_));
+ payload_.context = context_;
batch_.payload = &payload_;
// on_complete callback takes ref, handled manually.
Ref(DEBUG_LOCATION, "on_complete").release();
diff --git a/src/core/ext/filters/client_channel/health/health_check_client.h b/src/core/ext/filters/client_channel/health/health_check_client.h
index 7f77348f18..2369b73fea 100644
--- a/src/core/ext/filters/client_channel/health/health_check_client.h
+++ b/src/core/ext/filters/client_channel/health/health_check_client.h
@@ -41,8 +41,7 @@
namespace grpc_core {
-class HealthCheckClient
- : public InternallyRefCountedWithTracing<HealthCheckClient> {
+class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
public:
HealthCheckClient(const char* service_name,
RefCountedPtr<ConnectedSubchannel> connected_subchannel,
@@ -61,7 +60,7 @@ class HealthCheckClient
private:
// Contains a call to the backend and all the data related to the call.
- class CallState : public InternallyRefCountedWithTracing<CallState> {
+ class CallState : public InternallyRefCounted<CallState> {
public:
CallState(RefCountedPtr<HealthCheckClient> health_check_client,
grpc_pollset_set* interested_parties_);
@@ -97,7 +96,7 @@ class HealthCheckClient
gpr_arena* arena_;
grpc_call_combiner call_combiner_;
- grpc_call_context_element context_[GRPC_CONTEXT_COUNT];
+ grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
// The streaming call to the backend. Always non-NULL.
grpc_subchannel_call* call_;
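The substitution running through this change, here and in lb_policy and grpclb below, replaces InternallyRefCountedWithTracing with InternallyRefCounted, whose constructor now accepts the trace flag directly. A rough standalone sketch of the shape such a trace-aware intrusive refcount base might take; this is an illustration under that assumption, not gRPC's actual implementation:

#include <atomic>
#include <cstdio>

struct TraceFlag {
  bool enabled;
  const char* name;
};

template <typename Child>
class RefCountedSketch {
 protected:
  explicit RefCountedSketch(TraceFlag* trace) : trace_(trace) {}
  virtual ~RefCountedSketch() = default;

 public:
  void Ref() {
    int prev = refs_.fetch_add(1, std::memory_order_relaxed);
    if (trace_ != nullptr && trace_->enabled) {
      fprintf(stderr, "%s %p: ref %d -> %d\n", trace_->name,
              static_cast<void*>(this), prev, prev + 1);
    }
  }
  void Unref() {
    int prev = refs_.fetch_sub(1, std::memory_order_acq_rel);
    if (trace_ != nullptr && trace_->enabled) {
      fprintf(stderr, "%s %p: unref %d -> %d\n", trace_->name,
              static_cast<void*>(this), prev, prev - 1);
    }
    if (prev == 1) delete static_cast<Child*>(this);  // last ref released
  }

 private:
  TraceFlag* trace_;
  std::atomic<int> refs_{1};
};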
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index bfabc68c66..0716e46818 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -29,7 +29,6 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/gpr/env.h"
@@ -37,6 +36,7 @@
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/uri/uri_parser.h"
typedef struct http_connect_handshaker {
// Base class. Must be first.
diff --git a/src/core/ext/filters/client_channel/http_proxy.cc b/src/core/ext/filters/client_channel/http_proxy.cc
index 26d3f479b7..8951a2920c 100644
--- a/src/core/ext/filters/client_channel/http_proxy.cc
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -29,12 +29,12 @@
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/slice/b64.h"
+#include "src/core/lib/uri/uri_parser.h"
/**
* Parses the 'https_proxy' env var (fallback on 'http_proxy') and returns the
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index e065f45639..b4e803689e 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -27,7 +27,7 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
namespace grpc_core {
LoadBalancingPolicy::LoadBalancingPolicy(const Args& args)
- : InternallyRefCountedWithTracing(&grpc_trace_lb_policy_refcount),
+ : InternallyRefCounted(&grpc_trace_lb_policy_refcount),
combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
client_channel_factory_(args.client_channel_factory),
interested_parties_(grpc_pollset_set_create()),
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 21f80b7b94..7034da6249 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -42,8 +42,7 @@ namespace grpc_core {
///
/// Any I/O done by the LB policy should be done under the pollset_set
/// returned by \a interested_parties().
-class LoadBalancingPolicy
- : public InternallyRefCountedWithTracing<LoadBalancingPolicy> {
+class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
public:
struct Args {
/// The combiner under which all LB policy calls will be run.
@@ -58,44 +57,47 @@ class LoadBalancingPolicy
/// Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_LB_ADDRESSES channel arg.
grpc_channel_args* args = nullptr;
+ /// Load balancing config from the resolver.
+ grpc_json* lb_config = nullptr;
};
/// State used for an LB pick.
struct PickState {
/// Initial metadata associated with the picking call.
- grpc_metadata_batch* initial_metadata;
+ grpc_metadata_batch* initial_metadata = nullptr;
/// Bitmask used for selective cancelling. See
/// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
- uint32_t initial_metadata_flags;
+ uint32_t initial_metadata_flags = 0;
/// Storage for LB token in \a initial_metadata, or nullptr if not used.
grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously.
/// If null, pick will fail if a result is not available synchronously.
- grpc_closure* on_complete;
+ grpc_closure* on_complete = nullptr;
/// Will be set to the selected subchannel, or nullptr on failure or when
/// the LB policy decides to drop the call.
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if
/// needed.
- grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
+ grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {};
/// Upon success, \a *user_data will be set to whatever opaque information
/// may need to be propagated from the LB policy, or nullptr if not needed.
// TODO(roth): As part of revamping our metadata APIs, try to find a
// way to clean this up and C++-ify it.
- void** user_data;
+ void** user_data = nullptr;
/// Next pointer. For internal use by LB policy.
- PickState* next;
+ PickState* next = nullptr;
};
// Not copyable nor movable.
LoadBalancingPolicy(const LoadBalancingPolicy&) = delete;
LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete;
- /// Updates the policy with a new set of \a args from the resolver.
- /// Note that the LB policy gets the set of addresses from the
+ /// Updates the policy with a new set of \a args and a new \a lb_config from
+ /// the resolver. Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_LB_ADDRESSES channel arg.
- virtual void UpdateLocked(const grpc_channel_args& args) GRPC_ABSTRACT;
+ virtual void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) GRPC_ABSTRACT;
/// Finds an appropriate subchannel for a call, based on data in \a pick.
/// \a pick must remain alive until the pick is complete.
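With lb_config threaded through Args and UpdateLocked, concrete policies now receive the resolver's load-balancing configuration as a grpc_json tree, walkable via the key/child/next fields used by the parsing code elsewhere in this change. A sketch of what an override might look like in a hypothetical MyPolicy (a fragment assuming gRPC-internal headers; the "childPolicy" key is invented for illustration):

void MyPolicy::UpdateLocked(const grpc_channel_args& args,
                            grpc_json* lb_config) {
  // React to policy-specific configuration from the resolver, if any.
  if (lb_config != nullptr) {
    for (grpc_json* field = lb_config->child; field != nullptr;
         field = field->next) {
      if (field->key != nullptr && strcmp(field->key, "childPolicy") == 0) {
        // ... apply the child policy selection ...
      }
    }
  }
  // Addresses still arrive via the GRPC_ARG_LB_ADDRESSES channel arg.
  const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return;
  // ... rebuild the subchannel list from the new addresses ...
}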
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index cc259bcdbf..399bb452f4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -37,16 +37,27 @@ static void destroy_channel_elem(grpc_channel_element* elem) {}
namespace {
struct call_data {
+ call_data(const grpc_call_element_args& args) {
+ if (args.context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
+ // Get stats object from context and take a ref.
+ client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
+ args.context[GRPC_GRPCLB_CLIENT_STATS].value)
+ ->Ref();
+ // Record call started.
+ client_stats->AddCallStarted();
+ }
+ }
+
// Stats object to update.
grpc_core::RefCountedPtr<grpc_core::GrpcLbClientStats> client_stats;
// State for intercepting send_initial_metadata.
grpc_closure on_complete_for_send;
grpc_closure* original_on_complete_for_send;
- bool send_initial_metadata_succeeded;
+ bool send_initial_metadata_succeeded = false;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
- bool recv_initial_metadata_succeeded;
+ bool recv_initial_metadata_succeeded = false;
};
} // namespace
@@ -70,16 +81,8 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- // Get stats object from context and take a ref.
GPR_ASSERT(args->context != nullptr);
- if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
- calld->client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
- args->context[GRPC_GRPCLB_CLIENT_STATS].value)
- ->Ref();
- // Record call started.
- calld->client_stats->AddCallStarted();
- }
+ new (elem->call_data) call_data(*args);
return GRPC_ERROR_NONE;
}
@@ -97,6 +100,7 @@ static void destroy_call_elem(grpc_call_element* elem,
// TODO(roth): Eliminate this once filter stack is converted to C++.
calld->client_stats.reset();
}
+ calld->~call_data();
}
static void start_transport_stream_op_batch(
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 17e0d26875..a46579c7f7 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -123,7 +123,8 @@ class GrpcLb : public LoadBalancingPolicy {
public:
GrpcLb(const grpc_lb_addresses* addresses, const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -170,8 +171,7 @@ class GrpcLb : public LoadBalancingPolicy {
};
/// Contains a call to the LB server and all the data related to the call.
- class BalancerCallState
- : public InternallyRefCountedWithTracing<BalancerCallState> {
+ class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
public:
explicit BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_grpclb_policy);
@@ -498,7 +498,7 @@ grpc_lb_addresses* ProcessServerlist(const grpc_grpclb_serverlist* serverlist) {
GrpcLb::BalancerCallState::BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_grpclb_policy)
- : InternallyRefCountedWithTracing<BalancerCallState>(&grpc_lb_glb_trace),
+ : InternallyRefCounted<BalancerCallState>(&grpc_lb_glb_trace),
grpclb_policy_(std::move(parent_grpclb_policy)) {
GPR_ASSERT(grpclb_policy_ != nullptr);
GPR_ASSERT(!grpclb_policy()->shutting_down_);
@@ -748,7 +748,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
- // Empty payload means the LB call was cancelled.
+ // Null payload means the LB call was cancelled.
if (lb_calld != grpclb_policy->lb_calld_.get() ||
lb_calld->recv_message_payload_ == nullptr) {
lb_calld->Unref(DEBUG_LOCATION, "on_message_received");
@@ -802,54 +802,45 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
gpr_free(ipport);
}
}
- /* update serverlist */
- if (serverlist->num_servers > 0) {
- // Start sending client load report only after we start using the
- // serverlist returned from the current LB call.
- if (lb_calld->client_stats_report_interval_ > 0 &&
- lb_calld->client_stats_ == nullptr) {
- lb_calld->client_stats_.reset(New<GrpcLbClientStats>());
- // TODO(roth): We currently track this ref manually. Once the
- // ClosureRef API is ready, we should pass the RefCountedPtr<> along
- // with the callback.
- auto self = lb_calld->Ref(DEBUG_LOCATION, "client_load_report");
- self.release();
- lb_calld->ScheduleNextClientLoadReportLocked();
- }
- if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_,
- serverlist)) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] Incoming server list identical to current, "
- "ignoring.",
- grpclb_policy);
- }
- grpc_grpclb_destroy_serverlist(serverlist);
- } else { /* new serverlist */
- if (grpclb_policy->serverlist_ != nullptr) {
- /* dispose of the old serverlist */
- grpc_grpclb_destroy_serverlist(grpclb_policy->serverlist_);
- } else {
- /* or dispose of the fallback */
- grpc_lb_addresses_destroy(grpclb_policy->fallback_backend_addresses_);
- grpclb_policy->fallback_backend_addresses_ = nullptr;
- if (grpclb_policy->fallback_timer_callback_pending_) {
- grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
- }
- }
- // and update the copy in the GrpcLb instance. This
- // serverlist instance will be destroyed either upon the next
- // update or when the GrpcLb instance is destroyed.
- grpclb_policy->serverlist_ = serverlist;
- grpclb_policy->serverlist_index_ = 0;
- grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
- }
- } else {
+ // Start sending client load report only after we start using the
+ // serverlist returned from the current LB call.
+ if (lb_calld->client_stats_report_interval_ > 0 &&
+ lb_calld->client_stats_ == nullptr) {
+ lb_calld->client_stats_.reset(New<GrpcLbClientStats>());
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = lb_calld->Ref(DEBUG_LOCATION, "client_load_report");
+ self.release();
+ lb_calld->ScheduleNextClientLoadReportLocked();
+ }
+ // Check if the serverlist differs from the previous one.
+ if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_, serverlist)) {
if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Received empty server list, ignoring.",
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Incoming server list identical to current, "
+ "ignoring.",
grpclb_policy);
}
grpc_grpclb_destroy_serverlist(serverlist);
+ } else { // New serverlist.
+ if (grpclb_policy->serverlist_ != nullptr) {
+ // Dispose of the old serverlist.
+ grpc_grpclb_destroy_serverlist(grpclb_policy->serverlist_);
+ } else {
+ // Dispose of the fallback.
+ grpc_lb_addresses_destroy(grpclb_policy->fallback_backend_addresses_);
+ grpclb_policy->fallback_backend_addresses_ = nullptr;
+ if (grpclb_policy->fallback_timer_callback_pending_) {
+ grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
+ }
+ }
+ // Update the serverlist in the GrpcLb instance. This serverlist
+ // instance will be destroyed either upon the next update or when the
+ // GrpcLb instance is destroyed.
+ grpclb_policy->serverlist_ = serverlist;
+ grpclb_policy->serverlist_index_ = 0;
+ grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
}
} else {
// No valid initial response or serverlist found.
@@ -1331,13 +1322,10 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
grpc_channel_args_destroy(lb_channel_args);
}
-void GrpcLb::UpdateLocked(const grpc_channel_args& args) {
+void GrpcLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
ProcessChannelArgsLocked(args);
- // If fallback is configured and the RR policy already exists, update
- // it with the new fallback addresses.
- if (lb_fallback_timeout_ms_ > 0 && rr_policy_ != nullptr) {
- CreateOrUpdateRoundRobinPolicyLocked();
- }
+ // Update the existing RR policy.
+ if (rr_policy_ != nullptr) CreateOrUpdateRoundRobinPolicyLocked();
// Start watching the LB channel connectivity for connection, if not
// already doing so.
if (!watching_lb_channel_) {
@@ -1585,7 +1573,7 @@ void GrpcLb::AddPendingPick(PendingPick* pp) {
bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
grpc_error** error) {
// Check for drops if we are not using fallback backend addresses.
- if (serverlist_ != nullptr) {
+ if (serverlist_ != nullptr && serverlist_->num_servers > 0) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server* server = serverlist_->servers[serverlist_index_++];
if (serverlist_index_ == serverlist_->num_servers) {
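(The drop check above now also requires a non-empty serverlist; the GPR_ASSERT on num_servers is removed in the next hunk, so an empty list can legitimately reach this code. A self-contained sketch of the wrap-around drop logic, with a toy Server type standing in for grpc_grpclb_server:

#include <cstdio>
#include <utility>
#include <vector>

// Toy stand-in for grpc_grpclb_server; only the drop bit matters here.
struct Server { bool drop; };

// Sketch of the wrap-around drop check above: walk the serverlist in order,
// wrap the index at the end, and report whether the current entry is a drop.
class DropChecker {
 public:
  explicit DropChecker(std::vector<Server> servers)
      : servers_(std::move(servers)) {}
  // Returns true if the next pick should be dropped. An empty list never
  // drops, mirroring the new num_servers > 0 guard.
  bool ShouldDrop() {
    if (servers_.empty()) return false;
    const Server& s = servers_[index_++];
    if (index_ == servers_.size()) index_ = 0;  // wrap around
    return s.drop;
  }
 private:
  std::vector<Server> servers_;
  size_t index_ = 0;
};

int main() {
  DropChecker checker({{false}, {true}, {false}});
  for (int i = 0; i < 4; ++i) {
    std::printf("pick %d: %s\n", i, checker.ShouldDrop() ? "drop" : "pass");
  }
}
)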
@@ -1683,7 +1671,6 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
grpc_lb_addresses* addresses;
bool is_backend_from_grpclb_load_balancer = false;
if (serverlist_ != nullptr) {
- GPR_ASSERT(serverlist_->num_servers > 0);
addresses = ProcessServerlist(serverlist_);
is_backend_from_grpclb_load_balancer = true;
} else {
@@ -1730,7 +1717,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get());
}
- rr_policy_->UpdateLocked(*args);
+ rr_policy_->UpdateLocked(*args, nullptr);
} else {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index eb494486b9..d1a05f1255 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -46,7 +46,8 @@ class PickFirst : public LoadBalancingPolicy {
public:
explicit PickFirst(const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -159,7 +160,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p created.", this);
}
- UpdateLocked(*args.args);
+ UpdateLocked(*args.args, args.lb_config);
grpc_subchannel_index_ref();
}
@@ -333,7 +334,8 @@ void PickFirst::UpdateChildRefsLocked() {
child_subchannels_ = std::move(cs);
}
-void PickFirst::UpdateLocked(const grpc_channel_args& args) {
+void PickFirst::UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) {
AutoChildRefsUpdater guard(this);
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -378,6 +380,31 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
selected_ = nullptr;
return;
}
+ // If one of the subchannels in the new list is already in state
+ // READY, then select it immediately. This can happen when the
+ // currently selected subchannel is also present in the update. It
+ // can also happen if one of the subchannels in the update is already
+ // in the subchannel index because it's in use by another channel.
+ for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
+ PickFirstSubchannelData* sd = subchannel_list->subchannel(i);
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_connectivity_state state = sd->CheckConnectivityStateLocked(&error);
+ GRPC_ERROR_UNREF(error);
+ if (state == GRPC_CHANNEL_READY) {
+ subchannel_list_ = std::move(subchannel_list);
+ sd->ProcessUnselectedReadyLocked();
+ sd->StartConnectivityWatchLocked();
+ // If there was a previously pending update (which may or may
+ // not have contained the currently selected subchannel), drop
+ // it, so that it doesn't override what we've done here.
+ latest_pending_subchannel_list_.reset();
+ // Make sure that subsequent calls to ExitIdleLocked() don't cause
+ // us to start watching a subchannel other than the one we've
+ // selected.
+ started_picking_ = true;
+ return;
+ }
+ }
if (selected_ == nullptr) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
@@ -385,46 +412,14 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (started_picking_) {
- subchannel_list_->subchannel(0)
- ->CheckConnectivityStateAndStartWatchingLocked();
+ // Note: No need to use CheckConnectivityStateAndStartWatchingLocked()
+ // here, since we've already checked the initial connectivity
+ // state of all subchannels above.
+ subchannel_list_->subchannel(0)->StartConnectivityWatchLocked();
}
} else {
- // We do have a selected subchannel.
- // Check if it's present in the new list. If so, we're done.
- for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
- PickFirstSubchannelData* sd = subchannel_list->subchannel(i);
- if (sd->subchannel() == selected_->subchannel()) {
- // The currently selected subchannel is in the update: we are done.
- if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_INFO,
- "Pick First %p found already selected subchannel %p "
- "at update index %" PRIuPTR " of %" PRIuPTR "; update done",
- this, selected_->subchannel(), i,
- subchannel_list->num_subchannels());
- }
- // Make sure it's in state READY. It might not be if we grabbed
- // the combiner while a connectivity state notification
- // informing us otherwise is pending.
- // Note that CheckConnectivityStateLocked() also takes a ref to
- // the connected subchannel.
- grpc_error* error = GRPC_ERROR_NONE;
- if (sd->CheckConnectivityStateLocked(&error) == GRPC_CHANNEL_READY) {
- selected_ = sd;
- subchannel_list_ = std::move(subchannel_list);
- sd->StartConnectivityWatchLocked();
- // If there was a previously pending update (which may or may
- // not have contained the currently selected subchannel), drop
- // it, so that it doesn't override what we've done here.
- latest_pending_subchannel_list_.reset();
- return;
- }
- GRPC_ERROR_UNREF(error);
- }
- }
- // Not keeping the previous selected subchannel, so set the latest
- // pending subchannel list to the new subchannel list. We will wait
- // for it to report READY before swapping it into the current
- // subchannel list.
+ // We do have a selected subchannel, so keep using it until one of
+ // the subchannels in the new list reports READY.
if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
@@ -438,8 +433,11 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (started_picking_) {
+ // Note: No need to use CheckConnectivityStateAndStartWatchingLocked()
+ // here, since we've already checked the initial connectivity
+ // state of all subchannels above.
latest_pending_subchannel_list_->subchannel(0)
- ->CheckConnectivityStateAndStartWatchingLocked();
+ ->StartConnectivityWatchLocked();
}
}
}
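(The new UpdateLocked above scans the incoming subchannel list once and, if any entry is already READY, selects it immediately, drops any pending update, and sets started_picking_. A minimal sketch of just that scan, with a toy state enum instead of grpc_connectivity_state; names here are illustrative, not the real interfaces:

#include <cstdio>
#include <vector>

enum class State { kIdle, kConnecting, kReady, kTransientFailure };

// Toy stand-in for a subchannel; not a gRPC type.
struct Subchannel { int id; State state; };

// Returns the index of the first READY subchannel, or -1 if none is READY,
// mirroring the "select immediately if already READY" scan added above.
int FindReadySubchannel(const std::vector<Subchannel>& list) {
  for (size_t i = 0; i < list.size(); ++i) {
    if (list[i].state == State::kReady) return static_cast<int>(i);
  }
  return -1;
}

int main() {
  std::vector<Subchannel> update = {{1, State::kConnecting},
                                    {2, State::kReady},
                                    {3, State::kIdle}};
  int selected = FindReadySubchannel(update);
  if (selected >= 0) {
    // The real policy would also swap in the new list, start a connectivity
    // watch on the chosen entry, drop any pending update, and set
    // started_picking_ so ExitIdleLocked() cannot pick a different entry.
    std::printf("selected subchannel %d\n", update[selected].id);
  } else {
    std::printf("no READY subchannel; keep the current selection\n");
  }
}
)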
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index e9ed85cf66..2a16975131 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -57,7 +57,8 @@ class RoundRobin : public LoadBalancingPolicy {
public:
explicit RoundRobin(const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -232,7 +233,7 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"round_robin");
- UpdateLocked(*args.args);
+ UpdateLocked(*args.args, args.lb_config);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this,
subchannel_list_->num_subchannels());
@@ -664,7 +665,8 @@ void RoundRobin::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
notify);
}
-void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
+void RoundRobin::UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) {
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
AutoChildRefsUpdater guard(this);
if (GPR_UNLIKELY(arg == nullptr || arg->type != GRPC_ARG_POINTER)) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 4ec9e935ed..f31401502c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -186,8 +186,7 @@ class SubchannelData {
// A list of subchannels.
template <typename SubchannelListType, typename SubchannelDataType>
-class SubchannelList
- : public InternallyRefCountedWithTracing<SubchannelListType> {
+class SubchannelList : public InternallyRefCounted<SubchannelListType> {
public:
typedef InlinedVector<SubchannelDataType, 10> SubchannelVector;
@@ -226,8 +225,7 @@ class SubchannelList
// Note: Caller must ensure that this is invoked inside of the combiner.
void Orphan() override {
ShutdownLocked();
- InternallyRefCountedWithTracing<SubchannelListType>::Unref(DEBUG_LOCATION,
- "shutdown");
+ InternallyRefCounted<SubchannelListType>::Unref(DEBUG_LOCATION, "shutdown");
}
GRPC_ABSTRACT_BASE_CLASS
@@ -493,7 +491,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
const grpc_lb_addresses* addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args)
- : InternallyRefCountedWithTracing<SubchannelListType>(tracer),
+ : InternallyRefCounted<SubchannelListType>(tracer),
policy_(policy),
tracer_(tracer),
combiner_(GRPC_COMBINER_REF(combiner, "subchannel_list")) {
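(SubchannelList now derives from InternallyRefCounted, with the separate ...WithTracing base folded away, and Orphan() still releases the owner's ref after ShutdownLocked(). A toy, single-threaded sketch of that orphanable idiom; the real grpc_core classes are CRTP templates with debug-location tracing:

#include <cstdio>

// Toy version of the internally-ref-counted/orphanable idiom.
class Orphanable {
 public:
  virtual void Orphan() = 0;
  virtual ~Orphanable() = default;
};

class ToySubchannelList : public Orphanable {
 public:
  void Orphan() override {
    ShutdownLocked();  // stop watches, drop subchannel refs
    Unref();           // release the ref held by the owner
  }
  void Ref() { ++refs_; }
  void Unref() {
    if (--refs_ == 0) delete this;
  }
 private:
  void ShutdownLocked() { std::printf("shutting down list\n"); }
  int refs_ = 1;
};

int main() {
  // The owner holds the initial ref and gives it up via Orphan(); any
  // in-flight callbacks (none in this sketch) would keep the object alive
  // through refs of their own.
  ToySubchannelList* list = new ToySubchannelList();
  list->Orphan();
}
)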
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
index 7fb4cbdcd2..faedc0a919 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
@@ -26,30 +26,26 @@
/// channel that uses pick_first to select from the list of balancer
/// addresses.
///
-/// The first time the policy gets a request for a pick, a ping, or to exit
-/// the idle state, \a StartPickingLocked() is called. This method is
-/// responsible for instantiating the internal *streaming* call to the LB
-/// server (whichever address pick_first chose). The call will be complete
-/// when either the balancer sends status or when we cancel the call (e.g.,
-/// because we are shutting down). In needed, we retry the call. If we
-/// received at least one valid message from the server, a new call attempt
-/// will be made immediately; otherwise, we apply back-off delays between
-/// attempts.
+/// The first time the xDS policy gets a request for a pick or to exit the idle
+/// state, \a StartPickingLocked() is called. This method is responsible for
+/// instantiating the internal *streaming* call to the LB server (whichever
+/// address pick_first chose). The call will be complete when either the
+/// balancer sends status or when we cancel the call (e.g., because we are
+/// shutting down). If needed, we retry the call. If we received at least one
+/// valid message from the server, a new call attempt will be made immediately;
+/// otherwise, we apply back-off delays between attempts.
///
-/// We maintain an internal round_robin policy instance for distributing
+/// We maintain an internal child policy (round_robin) instance for distributing
/// requests across backends. Whenever we receive a new serverlist from
-/// the balancer, we update the round_robin policy with the new list of
-/// addresses. If we cannot communicate with the balancer on startup,
-/// however, we may enter fallback mode, in which case we will populate
-/// the RR policy's addresses from the backend addresses returned by the
-/// resolver.
+/// the balancer, we update the child policy with the new list of
+/// addresses.
///
-/// Once an RR policy instance is in place (and getting updated as described),
-/// calls for a pick, a ping, or a cancellation will be serviced right
-/// away by forwarding them to the RR instance. Any time there's no RR
-/// policy available (i.e., right after the creation of the gRPCLB policy),
-/// pick and ping requests are added to a list of pending picks and pings
-/// to be flushed and serviced when the RR policy instance becomes available.
+/// Once a child policy instance is in place (and getting updated as
+/// described), calls for a pick, or a cancellation will be serviced right away
+/// by forwarding them to the child policy instance. Any time there's no child
+/// policy available (i.e., right after the creation of the xDS policy), pick
+/// requests are added to a list of pending picks to be flushed and serviced
+/// when the child policy instance becomes available.
///
/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
/// high level design and details.
@@ -122,7 +118,8 @@ class XdsLb : public LoadBalancingPolicy {
public:
XdsLb(const grpc_lb_addresses* addresses, const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -141,10 +138,10 @@ class XdsLb : public LoadBalancingPolicy {
private:
/// Linked list of pending pick requests. It stores all information needed to
- /// eventually call (Round Robin's) pick() on them. They mainly stay pending
- /// waiting for the RR policy to be created.
+ /// eventually call pick() on them. They mainly stay pending waiting for the
+ /// child policy to be created.
///
- /// Note that when a pick is sent to the RR policy, we inject our own
+ /// Note that when a pick is sent to the child policy, we inject our own
/// on_complete callback, so that we can intercept the result before
/// invoking the original on_complete callback. This allows us to set the
/// LB token metadata and add client_stats to the call context.
@@ -169,8 +166,7 @@ class XdsLb : public LoadBalancingPolicy {
};
/// Contains a call to the LB server and all the data related to the call.
- class BalancerCallState
- : public InternallyRefCountedWithTracing<BalancerCallState> {
+ class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
public:
explicit BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_xdslb_policy);
@@ -202,7 +198,6 @@ class XdsLb : public LoadBalancingPolicy {
static bool LoadReportCountersAreZero(xds_grpclb_request* request);
static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error);
- static void ClientLoadReportDoneLocked(void* arg, grpc_error* error);
static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error);
static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error);
@@ -266,18 +261,18 @@ class XdsLb : public LoadBalancingPolicy {
void AddPendingPick(PendingPick* pp);
static void OnPendingPickComplete(void* arg, grpc_error* error);
- // Methods for dealing with the RR policy.
- void CreateOrUpdateRoundRobinPolicyLocked();
- grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
- void CreateRoundRobinPolicyLocked(const Args& args);
- bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
- grpc_error** error);
- void UpdateConnectivityStateFromRoundRobinPolicyLocked(
- grpc_error* rr_state_error);
- static void OnRoundRobinConnectivityChangedLocked(void* arg,
- grpc_error* error);
- static void OnRoundRobinRequestReresolutionLocked(void* arg,
- grpc_error* error);
+ // Methods for dealing with the child policy.
+ void CreateOrUpdateChildPolicyLocked();
+ grpc_channel_args* CreateChildPolicyArgsLocked();
+ void CreateChildPolicyLocked(const Args& args);
+ bool PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
+ grpc_error** error);
+ void UpdateConnectivityStateFromChildPolicyLocked(
+ grpc_error* child_state_error);
+ static void OnChildPolicyConnectivityChangedLocked(void* arg,
+ grpc_error* error);
+ static void OnChildPolicyRequestReresolutionLocked(void* arg,
+ grpc_error* error);
// Who the client is trying to communicate with.
const char* server_name_ = nullptr;
@@ -319,10 +314,6 @@ class XdsLb : public LoadBalancingPolicy {
// The deserialized response from the balancer. May be nullptr until one
// such response has arrived.
xds_grpclb_serverlist* serverlist_ = nullptr;
- // Index into serverlist for next pick.
- // If the server at this index is a drop, we return a drop.
- // Otherwise, we delegate to the RR policy.
- size_t serverlist_index_ = 0;
  // Timeout in milliseconds before using fallback backend addresses.
// 0 means not using fallback.
@@ -334,14 +325,14 @@ class XdsLb : public LoadBalancingPolicy {
grpc_timer lb_fallback_timer_;
grpc_closure lb_on_fallback_;
- // Pending picks that are waiting on the RR policy's connectivity.
+ // Pending picks that are waiting on the xDS policy's connectivity.
PendingPick* pending_picks_ = nullptr;
- // The RR policy to use for the backends.
- OrphanablePtr<LoadBalancingPolicy> rr_policy_;
- grpc_connectivity_state rr_connectivity_state_;
- grpc_closure on_rr_connectivity_changed_;
- grpc_closure on_rr_request_reresolution_;
+ // The policy to use for the backends.
+ OrphanablePtr<LoadBalancingPolicy> child_policy_;
+ grpc_connectivity_state child_connectivity_state_;
+ grpc_closure on_child_connectivity_changed_;
+ grpc_closure on_child_request_reresolution_;
};
//
@@ -448,7 +439,7 @@ grpc_lb_addresses* ProcessServerlist(const xds_grpclb_serverlist* serverlist) {
grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_valid, &lb_token_vtable);
/* second pass: actually populate the addresses and LB tokens (aka user data
- * to the outside world) to be read by the RR policy during its creation.
+ * to the outside world) to be read by the child policy during its creation.
* Given that the validity tests are very cheap, they are performed again
* instead of marking the valid ones during the first pass, as this would
   * incur an allocation due to the arbitrary number of servers */
@@ -496,7 +487,7 @@ grpc_lb_addresses* ProcessServerlist(const xds_grpclb_serverlist* serverlist) {
XdsLb::BalancerCallState::BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_xdslb_policy)
- : InternallyRefCountedWithTracing<BalancerCallState>(&grpc_lb_xds_trace),
+ : InternallyRefCounted<BalancerCallState>(&grpc_lb_xds_trace),
xdslb_policy_(std::move(parent_xdslb_policy)) {
GPR_ASSERT(xdslb_policy_ != nullptr);
GPR_ASSERT(!xdslb_policy()->shutting_down_);
@@ -675,6 +666,7 @@ bool XdsLb::BalancerCallState::LoadReportCountersAreZero(
(drop_entries == nullptr || drop_entries->empty());
}
+// TODO(vpowar): Use LRS to send the client Load Report.
void XdsLb::BalancerCallState::SendClientLoadReportLocked() {
// Construct message payload.
GPR_ASSERT(send_message_payload_ == nullptr);
@@ -692,38 +684,8 @@ void XdsLb::BalancerCallState::SendClientLoadReportLocked() {
} else {
last_client_load_report_counters_were_zero_ = false;
}
- grpc_slice request_payload_slice = xds_grpclb_request_encode(request);
- send_message_payload_ =
- grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- grpc_slice_unref_internal(request_payload_slice);
+ // TODO(vpowar): Send the report on LRS stream.
xds_grpclb_request_destroy(request);
- // Send the report.
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = send_message_payload_;
- GRPC_CLOSURE_INIT(&client_load_report_closure_, ClientLoadReportDoneLocked,
- this, grpc_combiner_scheduler(xdslb_policy()->combiner()));
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lb_call_, &op, 1, &client_load_report_closure_);
- if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
- gpr_log(GPR_ERROR, "[xdslb %p] call_error=%d", xdslb_policy_.get(),
- call_error);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- }
-}
-
-void XdsLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg,
- grpc_error* error) {
- BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
- XdsLb* xdslb_policy = lb_calld->xdslb_policy();
- grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
- lb_calld->send_message_payload_ = nullptr;
- if (error != GRPC_ERROR_NONE || lb_calld != xdslb_policy->lb_calld_.get()) {
- lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
- return;
- }
- lb_calld->ScheduleNextClientLoadReportLocked();
}
void XdsLb::BalancerCallState::OnInitialRequestSentLocked(void* arg,
@@ -837,8 +799,7 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
// serverlist instance will be destroyed either upon the next
// update or when the XdsLb instance is destroyed.
xdslb_policy->serverlist_ = serverlist;
- xdslb_policy->serverlist_index_ = 0;
- xdslb_policy->CreateOrUpdateRoundRobinPolicyLocked();
+ xdslb_policy->CreateOrUpdateChildPolicyLocked();
}
} else {
if (grpc_lb_xds_trace.enabled()) {
@@ -871,7 +832,7 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
&lb_calld->lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
- lb_calld->Unref(DEBUG_LOCATION, "on_message_received+grpclb_shutdown");
+ lb_calld->Unref(DEBUG_LOCATION, "on_message_received+xds_shutdown");
}
}
@@ -949,7 +910,7 @@ grpc_lb_addresses* ExtractBalancerAddresses(
* - \a addresses: corresponding to the balancers.
* - \a response_generator: in order to propagate updates from the resolver
* above the grpclb policy.
- * - \a args: other args inherited from the grpclb policy. */
+ * - \a args: other args inherited from the xds policy. */
grpc_channel_args* BuildBalancerChannelArgs(
const grpc_lb_addresses* addresses,
FakeResolverResponseGenerator* response_generator,
@@ -971,10 +932,10 @@ grpc_channel_args* BuildBalancerChannelArgs(
// resolver will have is_balancer=false, whereas our own addresses have
// is_balancer=true. We need the LB channel to return addresses with
// is_balancer=false so that it does not wind up recursively using the
- // grpclb LB policy, as per the special case logic in client_channel.c.
+ // xds LB policy, as per the special case logic in client_channel.c.
GRPC_ARG_LB_ADDRESSES,
// The fake resolver response generator, because we are replacing it
- // with the one from the grpclb policy, used to propagate updates to
+ // with the one from the xds policy, used to propagate updates to
// the LB channel.
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
// The LB channel should use the authority indicated by the target
@@ -996,7 +957,7 @@ grpc_channel_args* BuildBalancerChannelArgs(
// address updates into the LB channel.
grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
response_generator),
- // A channel arg indicating the target is a grpclb load balancer.
+      // A channel arg indicating the target is an xds load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER), 1),
       // A channel arg indicating this is an internal channel, i.e., it is
@@ -1019,6 +980,7 @@ grpc_channel_args* BuildBalancerChannelArgs(
// ctor and dtor
//
+// TODO(vishalpowar): Use lb_config in args to configure LB policy.
XdsLb::XdsLb(const grpc_lb_addresses* addresses,
const LoadBalancingPolicy::Args& args)
: LoadBalancingPolicy(args),
@@ -1036,11 +998,11 @@ XdsLb::XdsLb(const grpc_lb_addresses* addresses,
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&XdsLb::OnBalancerChannelConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
- GRPC_CLOSURE_INIT(&on_rr_connectivity_changed_,
- &XdsLb::OnRoundRobinConnectivityChangedLocked, this,
+ GRPC_CLOSURE_INIT(&on_child_connectivity_changed_,
+ &XdsLb::OnChildPolicyConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
- GRPC_CLOSURE_INIT(&on_rr_request_reresolution_,
- &XdsLb::OnRoundRobinRequestReresolutionLocked, this,
+ GRPC_CLOSURE_INIT(&on_child_request_reresolution_,
+ &XdsLb::OnChildPolicyRequestReresolutionLocked, this,
grpc_combiner_scheduler(args.combiner));
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, "xds");
// Record server name.
@@ -1092,7 +1054,7 @@ void XdsLb::ShutdownLocked() {
if (fallback_timer_callback_pending_) {
grpc_timer_cancel(&lb_fallback_timer_);
}
- rr_policy_.reset();
+ child_policy_.reset();
TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_CANCELLED);
// We destroy the LB channel here instead of in our destructor because
// destroying the channel triggers a last callback to
@@ -1105,7 +1067,7 @@ void XdsLb::ShutdownLocked() {
gpr_mu_unlock(&lb_channel_mu_);
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_REF(error), "grpclb_shutdown");
+ GRPC_ERROR_REF(error), "xds_shutdown");
// Clear pending picks.
PendingPick* pp;
while ((pp = pending_picks_) != nullptr) {
@@ -1138,13 +1100,13 @@ void XdsLb::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
// Cancel a specific pending pick.
//
-// A grpclb pick progresses as follows:
-// - If there's a Round Robin policy (rr_policy_) available, it'll be
-// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From
-// that point onwards, it'll be RR's responsibility. For cancellations, that
-// implies the pick needs also be cancelled by the RR instance.
-// - Otherwise, without an RR instance, picks stay pending at this policy's
-// level (grpclb), inside the pending_picks_ list. To cancel these,
+// A pick progresses as follows:
+// - If there's a child policy available, it'll be handed over to child policy
+// (in CreateChildPolicyLocked()). From that point onwards, it'll be the
+// child policy's responsibility. For cancellations, that implies the pick
+// needs to be also cancelled by the child policy instance.
+// - Otherwise, without a child policy instance, picks stay pending at this
+// policy's level (xds), inside the pending_picks_ list. To cancel these,
// we invoke the completion closure and set the pick's connected
// subchannel to nullptr right here.
void XdsLb::CancelPickLocked(PickState* pick, grpc_error* error) {
@@ -1164,21 +1126,21 @@ void XdsLb::CancelPickLocked(PickState* pick, grpc_error* error) {
}
pp = next;
}
- if (rr_policy_ != nullptr) {
- rr_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error));
+ if (child_policy_ != nullptr) {
+ child_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
// Cancel all pending picks.
//
-// A grpclb pick progresses as follows:
-// - If there's a Round Robin policy (rr_policy_) available, it'll be
-// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From
-// that point onwards, it'll be RR's responsibility. For cancellations, that
-// implies the pick needs also be cancelled by the RR instance.
-// - Otherwise, without an RR instance, picks stay pending at this policy's
-// level (grpclb), inside the pending_picks_ list. To cancel these,
+// A pick progresses as follows:
+// - If there's a child policy available, it'll be handed over to child policy
+// (in CreateChildPolicyLocked()). From that point onwards, it'll be the
+// child policy's responsibility. For cancellations, that implies the pick
+// needs to be also cancelled by the child policy instance.
+// - Otherwise, without a child policy instance, picks stay pending at this
+// policy's level (xds), inside the pending_picks_ list. To cancel these,
// we invoke the completion closure and set the pick's connected
// subchannel to nullptr right here.
void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -1200,10 +1162,10 @@ void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
}
pp = next;
}
- if (rr_policy_ != nullptr) {
- rr_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
- initial_metadata_flags_eq,
- GRPC_ERROR_REF(error));
+ if (child_policy_ != nullptr) {
+ child_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
+ initial_metadata_flags_eq,
+ GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
@@ -1218,22 +1180,21 @@ void XdsLb::ResetBackoffLocked() {
if (lb_channel_ != nullptr) {
grpc_channel_reset_connect_backoff(lb_channel_);
}
- if (rr_policy_ != nullptr) {
- rr_policy_->ResetBackoffLocked();
+ if (child_policy_ != nullptr) {
+ child_policy_->ResetBackoffLocked();
}
}
bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
PendingPick* pp = PendingPickCreate(pick);
bool pick_done = false;
- if (rr_policy_ != nullptr) {
+ if (child_policy_ != nullptr) {
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] about to PICK from RR %p", this,
- rr_policy_.get());
+ gpr_log(GPR_INFO, "[xdslb %p] about to PICK from policy %p", this,
+ child_policy_.get());
}
- pick_done =
- PickFromRoundRobinPolicyLocked(false /* force_async */, pp, error);
- } else { // rr_policy_ == NULL
+ pick_done = PickFromChildPolicyLocked(false /* force_async */, pp, error);
+ } else { // child_policy_ == NULL
if (pick->on_complete == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No pick result available but synchronous result required.");
@@ -1241,7 +1202,7 @@ bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
} else {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
- "[xdslb %p] No RR policy. Adding to grpclb's pending picks",
+ "[xdslb %p] No child policy. Adding to xds's pending picks",
this);
}
AddPendingPick(pp);
@@ -1256,8 +1217,8 @@ bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) {
- // delegate to the RoundRobin to fill the children subchannels.
- rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
+  // delegate to the child_policy_ to fill in the child subchannels.
+ child_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
MutexLock lock(&lb_channel_mu_);
if (lb_channel_ != nullptr) {
grpc_core::channelz::ChannelNode* channel_node =
@@ -1324,13 +1285,15 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
grpc_channel_args_destroy(lb_channel_args);
}
-void XdsLb::UpdateLocked(const grpc_channel_args& args) {
+// TODO(vishalpowar): Use lb_config to configure LB policy.
+void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
ProcessChannelArgsLocked(args);
- // If fallback is configured and the RR policy already exists, update
- // it with the new fallback addresses.
- if (lb_fallback_timeout_ms_ > 0 && rr_policy_ != nullptr) {
- CreateOrUpdateRoundRobinPolicyLocked();
- }
+ // Update the existing child policy.
+ // Note: We have disabled fallback mode in the code, so this child policy must
+ // have been created from a serverlist.
+ // TODO(vpowar): Handle the fallback_address changes when we add support for
+ // fallback in xDS.
+ if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked();
// Start watching the LB channel connectivity for connection, if not
// already doing so.
if (!watching_lb_channel_) {
@@ -1398,11 +1361,10 @@ void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
if (xdslb_policy->serverlist_ == nullptr && !xdslb_policy->shutting_down_ &&
error == GRPC_ERROR_NONE) {
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] Falling back to use backends from resolver",
+ gpr_log(GPR_INFO,
+ "[xdslb %p] Fallback timer fired. Not using fallback backends",
xdslb_policy);
}
- GPR_ASSERT(xdslb_policy->fallback_backend_addresses_ != nullptr);
- xdslb_policy->CreateOrUpdateRoundRobinPolicyLocked();
}
xdslb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
}
@@ -1452,8 +1414,8 @@ void XdsLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_) goto done;
// Re-initialize the lb_call. This should also take care of updating the
- // embedded RR policy. Note that the current RR policy, if any, will stay in
- // effect until an update from the new lb_call is received.
+ // child policy. Note that the current child policy, if any, will
+ // stay in effect until an update from the new lb_call is received.
switch (xdslb_policy->lb_channel_connectivity_) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
@@ -1512,8 +1474,8 @@ void DestroyClientStats(void* arg) {
}
void XdsLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
- /* if connected_subchannel is nullptr, no pick has been made by the RR
- * policy (e.g., all addresses failed to connect). There won't be any
+ /* if connected_subchannel is nullptr, no pick has been made by the
+ * child policy (e.g., all addresses failed to connect). There won't be any
* user_data/token available */
if (pp->pick->connected_subchannel != nullptr) {
if (GPR_LIKELY(!GRPC_MDISNULL(pp->lb_token))) {
@@ -1539,8 +1501,8 @@ void XdsLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
}
/* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
+ * reference to its associated child policy instance. We wrap this closure in
+ * order to unref the child policy instance upon its invocation */
void XdsLb::OnPendingPickComplete(void* arg, grpc_error* error) {
PendingPick* pp = static_cast<PendingPick*>(arg);
PendingPickSetMetadataAndContext(pp);
@@ -1565,50 +1527,24 @@ void XdsLb::AddPendingPick(PendingPick* pp) {
}
//
-// code for interacting with the RR policy
+// code for interacting with the child policy
//
-// Performs a pick over \a rr_policy_. Given that a pick can return
+// Performs a pick over \a child_policy_. Given that a pick can return
// immediately (ignoring its completion callback), we need to perform the
// cleanups this callback would otherwise be responsible for.
// If \a force_async is true, then we will manually schedule the
// completion callback even if the pick is available immediately.
-bool XdsLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
- grpc_error** error) {
- // Check for drops if we are not using fallback backend addresses.
- if (serverlist_ != nullptr) {
- // Look at the index into the serverlist to see if we should drop this call.
- xds_grpclb_server* server = serverlist_->servers[serverlist_index_++];
- if (serverlist_index_ == serverlist_->num_servers) {
- serverlist_index_ = 0; // Wrap-around.
- }
- if (server->drop) {
- // Update client load reporting stats to indicate the number of
- // dropped calls. Note that we have to do this here instead of in
- // the client_load_reporting filter, because we do not create a
- // subchannel call (and therefore no client_load_reporting filter)
- // for dropped calls.
- if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
- lb_calld_->client_stats()->AddCallDroppedLocked(
- server->load_balance_token);
- }
- if (force_async) {
- GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
- Delete(pp);
- return false;
- }
- Delete(pp);
- return true;
- }
- }
+bool XdsLb::PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
+ grpc_error** error) {
// Set client_stats and user_data.
if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
pp->client_stats = lb_calld_->client_stats()->Ref();
}
GPR_ASSERT(pp->pick->user_data == nullptr);
pp->pick->user_data = (void**)&pp->lb_token;
- // Pick via the RR policy.
- bool pick_done = rr_policy_->PickLocked(pp->pick, error);
+ // Pick via the child policy.
+ bool pick_done = child_policy_->PickLocked(pp->pick, error);
if (pick_done) {
PendingPickSetMetadataAndContext(pp);
if (force_async) {
@@ -1619,72 +1555,67 @@ bool XdsLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
Delete(pp);
}
// else, the pending pick will be registered and taken care of by the
- // pending pick list inside the RR policy. Eventually,
+ // pending pick list inside the child policy. Eventually,
// OnPendingPickComplete() will be called, which will (among other
// things) add the LB token to the call's initial metadata.
return pick_done;
}
-void XdsLb::CreateRoundRobinPolicyLocked(const Args& args) {
- GPR_ASSERT(rr_policy_ == nullptr);
- rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+void XdsLb::CreateChildPolicyLocked(const Args& args) {
+ GPR_ASSERT(child_policy_ == nullptr);
+ child_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
"round_robin", args);
- if (GPR_UNLIKELY(rr_policy_ == nullptr)) {
- gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a RoundRobin policy", this);
+ if (GPR_UNLIKELY(child_policy_ == nullptr)) {
+ gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a child policy", this);
return;
}
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
- auto self = Ref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+ auto self = Ref(DEBUG_LOCATION, "on_child_reresolution_requested");
self.release();
- rr_policy_->SetReresolutionClosureLocked(&on_rr_request_reresolution_);
- grpc_error* rr_state_error = nullptr;
- rr_connectivity_state_ = rr_policy_->CheckConnectivityLocked(&rr_state_error);
- // Connectivity state is a function of the RR policy updated/created.
- UpdateConnectivityStateFromRoundRobinPolicyLocked(rr_state_error);
- // Add the gRPC LB's interested_parties pollset_set to that of the newly
- // created RR policy. This will make the RR policy progress upon activity on
- // gRPC LB, which in turn is tied to the application's call.
- grpc_pollset_set_add_pollset_set(rr_policy_->interested_parties(),
+ child_policy_->SetReresolutionClosureLocked(&on_child_request_reresolution_);
+ grpc_error* child_state_error = nullptr;
+ child_connectivity_state_ =
+ child_policy_->CheckConnectivityLocked(&child_state_error);
+ // Connectivity state is a function of the child policy updated/created.
+ UpdateConnectivityStateFromChildPolicyLocked(child_state_error);
+  // Add the xDS policy's interested_parties pollset_set to that of the newly
+  // created child policy. This will make the child policy progress upon
+  // activity on the xDS policy, which in turn is tied to the application's
+  // call.
+ grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
interested_parties());
- // Subscribe to changes to the connectivity of the new RR.
+ // Subscribe to changes to the connectivity of the new child policy.
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
- self = Ref(DEBUG_LOCATION, "on_rr_connectivity_changed");
+ self = Ref(DEBUG_LOCATION, "on_child_connectivity_changed");
self.release();
- rr_policy_->NotifyOnStateChangeLocked(&rr_connectivity_state_,
- &on_rr_connectivity_changed_);
- rr_policy_->ExitIdleLocked();
- // Send pending picks to RR policy.
+ child_policy_->NotifyOnStateChangeLocked(&child_connectivity_state_,
+ &on_child_connectivity_changed_);
+ child_policy_->ExitIdleLocked();
+ // Send pending picks to child policy.
PendingPick* pp;
while ((pp = pending_picks_)) {
pending_picks_ = pp->next;
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Pending pick about to (async) PICK from RR %p", this,
- rr_policy_.get());
+ gpr_log(
+ GPR_INFO,
+ "[xdslb %p] Pending pick about to (async) PICK from child policy %p",
+ this, child_policy_.get());
}
grpc_error* error = GRPC_ERROR_NONE;
- PickFromRoundRobinPolicyLocked(true /* force_async */, pp, &error);
+ PickFromChildPolicyLocked(true /* force_async */, pp, &error);
}
}
-grpc_channel_args* XdsLb::CreateRoundRobinPolicyArgsLocked() {
+grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() {
grpc_lb_addresses* addresses;
bool is_backend_from_grpclb_load_balancer = false;
- if (serverlist_ != nullptr) {
- GPR_ASSERT(serverlist_->num_servers > 0);
- addresses = ProcessServerlist(serverlist_);
- is_backend_from_grpclb_load_balancer = true;
- } else {
- // If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't
- // received any serverlist from the balancer, we use the fallback backends
- // returned by the resolver. Note that the fallback backend list may be
- // empty, in which case the new round_robin policy will keep the requested
- // picks pending.
- GPR_ASSERT(fallback_backend_addresses_ != nullptr);
- addresses = grpc_lb_addresses_copy(fallback_backend_addresses_);
- }
+ // This should never be invoked if we do not have serverlist_, as fallback
+  // mode is disabled for the xDS plugin.
+ GPR_ASSERT(serverlist_ != nullptr);
+ GPR_ASSERT(serverlist_->num_servers > 0);
+ addresses = ProcessServerlist(serverlist_);
+ is_backend_from_grpclb_load_balancer = true;
GPR_ASSERT(addresses != nullptr);
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
@@ -1704,66 +1635,68 @@ grpc_channel_args* XdsLb::CreateRoundRobinPolicyArgsLocked() {
return args;
}
-void XdsLb::CreateOrUpdateRoundRobinPolicyLocked() {
+void XdsLb::CreateOrUpdateChildPolicyLocked() {
if (shutting_down_) return;
- grpc_channel_args* args = CreateRoundRobinPolicyArgsLocked();
+ grpc_channel_args* args = CreateChildPolicyArgsLocked();
GPR_ASSERT(args != nullptr);
- if (rr_policy_ != nullptr) {
+ if (child_policy_ != nullptr) {
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] Updating RR policy %p", this,
- rr_policy_.get());
+ gpr_log(GPR_INFO, "[xdslb %p] Updating the child policy %p", this,
+ child_policy_.get());
}
- rr_policy_->UpdateLocked(*args);
+ // TODO(vishalpowar): Pass the correct LB config.
+ child_policy_->UpdateLocked(*args, nullptr);
} else {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.client_channel_factory = client_channel_factory();
lb_policy_args.args = args;
- CreateRoundRobinPolicyLocked(lb_policy_args);
+ CreateChildPolicyLocked(lb_policy_args);
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] Created new RR policy %p", this,
- rr_policy_.get());
+ gpr_log(GPR_INFO, "[xdslb %p] Created a new child policy %p", this,
+ child_policy_.get());
}
}
grpc_channel_args_destroy(args);
}
-void XdsLb::OnRoundRobinRequestReresolutionLocked(void* arg,
- grpc_error* error) {
+void XdsLb::OnChildPolicyRequestReresolutionLocked(void* arg,
+ grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_ || error != GRPC_ERROR_NONE) {
- xdslb_policy->Unref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+ xdslb_policy->Unref(DEBUG_LOCATION, "on_child_reresolution_requested");
return;
}
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(
- GPR_INFO,
- "[xdslb %p] Re-resolution requested from the internal RR policy (%p).",
- xdslb_policy, xdslb_policy->rr_policy_.get());
+ gpr_log(GPR_INFO,
+ "[xdslb %p] Re-resolution requested from child policy "
+ "(%p).",
+ xdslb_policy, xdslb_policy->child_policy_.get());
}
  // If we are talking to a balancer, we expect to get updated addresses from
- // the balancer, so we can ignore the re-resolution request from the RR
- // policy. Otherwise, handle the re-resolution request using the
- // grpclb policy's original re-resolution closure.
+ // the balancer, so we can ignore the re-resolution request from the child
+ // policy.
+ // Otherwise, handle the re-resolution request using the xds policy's
+ // original re-resolution closure.
if (xdslb_policy->lb_calld_ == nullptr ||
!xdslb_policy->lb_calld_->seen_initial_response()) {
xdslb_policy->TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_NONE);
}
- // Give back the wrapper closure to the RR policy.
- xdslb_policy->rr_policy_->SetReresolutionClosureLocked(
- &xdslb_policy->on_rr_request_reresolution_);
+ // Give back the wrapper closure to the child policy.
+ xdslb_policy->child_policy_->SetReresolutionClosureLocked(
+ &xdslb_policy->on_child_request_reresolution_);
}
-void XdsLb::UpdateConnectivityStateFromRoundRobinPolicyLocked(
- grpc_error* rr_state_error) {
+void XdsLb::UpdateConnectivityStateFromChildPolicyLocked(
+ grpc_error* child_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&state_tracker_);
/* The new connectivity status is a function of the previous one and the new
- * input coming from the status of the RR policy.
+ * input coming from the status of the child policy.
*
- * current state (grpclb's)
+ * current state (xds's)
* |
- * v || I | C | R | TF | SD | <- new state (RR's)
+ * v || I | C | R | TF | SD | <- new state (child policy's)
* ===++====+=====+=====+======+======+
* I || I | C | R | [I] | [I] |
* ---++----+-----+-----+------+------+
@@ -1776,52 +1709,51 @@ void XdsLb::UpdateConnectivityStateFromRoundRobinPolicyLocked(
* SD || NA | NA | NA | NA | NA | (*)
* ---++----+-----+-----+------+------+
*
- * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
- * is the current state of grpclb, which is left untouched.
+ * A [STATE] indicates that the old child policy is kept. In those cases,
+ * STATE is the current state of xds, which is left untouched.
*
* In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
- * the previous RR instance.
+ * the previous child policy instance.
*
* Note that the status is never updated to SHUTDOWN as a result of calling
* this function. Only glb_shutdown() has the power to set that state.
*
   * (*) This function mustn't be called while shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
- switch (rr_connectivity_state_) {
+ switch (child_connectivity_state_) {
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN:
- GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
+ GPR_ASSERT(child_state_error != GRPC_ERROR_NONE);
break;
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY:
- GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
+ GPR_ASSERT(child_state_error == GRPC_ERROR_NONE);
}
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(
- GPR_INFO,
- "[xdslb %p] Setting grpclb's state to %s from new RR policy %p state.",
- this, grpc_connectivity_state_name(rr_connectivity_state_),
- rr_policy_.get());
+ gpr_log(GPR_INFO,
+ "[xdslb %p] Setting xds's state to %s from child policy %p state.",
+ this, grpc_connectivity_state_name(child_connectivity_state_),
+ child_policy_.get());
}
- grpc_connectivity_state_set(&state_tracker_, rr_connectivity_state_,
- rr_state_error,
+ grpc_connectivity_state_set(&state_tracker_, child_connectivity_state_,
+ child_state_error,
"update_lb_connectivity_status_locked");
}
-void XdsLb::OnRoundRobinConnectivityChangedLocked(void* arg,
- grpc_error* error) {
+void XdsLb::OnChildPolicyConnectivityChangedLocked(void* arg,
+ grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_) {
- xdslb_policy->Unref(DEBUG_LOCATION, "on_rr_connectivity_changed");
+ xdslb_policy->Unref(DEBUG_LOCATION, "on_child_connectivity_changed");
return;
}
- xdslb_policy->UpdateConnectivityStateFromRoundRobinPolicyLocked(
+ xdslb_policy->UpdateConnectivityStateFromChildPolicyLocked(
GRPC_ERROR_REF(error));
- // Resubscribe. Reuse the "on_rr_connectivity_changed" ref.
- xdslb_policy->rr_policy_->NotifyOnStateChangeLocked(
- &xdslb_policy->rr_connectivity_state_,
- &xdslb_policy->on_rr_connectivity_changed_);
+ // Resubscribe. Reuse the "on_child_connectivity_changed" ref.
+ xdslb_policy->child_policy_->NotifyOnStateChangeLocked(
+ &xdslb_policy->child_connectivity_state_,
+ &xdslb_policy->on_child_connectivity_changed_);
}
//
@@ -1848,7 +1780,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(addresses, args));
}
- const char* name() const override { return "xds"; }
+ const char* name() const override { return "xds_experimental"; }
};
} // namespace
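(As the revised file comment at the top of xds.cc explains, picks that arrive before the child policy exists are parked in pending_picks_ and flushed in CreateChildPolicyLocked(). A self-contained sketch of that queue-then-flush pattern; toy types only, and the real code additionally wraps on_complete to inject LB tokens and client stats:

#include <cstdio>
#include <memory>
#include <vector>

// Toy pick; the real PendingPick carries metadata, stats, and closures.
struct Pick { int id; };

class ChildPolicy {
 public:
  void PickLocked(const Pick& p) {
    std::printf("child policy served pick %d\n", p.id);
  }
};

class ParentPolicy {
 public:
  void PickLocked(const Pick& p) {
    if (child_ != nullptr) {
      child_->PickLocked(p);
    } else {
      pending_.push_back(p);  // no child policy yet: park the pick
    }
  }
  void OnFirstServerlist() {
    child_ = std::make_unique<ChildPolicy>();
    for (const Pick& p : pending_) child_->PickLocked(p);  // flush the queue
    pending_.clear();
  }
 private:
  std::unique_ptr<ChildPolicy> child_;
  std::vector<Pick> pending_;
};

int main() {
  ParentPolicy lb;
  lb.PickLocked({1});      // queued: no child policy yet
  lb.OnFirstServerlist();  // child created; pick 1 is flushed to it
  lb.PickLocked({2});      // served immediately
}
)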
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 62bdbf2689..a59deadb26 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -25,7 +25,7 @@
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/uri/uri_parser.h"
//
// representation of an LB address
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index d651b1120d..ad459c9c8c 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
@@ -94,4 +94,9 @@ LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
return factory->CreateLoadBalancingPolicy(args);
}
+bool LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(const char* name) {
+ GPR_ASSERT(g_state != nullptr);
+ return g_state->GetLoadBalancingPolicyFactory(name) != nullptr;
+}
+
} // namespace grpc_core
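(The new LoadBalancingPolicyExists() lets callers validate a policy name before trying to instantiate it, for example while parsing a service config. A hedged usage sketch: ChooseLbPolicyName and the pick_first fallback are illustrative, not part of this change, and this assumes the registry has already been initialized, since the method asserts on g_state:

#include "src/core/ext/filters/client_channel/lb_policy_registry.h"

// Hypothetical caller: prefer the requested policy if it is registered,
// otherwise fall back to pick_first instead of failing later in
// CreateLoadBalancingPolicy().
const char* ChooseLbPolicyName(const char* requested) {
  if (requested != nullptr &&
      grpc_core::LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
          requested)) {
    return requested;
  }
  return "pick_first";
}
)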
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index 2e9bb061ed..338f7c9f69 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -47,6 +47,10 @@ class LoadBalancingPolicyRegistry {
/// Creates an LB policy of the type specified by \a name.
static OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
const char* name, const LoadBalancingPolicy::Args& args);
+
+ /// Returns true if the LB policy factory specified by \a name exists in this
+ /// registry.
+ static bool LoadBalancingPolicyExists(const char* name);
};
} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/method_params.cc b/src/core/ext/filters/client_channel/method_params.cc
deleted file mode 100644
index 1f116bb67d..0000000000
--- a/src/core/ext/filters/client_channel/method_params.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/ext/filters/client_channel/method_params.h"
-#include "src/core/lib/channel/status_util.h"
-#include "src/core/lib/gpr/string.h"
-#include "src/core/lib/gprpp/memory.h"
-
-// As per the retry design, we do not allow more than 5 retry attempts.
-#define MAX_MAX_RETRY_ATTEMPTS 5
-
-namespace grpc_core {
-namespace internal {
-
-namespace {
-
-bool ParseWaitForReady(
- grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
- if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
- return false;
- }
- *wait_for_ready = field->type == GRPC_JSON_TRUE
- ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
- : ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
- return true;
-}
-
-// Parses a JSON field of the form generated for a google.proto.Duration
-// proto message, as per:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-bool ParseDuration(grpc_json* field, grpc_millis* duration) {
- if (field->type != GRPC_JSON_STRING) return false;
- size_t len = strlen(field->value);
- if (field->value[len - 1] != 's') return false;
- UniquePtr<char> buf(gpr_strdup(field->value));
- *(buf.get() + len - 1) = '\0'; // Remove trailing 's'.
- char* decimal_point = strchr(buf.get(), '.');
- int nanos = 0;
- if (decimal_point != nullptr) {
- *decimal_point = '\0';
- nanos = gpr_parse_nonnegative_int(decimal_point + 1);
- if (nanos == -1) {
- return false;
- }
- int num_digits = static_cast<int>(strlen(decimal_point + 1));
- if (num_digits > 9) { // We don't accept greater precision than nanos.
- return false;
- }
- for (int i = 0; i < (9 - num_digits); ++i) {
- nanos *= 10;
- }
- }
- int seconds =
- decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get());
- if (seconds == -1) return false;
- *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
- return true;
-}
-
-UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
- grpc_json* field) {
- auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
- if (field->type != GRPC_JSON_OBJECT) return nullptr;
- for (grpc_json* sub_field = field->child; sub_field != nullptr;
- sub_field = sub_field->next) {
- if (sub_field->key == nullptr) return nullptr;
- if (strcmp(sub_field->key, "maxAttempts") == 0) {
- if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
- retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
- if (retry_policy->max_attempts <= 1) return nullptr;
- if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
- gpr_log(GPR_ERROR,
- "service config: clamped retryPolicy.maxAttempts at %d",
- MAX_MAX_RETRY_ATTEMPTS);
- retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
- }
- } else if (strcmp(sub_field->key, "initialBackoff") == 0) {
- if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
- if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
- return nullptr;
- }
- if (retry_policy->initial_backoff == 0) return nullptr;
- } else if (strcmp(sub_field->key, "maxBackoff") == 0) {
- if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
- if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
- return nullptr;
- }
- if (retry_policy->max_backoff == 0) return nullptr;
- } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
- if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
- if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
- 1) {
- return nullptr;
- }
- if (retry_policy->backoff_multiplier <= 0) return nullptr;
- } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
- if (!retry_policy->retryable_status_codes.Empty()) {
- return nullptr; // Duplicate.
- }
- if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
- for (grpc_json* element = sub_field->child; element != nullptr;
- element = element->next) {
- if (element->type != GRPC_JSON_STRING) return nullptr;
- grpc_status_code status;
- if (!grpc_status_code_from_string(element->value, &status)) {
- return nullptr;
- }
- retry_policy->retryable_status_codes.Add(status);
- }
- if (retry_policy->retryable_status_codes.Empty()) return nullptr;
- }
- }
- // Make sure required fields are set.
- if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
- retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
- retry_policy->retryable_status_codes.Empty()) {
- return nullptr;
- }
- return retry_policy;
-}
-
-} // namespace
-
-RefCountedPtr<ClientChannelMethodParams>
-ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
- RefCountedPtr<ClientChannelMethodParams> method_params =
- MakeRefCounted<ClientChannelMethodParams>();
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) continue;
- if (strcmp(field->key, "waitForReady") == 0) {
- if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
- return nullptr; // Duplicate.
- }
- if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
- return nullptr;
- }
- } else if (strcmp(field->key, "timeout") == 0) {
- if (method_params->timeout_ > 0) return nullptr; // Duplicate.
- if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
- } else if (strcmp(field->key, "retryPolicy") == 0) {
- if (method_params->retry_policy_ != nullptr) {
- return nullptr; // Duplicate.
- }
- method_params->retry_policy_ = ParseRetryPolicy(field);
- if (method_params->retry_policy_ == nullptr) return nullptr;
- }
- }
- return method_params;
-}
-
-} // namespace internal
-} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/method_params.h b/src/core/ext/filters/client_channel/method_params.h
deleted file mode 100644
index a31d360f17..0000000000
--- a/src/core/ext/filters/client_channel/method_params.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/channel/status_util.h"
-#include "src/core/lib/gprpp/ref_counted.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
-#include "src/core/lib/json/json.h"
-
-namespace grpc_core {
-namespace internal {
-
-class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
- public:
- enum WaitForReady {
- WAIT_FOR_READY_UNSET = 0,
- WAIT_FOR_READY_FALSE,
- WAIT_FOR_READY_TRUE
- };
-
- struct RetryPolicy {
- int max_attempts = 0;
- grpc_millis initial_backoff = 0;
- grpc_millis max_backoff = 0;
- float backoff_multiplier = 0;
- StatusCodeSet retryable_status_codes;
- };
-
- /// Creates a method_parameters object from \a json.
- /// Intended for use with ServiceConfig::CreateMethodConfigTable().
- static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
- const grpc_json* json);
-
- grpc_millis timeout() const { return timeout_; }
- WaitForReady wait_for_ready() const { return wait_for_ready_; }
- const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
-
- private:
- // So New() can call our private ctor.
- template <typename T, typename... Args>
- friend T* grpc_core::New(Args&&... args);
-
- // So Delete() can call our private dtor.
- template <typename T>
- friend void grpc_core::Delete(T*);
-
- ClientChannelMethodParams() {}
- virtual ~ClientChannelMethodParams() {}
-
- grpc_millis timeout_ = 0;
- WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
- UniquePtr<RetryPolicy> retry_policy_;
-};
-
-} // namespace internal
-} // namespace grpc_core
-
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H */
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index c2af0e6c49..5c050a2333 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -23,8 +23,8 @@
#include <stddef.h>
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/uri/uri_parser.h"
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index cd11eeb9e4..601b08be24 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -27,7 +27,7 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount(false,
namespace grpc_core {
Resolver::Resolver(grpc_combiner* combiner)
- : InternallyRefCountedWithTracing(&grpc_trace_resolver_refcount),
+ : InternallyRefCounted(&grpc_trace_resolver_refcount),
combiner_(GRPC_COMBINER_REF(combiner, "resolver")) {}
Resolver::~Resolver() { GRPC_COMBINER_UNREF(combiner_, "resolver"); }
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index e9acbb7c41..9da849a101 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -44,7 +44,7 @@ namespace grpc_core {
///
/// Note: All methods with a "Locked" suffix must be called from the
/// combiner passed to the constructor.
-class Resolver : public InternallyRefCountedWithTracing<Resolver> {
+class Resolver : public InternallyRefCounted<Resolver> {
public:
// Not copyable nor movable.
Resolver(const Resolver&) = delete;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 01796ca08f..4ebc2c8161 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -120,6 +120,10 @@ class AresDnsResolver : public Resolver {
grpc_lb_addresses* lb_addresses_ = nullptr;
/// currently resolving service config
char* service_config_json_ = nullptr;
+  /// has shutdown been initiated
+  bool shutdown_initiated_ = false;
+  /// timeout in milliseconds for active DNS queries
+  int query_timeout_ms_;
};
AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
@@ -157,6 +161,11 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
grpc_combiner_scheduler(combiner()));
GRPC_CLOSURE_INIT(&on_resolved_, OnResolvedLocked, this,
grpc_combiner_scheduler(combiner()));
+ const grpc_arg* query_timeout_ms_arg =
+ grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS);
+ query_timeout_ms_ = grpc_channel_arg_get_integer(
+ query_timeout_ms_arg,
+ {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX});
}
AresDnsResolver::~AresDnsResolver() {
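The constructor change above reads a per-channel cap on how long c-ares queries may run, with 0 disabling the cap. A minimal sketch of setting the arg at channel creation, assuming GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS is the same key exposed to channel-args users:

    // Sketch: bound c-ares DNS queries to 5 seconds for this channel.
    // Assumes GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS is visible at this layer.
    grpc_arg timeout_arg = grpc_channel_arg_integer_create(
        const_cast<char*>(GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS), 5000);
    grpc_channel_args args = {1, &timeout_arg};
    grpc_channel* channel = grpc_insecure_channel_create(
        "dns:///example.com:50051", &args, nullptr);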
@@ -197,6 +206,7 @@ void AresDnsResolver::ResetBackoffLocked() {
}
void AresDnsResolver::ShutdownLocked() {
+ shutdown_initiated_ = true;
if (have_next_resolution_timer_) {
grpc_timer_cancel(&next_resolution_timer_);
}
@@ -213,9 +223,13 @@ void AresDnsResolver::ShutdownLocked() {
void AresDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
+ GRPC_CARES_TRACE_LOG(
+ "%p re-resolution timer fired. error: %s. shutdown_initiated_: %d", r,
+ grpc_error_string(error), r->shutdown_initiated_);
r->have_next_resolution_timer_ = false;
- if (error == GRPC_ERROR_NONE) {
+ if (error == GRPC_ERROR_NONE && !r->shutdown_initiated_) {
if (!r->resolving_) {
+ GRPC_CARES_TRACE_LOG("%p start resolving due to re-resolution timer", r);
r->StartResolvingLocked();
}
}
@@ -301,13 +315,12 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
gpr_free(r->pending_request_);
r->pending_request_ = nullptr;
if (r->lb_addresses_ != nullptr) {
- static const char* args_to_remove[2];
+ static const char* args_to_remove[1];
size_t num_args_to_remove = 0;
- grpc_arg new_args[3];
+ grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
- new_args[num_args_to_add++] =
+ args_to_add[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses_);
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config;
char* service_config_string = nullptr;
if (r->service_config_json_ != nullptr) {
service_config_string = ChooseServiceConfig(r->service_config_json_);
@@ -316,31 +329,19 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
gpr_log(GPR_INFO, "selected service config choice: %s",
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
- new_args[num_args_to_add++] = grpc_channel_arg_string_create(
+ args_to_add[num_args_to_add++] = grpc_channel_arg_string_create(
(char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
- service_config =
- grpc_core::ServiceConfig::Create(service_config_string);
- if (service_config != nullptr) {
- const char* lb_policy_name =
- service_config->GetLoadBalancingPolicyName();
- if (lb_policy_name != nullptr) {
- args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
- new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_LB_POLICY_NAME,
- const_cast<char*>(lb_policy_name));
- }
- }
}
}
result = grpc_channel_args_copy_and_add_and_remove(
- r->channel_args_, args_to_remove, num_args_to_remove, new_args,
+ r->channel_args_, args_to_remove, num_args_to_remove, args_to_add,
num_args_to_add);
gpr_free(service_config_string);
grpc_lb_addresses_destroy(r->lb_addresses_);
// Reset backoff state so that we start from the beginning when the
// next request gets triggered.
r->backoff_.Reset();
- } else {
+ } else if (!r->shutdown_initiated_) {
const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_millis next_try = r->backoff_.NextAttemptTime();
@@ -416,7 +417,8 @@ void AresDnsResolver::StartResolvingLocked() {
pending_request_ = grpc_dns_lookup_ares_locked(
dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
&on_resolved_, &lb_addresses_, true /* check_grpclb */,
- request_service_config_ ? &service_config_json_ : nullptr, combiner());
+ request_service_config_ ? &service_config_json_ : nullptr,
+ query_timeout_ms_, combiner());
last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
}
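The new shutdown_initiated_ checks follow a standard combiner-callback guard: a timer or lookup completion can race ShutdownLocked(), so each callback re-checks the flag before starting more work. A sketch of the pattern with hypothetical names (MyResolver, StartResolving):

    struct MyResolver {  // hypothetical stand-in for AresDnsResolver
      bool have_timer = false;
      bool shutdown_initiated = false;
      void StartResolving();
      void Unref();
    };

    static void OnTimerLocked(void* arg, grpc_error* error) {
      MyResolver* r = static_cast<MyResolver*>(arg);
      r->have_timer = false;
      // Skip work if the timer was cancelled (error set) or if shutdown
      // started after the timer was armed.
      if (error == GRPC_ERROR_NONE && !r->shutdown_initiated) {
        r->StartResolving();
      }
      r->Unref();  // balances the ref taken when the timer was armed
    }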
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
index fdbd07ebf5..f42b1e309d 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
@@ -33,6 +33,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
typedef struct fd_node {
/** the owner of this fd node */
@@ -76,6 +77,12 @@ struct grpc_ares_ev_driver {
grpc_ares_request* request;
/** Owned by the ev_driver. Creates new GrpcPolledFd's */
grpc_core::UniquePtr<grpc_core::GrpcPolledFdFactory> polled_fd_factory;
+ /** query timeout in milliseconds */
+ int query_timeout_ms;
+ /** alarm to cancel active queries */
+ grpc_timer query_timeout;
+ /** cancels queries on a timeout */
+ grpc_closure on_timeout_locked;
};
static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver);
@@ -116,8 +123,11 @@ static void fd_node_shutdown_locked(fd_node* fdn, const char* reason) {
}
}
+static void on_timeout_locked(void* arg, grpc_error* error);
+
grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set,
+ int query_timeout_ms,
grpc_combiner* combiner,
grpc_ares_request* request) {
*ev_driver = grpc_core::New<grpc_ares_ev_driver>();
@@ -146,6 +156,9 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner);
(*ev_driver)
->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel);
+ GRPC_CLOSURE_INIT(&(*ev_driver)->on_timeout_locked, on_timeout_locked,
+ *ev_driver, grpc_combiner_scheduler(combiner));
+ (*ev_driver)->query_timeout_ms = query_timeout_ms;
return GRPC_ERROR_NONE;
}
@@ -155,6 +168,7 @@ void grpc_ares_ev_driver_on_queries_complete_locked(
// is working, grpc_ares_notify_on_event_locked will shut down the
// fds; if it's not working, there are no fds to shut down.
ev_driver->shutting_down = true;
+ grpc_timer_cancel(&ev_driver->query_timeout);
grpc_ares_ev_driver_unref(ev_driver);
}
@@ -185,6 +199,17 @@ static fd_node* pop_fd_node_locked(fd_node** head, ares_socket_t as) {
return nullptr;
}
+static void on_timeout_locked(void* arg, grpc_error* error) {
+ grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
+ GRPC_CARES_TRACE_LOG(
+ "ev_driver=%p on_timeout_locked. driver->shutting_down=%d. err=%s",
+ driver, driver->shutting_down, grpc_error_string(error));
+ if (!driver->shutting_down && error == GRPC_ERROR_NONE) {
+ grpc_ares_ev_driver_shutdown_locked(driver);
+ }
+ grpc_ares_ev_driver_unref(driver);
+}
+
static void on_readable_locked(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
@@ -314,6 +339,17 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) {
if (!ev_driver->working) {
ev_driver->working = true;
grpc_ares_notify_on_event_locked(ev_driver);
+ grpc_millis timeout =
+ ev_driver->query_timeout_ms == 0
+ ? GRPC_MILLIS_INF_FUTURE
+ : ev_driver->query_timeout_ms + grpc_core::ExecCtx::Get()->Now();
+    GRPC_CARES_TRACE_LOG(
+        "ev_driver=%p grpc_ares_ev_driver_start_locked. timeout deadline: "
+        "%" PRId64 " ms",
+        ev_driver, timeout);
+ grpc_ares_ev_driver_ref(ev_driver);
+ grpc_timer_init(&ev_driver->query_timeout, timeout,
+ &ev_driver->on_timeout_locked);
}
}
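grpc_timer_init takes an absolute grpc_millis deadline, so the hunk above converts the relative query_timeout_ms, with 0 meaning the queries never time out. The conversion in isolation (helper name is illustrative):

    grpc_millis QueryDeadline(int query_timeout_ms) {
      // 0 disables the timeout entirely.
      return query_timeout_ms == 0
                 ? GRPC_MILLIS_INF_FUTURE
                 : grpc_core::ExecCtx::Get()->Now() + query_timeout_ms;
    }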
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 671c537fe7..b8cefd9470 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -43,6 +43,7 @@ ares_channel* grpc_ares_ev_driver_get_channel_locked(
created successfully. */
grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set,
+ int query_timeout_ms,
grpc_combiner* combiner,
grpc_ares_request* request);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 582e2203fc..55715869b6 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -359,7 +359,7 @@ done:
void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
grpc_ares_request* r, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
- bool check_grpclb, grpc_combiner* combiner) {
+ bool check_grpclb, int query_timeout_ms, grpc_combiner* combiner) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = nullptr;
ares_channel* channel = nullptr;
@@ -388,7 +388,7 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
port = gpr_strdup(default_port);
}
error = grpc_ares_ev_driver_create_locked(&r->ev_driver, interested_parties,
- combiner, r);
+ query_timeout_ms, combiner, r);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
channel = grpc_ares_ev_driver_get_channel_locked(r->ev_driver);
// If dns_server is specified, use it.
@@ -522,7 +522,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
- grpc_combiner* combiner) {
+ int query_timeout_ms, grpc_combiner* combiner) {
grpc_ares_request* r =
static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
r->ev_driver = nullptr;
@@ -546,7 +546,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
// Look up name using c-ares lib.
grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
r, dns_server, name, default_port, interested_parties, check_grpclb,
- combiner);
+ query_timeout_ms, combiner);
return r;
}
@@ -554,6 +554,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ int query_timeout_ms,
grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {
@@ -648,7 +649,8 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
r->ares_request = grpc_dns_lookup_ares_locked(
nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
&r->on_dns_lookup_done_locked, &r->lb_addrs, false /* check_grpclb */,
- nullptr /* service_config_json */, r->combiner);
+ nullptr /* service_config_json */, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS,
+ r->combiner);
}
static void grpc_resolve_address_ares_impl(const char* name,
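grpc_dns_lookup_ares_locked stays a mutable function pointer initialized to the _impl function so tests can interpose a fake lookup path. The seam, reduced to a sketch with hypothetical names:

    typedef int (*dns_lookup_fn)(const char* name);

    static int real_dns_lookup(const char* name) { return 0; }

    // Production code always calls through the pointer.
    dns_lookup_fn g_dns_lookup = real_dns_lookup;

    // A test swaps in a fake before exercising the code under test.
    static int fake_dns_lookup(const char* name) { return 42; }
    void InstallFakeDnsLookupForTest() { g_dns_lookup = fake_dns_lookup; }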
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index a1231cc4e0..9acef1d0ca 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -26,6 +26,8 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#define GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS 10000
+
extern grpc_core::TraceFlag grpc_trace_cares_address_sorting;
extern grpc_core::TraceFlag grpc_trace_cares_resolver;
@@ -60,7 +62,7 @@ extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addresses, bool check_grpclb,
- char** service_config_json, grpc_combiner* combiner);
+ char** service_config_json, int query_timeout_ms, grpc_combiner* combiner);
/* Cancel the pending grpc_ares_request \a request */
extern void (*grpc_cancel_ares_request_locked)(grpc_ares_request* request);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index 9f293c1ac0..fc78b18304 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -30,7 +30,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
- grpc_combiner* combiner) {
+ int query_timeout_ms, grpc_combiner* combiner) {
return NULL;
}
@@ -38,6 +38,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ int query_timeout_ms,
grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index 144ac24a56..3aa690bea4 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -103,7 +103,7 @@ void FakeResolver::NextLocked(grpc_channel_args** target_result,
}
void FakeResolver::RequestReresolutionLocked() {
- if (reresolution_results_ != nullptr) {
+ if (reresolution_results_ != nullptr || return_failure_) {
grpc_channel_args_destroy(next_results_);
next_results_ = grpc_channel_args_copy(reresolution_results_);
MaybeFinishNextLocked();
@@ -141,6 +141,7 @@ struct SetResponseClosureArg {
grpc_closure set_response_closure;
FakeResolverResponseGenerator* generator;
grpc_channel_args* response;
+ bool immediate = true;
};
void FakeResolverResponseGenerator::SetResponseLocked(void* arg,
@@ -194,7 +195,7 @@ void FakeResolverResponseGenerator::SetFailureLocked(void* arg,
SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
FakeResolver* resolver = closure_arg->generator->resolver_;
resolver->return_failure_ = true;
- resolver->MaybeFinishNextLocked();
+ if (closure_arg->immediate) resolver->MaybeFinishNextLocked();
Delete(closure_arg);
}
@@ -209,6 +210,18 @@ void FakeResolverResponseGenerator::SetFailure() {
GRPC_ERROR_NONE);
}
+void FakeResolverResponseGenerator::SetFailureOnReresolution() {
+ GPR_ASSERT(resolver_ != nullptr);
+ SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
+ closure_arg->generator = this;
+ closure_arg->immediate = false;
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked,
+ closure_arg,
+ grpc_combiner_scheduler(resolver_->combiner())),
+ GRPC_ERROR_NONE);
+}
+
namespace {
static void* response_generator_arg_copy(void* p) {
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index 708eaf1147..7f69059351 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -20,9 +20,9 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/uri/uri_parser.h"
#define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \
"grpc.fake_resolver.response_generator"
@@ -61,6 +61,10 @@ class FakeResolverResponseGenerator
// returning a null result with no error).
void SetFailure();
+ // Same as SetFailure(), but instead of returning the error
+ // immediately, waits for the next call to RequestReresolutionLocked().
+ void SetFailureOnReresolution();
+
// Returns a channel arg containing \a generator.
static grpc_arg MakeChannelArg(FakeResolverResponseGenerator* generator);
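A sketch of how a test might drive the new method; response_args is an assumed grpc_channel_args* carrying the initial addresses:

    auto generator =
        grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
    generator->SetResponse(response_args);   // first resolution succeeds
    generator->SetFailureOnReresolution();   // next re-resolution fails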
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index ee3cfeeb9b..d891ef62e1 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -24,11 +24,11 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/resolver.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/uri/uri_parser.h"
namespace grpc_core {
diff --git a/src/core/ext/filters/client_channel/resolver_result_parsing.cc b/src/core/ext/filters/client_channel/resolver_result_parsing.cc
new file mode 100644
index 0000000000..4f7fd6b424
--- /dev/null
+++ b/src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -0,0 +1,369 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
+
+// As per the retry design, we do not allow more than 5 retry attempts.
+#define MAX_MAX_RETRY_ATTEMPTS 5
+
+namespace grpc_core {
+namespace internal {
+
+ProcessedResolverResult::ProcessedResolverResult(
+ const grpc_channel_args* resolver_result, bool parse_retry) {
+ ProcessServiceConfig(resolver_result, parse_retry);
+  // If no LB config was found above, fall back to finding the LB policy name.
+ if (lb_policy_name_ == nullptr) ProcessLbPolicyName(resolver_result);
+}
+
+void ProcessedResolverResult::ProcessServiceConfig(
+ const grpc_channel_args* resolver_result, bool parse_retry) {
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_SERVICE_CONFIG);
+ const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
+ if (service_config_json != nullptr) {
+ service_config_json_.reset(gpr_strdup(service_config_json));
+ service_config_ = grpc_core::ServiceConfig::Create(service_config_json);
+ if (service_config_ != nullptr) {
+ if (parse_retry) {
+ channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_SERVER_URI);
+ const char* server_uri = grpc_channel_arg_get_string(channel_arg);
+ GPR_ASSERT(server_uri != nullptr);
+        grpc_uri* uri = grpc_uri_parse(server_uri, true);
+        GPR_ASSERT(uri != nullptr && uri->path[0] != '\0');
+ server_name_ = uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ service_config_->ParseGlobalParams(ParseServiceConfig, this);
+ grpc_uri_destroy(uri);
+ } else {
+ service_config_->ParseGlobalParams(ParseServiceConfig, this);
+ }
+ method_params_table_ = service_config_->CreateMethodConfigTable(
+ ClientChannelMethodParams::CreateFromJson);
+ }
+ }
+}
+
+void ProcessedResolverResult::ProcessLbPolicyName(
+ const grpc_channel_args* resolver_result) {
+ // Prefer the LB policy name found in the service config. Note that this is
+ // checking the deprecated loadBalancingPolicy field, rather than the new
+ // loadBalancingConfig field.
+ if (service_config_ != nullptr) {
+ lb_policy_name_.reset(
+ gpr_strdup(service_config_->GetLoadBalancingPolicyName()));
+ // Convert to lower-case.
+ if (lb_policy_name_ != nullptr) {
+ char* lb_policy_name = lb_policy_name_.get();
+ for (size_t i = 0; i < strlen(lb_policy_name); ++i) {
+ lb_policy_name[i] = tolower(lb_policy_name[i]);
+ }
+ }
+ }
+ // Otherwise, find the LB policy name set by the client API.
+ if (lb_policy_name_ == nullptr) {
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_LB_POLICY_NAME);
+ lb_policy_name_.reset(gpr_strdup(grpc_channel_arg_get_string(channel_arg)));
+ }
+ // Special case: If at least one balancer address is present, we use
+ // the grpclb policy, regardless of what the resolver has returned.
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_LB_ADDRESSES);
+ if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
+ if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
+ if (lb_policy_name_ != nullptr &&
+ strcmp(lb_policy_name_.get(), "grpclb") != 0) {
+ gpr_log(GPR_INFO,
+ "resolver requested LB policy %s but provided at least one "
+ "balancer address -- forcing use of grpclb LB policy",
+ lb_policy_name_.get());
+ }
+ lb_policy_name_.reset(gpr_strdup("grpclb"));
+ }
+ }
+ // Use pick_first if nothing was specified and we didn't select grpclb
+ // above.
+ if (lb_policy_name_ == nullptr) {
+ lb_policy_name_.reset(gpr_strdup("pick_first"));
+ }
+}
+
+void ProcessedResolverResult::ParseServiceConfig(
+ const grpc_json* field, ProcessedResolverResult* parsing_state) {
+ parsing_state->ParseLbConfigFromServiceConfig(field);
+ if (parsing_state->server_name_ != nullptr) {
+ parsing_state->ParseRetryThrottleParamsFromServiceConfig(field);
+ }
+}
+
+void ProcessedResolverResult::ParseLbConfigFromServiceConfig(
+ const grpc_json* field) {
+ if (lb_policy_config_ != nullptr) return; // Already found.
+ // Find the LB config global parameter.
+ if (field->key == nullptr || strcmp(field->key, "loadBalancingConfig") != 0 ||
+ field->type != GRPC_JSON_ARRAY) {
+    return;  // Not a valid LB config array.
+ }
+ // Find the first LB policy that this client supports.
+ for (grpc_json* lb_config = field->child; lb_config != nullptr;
+ lb_config = lb_config->next) {
+ if (lb_config->type != GRPC_JSON_OBJECT) return;
+ // Find the policy object.
+ grpc_json* policy = nullptr;
+ for (grpc_json* field = lb_config->child; field != nullptr;
+ field = field->next) {
+ if (field->key == nullptr || strcmp(field->key, "policy") != 0 ||
+ field->type != GRPC_JSON_OBJECT) {
+ return;
+ }
+ if (policy != nullptr) return; // Duplicate.
+ policy = field;
+    }
+    if (policy == nullptr) return;  // No policy field found.
+    // Find the specific policy content, since the policy object is of type
+    // "oneof".
+    grpc_json* policy_content = nullptr;
+    for (grpc_json* field = policy->child; field != nullptr;
+ field = field->next) {
+ if (field->key == nullptr || field->type != GRPC_JSON_OBJECT) return;
+      if (policy_content != nullptr) return;  // Violates "oneof" semantics.
+      policy_content = field;
+    }
+    if (policy_content == nullptr) return;  // Empty policy object.
+    // If we support this policy, then select it.
+    if (grpc_core::LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
+            policy_content->key)) {
+ lb_policy_name_.reset(gpr_strdup(policy_content->key));
+ lb_policy_config_ = policy_content->child;
+ return;
+ }
+ }
+}
+
+void ProcessedResolverResult::ParseRetryThrottleParamsFromServiceConfig(
+ const grpc_json* field) {
+ if (strcmp(field->key, "retryThrottling") == 0) {
+ if (retry_throttle_data_ != nullptr) return; // Duplicate.
+ if (field->type != GRPC_JSON_OBJECT) return;
+ int max_milli_tokens = 0;
+ int milli_token_ratio = 0;
+ for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ sub_field = sub_field->next) {
+ if (sub_field->key == nullptr) return;
+ if (strcmp(sub_field->key, "maxTokens") == 0) {
+ if (max_milli_tokens != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
+ if (max_milli_tokens == -1) return;
+ max_milli_tokens *= 1000;
+ } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
+ if (milli_token_ratio != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ // We support up to 3 decimal digits.
+ size_t whole_len = strlen(sub_field->value);
+ uint32_t multiplier = 1;
+ uint32_t decimal_value = 0;
+ const char* decimal_point = strchr(sub_field->value, '.');
+ if (decimal_point != nullptr) {
+ whole_len = static_cast<size_t>(decimal_point - sub_field->value);
+ multiplier = 1000;
+ size_t decimal_len = strlen(decimal_point + 1);
+ if (decimal_len > 3) decimal_len = 3;
+ if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
+ &decimal_value)) {
+ return;
+ }
+ uint32_t decimal_multiplier = 1;
+ for (size_t i = 0; i < (3 - decimal_len); ++i) {
+ decimal_multiplier *= 10;
+ }
+ decimal_value *= decimal_multiplier;
+ }
+ uint32_t whole_value;
+ if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
+ &whole_value)) {
+ return;
+ }
+ milli_token_ratio =
+ static_cast<int>((whole_value * multiplier) + decimal_value);
+ if (milli_token_ratio <= 0) return;
+ }
+ }
+ retry_throttle_data_ =
+ grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
+ server_name_, max_milli_tokens, milli_token_ratio);
+ }
+}
+
+namespace {
+
+bool ParseWaitForReady(
+ grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
+ if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
+ return false;
+ }
+ *wait_for_ready = field->type == GRPC_JSON_TRUE
+ ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
+ : ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
+ return true;
+}
+
+// Parses a JSON field of the form generated for a google.proto.Duration
+// proto message, as per:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+bool ParseDuration(grpc_json* field, grpc_millis* duration) {
+ if (field->type != GRPC_JSON_STRING) return false;
+  size_t len = strlen(field->value);
+  if (len == 0 || field->value[len - 1] != 's') return false;
+ UniquePtr<char> buf(gpr_strdup(field->value));
+ *(buf.get() + len - 1) = '\0'; // Remove trailing 's'.
+ char* decimal_point = strchr(buf.get(), '.');
+ int nanos = 0;
+ if (decimal_point != nullptr) {
+ *decimal_point = '\0';
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
+ return false;
+ }
+ int num_digits = static_cast<int>(strlen(decimal_point + 1));
+ if (num_digits > 9) { // We don't accept greater precision than nanos.
+ return false;
+ }
+ for (int i = 0; i < (9 - num_digits); ++i) {
+ nanos *= 10;
+ }
+ }
+ int seconds =
+ decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get());
+ if (seconds == -1) return false;
+ *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
+ return true;
+}
+
+UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
+ grpc_json* field) {
+ auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
+ if (field->type != GRPC_JSON_OBJECT) return nullptr;
+ for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ sub_field = sub_field->next) {
+ if (sub_field->key == nullptr) return nullptr;
+ if (strcmp(sub_field->key, "maxAttempts") == 0) {
+ if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
+ if (retry_policy->max_attempts <= 1) return nullptr;
+ if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
+ gpr_log(GPR_ERROR,
+ "service config: clamped retryPolicy.maxAttempts at %d",
+ MAX_MAX_RETRY_ATTEMPTS);
+ retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
+ }
+ } else if (strcmp(sub_field->key, "initialBackoff") == 0) {
+ if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->initial_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "maxBackoff") == 0) {
+ if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->max_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
+ if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
+ 1) {
+ return nullptr;
+ }
+ if (retry_policy->backoff_multiplier <= 0) return nullptr;
+ } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
+ if (!retry_policy->retryable_status_codes.Empty()) {
+ return nullptr; // Duplicate.
+ }
+ if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
+ for (grpc_json* element = sub_field->child; element != nullptr;
+ element = element->next) {
+ if (element->type != GRPC_JSON_STRING) return nullptr;
+ grpc_status_code status;
+ if (!grpc_status_code_from_string(element->value, &status)) {
+ return nullptr;
+ }
+ retry_policy->retryable_status_codes.Add(status);
+ }
+ if (retry_policy->retryable_status_codes.Empty()) return nullptr;
+ }
+ }
+ // Make sure required fields are set.
+ if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
+ retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
+ retry_policy->retryable_status_codes.Empty()) {
+ return nullptr;
+ }
+ return retry_policy;
+}
+
+} // namespace
+
+RefCountedPtr<ClientChannelMethodParams>
+ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
+ RefCountedPtr<ClientChannelMethodParams> method_params =
+ MakeRefCounted<ClientChannelMethodParams>();
+ for (grpc_json* field = json->child; field != nullptr; field = field->next) {
+ if (field->key == nullptr) continue;
+ if (strcmp(field->key, "waitForReady") == 0) {
+ if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
+ return nullptr; // Duplicate.
+ }
+ if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
+ return nullptr;
+ }
+ } else if (strcmp(field->key, "timeout") == 0) {
+ if (method_params->timeout_ > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
+ } else if (strcmp(field->key, "retryPolicy") == 0) {
+ if (method_params->retry_policy_ != nullptr) {
+ return nullptr; // Duplicate.
+ }
+ method_params->retry_policy_ = ParseRetryPolicy(field);
+ if (method_params->retry_policy_ == nullptr) return nullptr;
+ }
+ }
+ return method_params;
+}
+
+} // namespace internal
+} // namespace grpc_core
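To see the parsers above end to end, a sketch that feeds one method config object through CreateFromJson; the helper and values are illustrative, and grpc_json_parse_string parses its argument in place:

    #include <grpc/support/alloc.h>
    #include <grpc/support/string_util.h>

    #include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
    #include "src/core/lib/json/json.h"

    void ExampleParseMethodConfig() {
      // "1.5s" exercises ParseDuration (1500ms); retryPolicy must carry all
      // four required fields plus at least one retryable status code.
      char* json_str = gpr_strdup(
          "{\"waitForReady\": true, \"timeout\": \"1.5s\","
          " \"retryPolicy\": {\"maxAttempts\": 3,"
          "   \"initialBackoff\": \"0.1s\", \"maxBackoff\": \"1s\","
          "   \"backoffMultiplier\": 2,"
          "   \"retryableStatusCodes\": [\"UNAVAILABLE\"]}}");
      grpc_json* json = grpc_json_parse_string(json_str);  // parses in place
      auto params =
          grpc_core::internal::ClientChannelMethodParams::CreateFromJson(json);
      // params->timeout() == 1500; params->retry_policy()->max_attempts == 3.
      grpc_json_destroy(json);
      gpr_free(json_str);
    }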
diff --git a/src/core/ext/filters/client_channel/resolver_result_parsing.h b/src/core/ext/filters/client_channel/resolver_result_parsing.h
new file mode 100644
index 0000000000..f1fb7406bc
--- /dev/null
+++ b/src/core/ext/filters/client_channel/resolver_result_parsing.h
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_RESULT_PARSING_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_RESULT_PARSING_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/retry_throttle.h"
+#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
+#include "src/core/lib/json/json.h"
+#include "src/core/lib/slice/slice_hash_table.h"
+#include "src/core/lib/transport/service_config.h"
+
+namespace grpc_core {
+namespace internal {
+
+class ClientChannelMethodParams;
+
+// A table mapping from a method name to its method parameters.
+typedef grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+ ClientChannelMethodParamsTable;
+
+// A container of processed fields from the resolver result. Simplifies
+// consumption of the resolver result.
+class ProcessedResolverResult {
+ public:
+  // Processes the resolver result and populates the relevant members
+  // for later consumption. Parses retry parameters only if parse_retry
+  // is true.
+ ProcessedResolverResult(const grpc_channel_args* resolver_result,
+ bool parse_retry);
+
+  // Getters. Ownership of any managed object is transferred to the caller.
+ grpc_core::UniquePtr<char> service_config_json() {
+ return std::move(service_config_json_);
+ }
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
+ return std::move(retry_throttle_data_);
+ }
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable>
+ method_params_table() {
+ return std::move(method_params_table_);
+ }
+ grpc_core::UniquePtr<char> lb_policy_name() {
+ return std::move(lb_policy_name_);
+ }
+ grpc_json* lb_policy_config() { return lb_policy_config_; }
+
+ private:
+ // Finds the service config; extracts LB config and (maybe) retry throttle
+ // params from it.
+ void ProcessServiceConfig(const grpc_channel_args* resolver_result,
+ bool parse_retry);
+
+ // Finds the LB policy name (when no LB config was found).
+ void ProcessLbPolicyName(const grpc_channel_args* resolver_result);
+
+ // Parses the service config. Intended to be used by
+ // ServiceConfig::ParseGlobalParams.
+ static void ParseServiceConfig(const grpc_json* field,
+ ProcessedResolverResult* parsing_state);
+ // Parses the LB config from service config.
+ void ParseLbConfigFromServiceConfig(const grpc_json* field);
+ // Parses the retry throttle parameters from service config.
+ void ParseRetryThrottleParamsFromServiceConfig(const grpc_json* field);
+
+ // Service config.
+ grpc_core::UniquePtr<char> service_config_json_;
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config_;
+ // LB policy.
+ grpc_json* lb_policy_config_ = nullptr;
+ grpc_core::UniquePtr<char> lb_policy_name_;
+ // Retry throttle data.
+ char* server_name_ = nullptr;
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
+ // Method params table.
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
+};
+
+// The parameters of a method.
+class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
+ public:
+ enum WaitForReady {
+ WAIT_FOR_READY_UNSET = 0,
+ WAIT_FOR_READY_FALSE,
+ WAIT_FOR_READY_TRUE
+ };
+
+ struct RetryPolicy {
+ int max_attempts = 0;
+ grpc_millis initial_backoff = 0;
+ grpc_millis max_backoff = 0;
+ float backoff_multiplier = 0;
+ StatusCodeSet retryable_status_codes;
+ };
+
+ /// Creates a method_parameters object from \a json.
+ /// Intended for use with ServiceConfig::CreateMethodConfigTable().
+ static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
+ const grpc_json* json);
+
+ grpc_millis timeout() const { return timeout_; }
+ WaitForReady wait_for_ready() const { return wait_for_ready_; }
+ const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T, typename... Args>
+ friend T* grpc_core::New(Args&&... args);
+
+ // So Delete() can call our private dtor.
+ template <typename T>
+ friend void grpc_core::Delete(T*);
+
+ ClientChannelMethodParams() {}
+ virtual ~ClientChannelMethodParams() {}
+
+ grpc_millis timeout_ = 0;
+ WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
+ UniquePtr<RetryPolicy> retry_policy_;
+};
+
+} // namespace internal
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_RESULT_PARSING_H */
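For reference, a service config of the shape these parsers consume; values are illustrative, and note that this version expects each loadBalancingConfig entry wrapped in a "policy" object:

    // Illustrative only: a config exercising the global params parsed above.
    // tokenRatio 0.5 becomes milli_token_ratio 500; maxTokens 10 becomes
    // max_milli_tokens 10000.
    const char* kExampleServiceConfig =
        "{"
        "  \"loadBalancingConfig\": [ { \"policy\": { \"round_robin\": {} } } ],"
        "  \"retryThrottling\": { \"maxTokens\": 10, \"tokenRatio\": 0.5 },"
        "  \"methodConfig\": [ {"
        "    \"name\": [ { \"service\": \"EchoService\" } ],"
        "    \"waitForReady\": true, \"timeout\": \"2s\""
        "  } ]"
        "}";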
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index e4c6efe862..af55f7710e 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -34,7 +34,6 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@@ -54,6 +53,7 @@
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/status_metadata.h"
+#include "src/core/lib/uri/uri_parser.h"
#define INTERNAL_REF_BITS 16
#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1))
@@ -153,7 +153,7 @@ struct grpc_subchannel {
/** have we started the backoff loop */
bool backoff_begun;
// reset_backoff() was called while alarm was pending
- bool deferred_reset_backoff;
+ bool retry_immediately;
/** our alarm */
grpc_timer alarm;
@@ -162,12 +162,16 @@ struct grpc_subchannel {
};
struct grpc_subchannel_call {
+ grpc_subchannel_call(grpc_core::ConnectedSubchannel* connection,
+ const grpc_core::ConnectedSubchannel::CallArgs& args)
+ : connection(connection), deadline(args.deadline) {}
+
grpc_core::ConnectedSubchannel* connection;
- grpc_closure* schedule_closure_after_destroy;
+ grpc_closure* schedule_closure_after_destroy = nullptr;
// state needed to support channelz interception of recv trailing metadata.
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata;
- grpc_metadata_batch* recv_trailing_metadata;
+ grpc_metadata_batch* recv_trailing_metadata = nullptr;
grpc_millis deadline;
};
@@ -705,8 +709,8 @@ static void on_alarm(void* arg, grpc_error* error) {
if (c->disconnected) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
&error, 1);
- } else if (c->deferred_reset_backoff) {
- c->deferred_reset_backoff = false;
+ } else if (c->retry_immediately) {
+ c->retry_immediately = false;
error = GRPC_ERROR_NONE;
} else {
GRPC_ERROR_REF(error);
@@ -883,12 +887,12 @@ static void on_subchannel_connected(void* arg, grpc_error* error) {
void grpc_subchannel_reset_backoff(grpc_subchannel* subchannel) {
gpr_mu_lock(&subchannel->mu);
+ subchannel->backoff->Reset();
if (subchannel->have_alarm) {
- subchannel->deferred_reset_backoff = true;
+ subchannel->retry_immediately = true;
grpc_timer_cancel(&subchannel->alarm);
} else {
subchannel->backoff_begun = false;
- subchannel->backoff->Reset();
maybe_start_connecting_locked(subchannel);
}
gpr_mu_unlock(&subchannel->mu);
@@ -905,6 +909,7 @@ static void subchannel_call_destroy(void* call, grpc_error* error) {
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
c->schedule_closure_after_destroy);
connection->Unref(DEBUG_LOCATION, "subchannel_call");
+ c->~grpc_subchannel_call();
}
void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
@@ -1067,7 +1072,7 @@ ConnectedSubchannel::ConnectedSubchannel(
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
channelz_subchannel,
intptr_t socket_uuid)
- : RefCountedWithTracing<ConnectedSubchannel>(&grpc_trace_stream_refcount),
+ : RefCounted<ConnectedSubchannel>(&grpc_trace_stream_refcount),
channel_stack_(channel_stack),
channelz_subchannel_(std::move(channelz_subchannel)),
socket_uuid_(socket_uuid) {}
@@ -1102,14 +1107,12 @@ grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
const size_t allocation_size =
GetInitialCallSizeEstimate(args.parent_data_size);
- *call = static_cast<grpc_subchannel_call*>(
- gpr_arena_alloc(args.arena, allocation_size));
+ *call = new (gpr_arena_alloc(args.arena, allocation_size))
+ grpc_subchannel_call(this, args);
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
RefCountedPtr<ConnectedSubchannel> connection =
Ref(DEBUG_LOCATION, "subchannel_call");
connection.release(); // Ref is passed to the grpc_subchannel_call object.
- (*call)->connection = this;
- (*call)->deadline = args.deadline;
const grpc_call_element_args call_args = {
callstk, /* call_stack */
nullptr, /* server_transport_data */
@@ -1128,6 +1131,9 @@ grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
return error;
}
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
+ if (channelz_subchannel_ != nullptr) {
+ channelz_subchannel_->RecordCallStarted();
+ }
return GRPC_ERROR_NONE;
}
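CreateCall now placement-news the grpc_subchannel_call into arena storage, paired with the explicit destructor call added to subchannel_call_destroy; arenas free bytes in bulk and never run destructors. The pattern in isolation, with a hypothetical type:

    #include <new>  // placement new; grpc_millis comes from exec_ctx.h

    struct CallState {  // hypothetical stand-in for grpc_subchannel_call
      explicit CallState(grpc_millis deadline) : deadline(deadline) {}
      grpc_millis deadline;
    };

    // Construct into arena-owned bytes; non-trivial members require an
    // explicit ~CallState() before the arena's bulk free.
    CallState* MakeCallState(gpr_arena* arena, grpc_millis deadline) {
      void* mem = gpr_arena_alloc(arena, sizeof(CallState));
      return new (mem) CallState(deadline);
    }

    void DestroyCallState(CallState* st) { st->~CallState(); }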
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index ec3b4d86e4..69c2456ec2 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -72,7 +72,7 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
namespace grpc_core {
-class ConnectedSubchannel : public RefCountedWithTracing<ConnectedSubchannel> {
+class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
public:
struct CallArgs {
grpc_polling_entity* pollent;
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 1c23a6c4be..aa8441f17b 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -91,7 +91,7 @@ void grpc_subchannel_key_destroy(grpc_subchannel_key* k) {
gpr_free(k);
}
-static void sck_avl_destroy(void* p, void* user_data) {
+static void sck_avl_destroy(void* p, void* unused) {
grpc_subchannel_key_destroy(static_cast<grpc_subchannel_key*>(p));
}
@@ -104,7 +104,7 @@ static long sck_avl_compare(void* a, void* b, void* unused) {
static_cast<grpc_subchannel_key*>(b));
}
-static void scv_avl_destroy(void* p, void* user_data) {
+static void scv_avl_destroy(void* p, void* unused) {
GRPC_SUBCHANNEL_WEAK_UNREF((grpc_subchannel*)p, "subchannel_index");
}
@@ -137,7 +137,7 @@ void grpc_subchannel_index_shutdown(void) {
void grpc_subchannel_index_unref(void) {
if (gpr_unref(&g_refcount)) {
gpr_mu_destroy(&g_mu);
- grpc_avl_unref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(g_subchannel_index, nullptr);
}
}
@@ -147,13 +147,12 @@ grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key) {
// Lock, and take a reference to the subchannel index.
// We don't need to do the search under a lock as avl's are immutable.
gpr_mu_lock(&g_mu);
- grpc_avl index = grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr);
gpr_mu_unlock(&g_mu);
grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- (grpc_subchannel*)grpc_avl_get(index, key, grpc_core::ExecCtx::Get()),
- "index_find");
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ (grpc_subchannel*)grpc_avl_get(index, key, nullptr), "index_find");
+ grpc_avl_unref(index, nullptr);
return c;
}
@@ -169,13 +168,11 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
- grpc_avl index =
- grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr);
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = static_cast<grpc_subchannel*>(
- grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
+ c = static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr));
if (c != nullptr) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -184,11 +181,9 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
need_to_unref_constructed = true;
} else {
// no -> update the avl and compare/swap
- grpc_avl updated =
- grpc_avl_add(grpc_avl_ref(index, grpc_core::ExecCtx::Get()),
- subchannel_key_copy(key),
- GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"),
- grpc_core::ExecCtx::Get());
+ grpc_avl updated = grpc_avl_add(
+ grpc_avl_ref(index, nullptr), subchannel_key_copy(key),
+ GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"), nullptr);
// it may happen (but it's expected to be unlikely)
// that some other thread has changed the index:
@@ -200,9 +195,9 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
}
gpr_mu_unlock(&g_mu);
- grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(updated, nullptr);
}
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, nullptr);
}
if (need_to_unref_constructed) {
@@ -219,24 +214,22 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
- grpc_avl index =
- grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr);
gpr_mu_unlock(&g_mu);
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel* c = static_cast<grpc_subchannel*>(
- grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
+ grpc_subchannel* c =
+ static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr));
if (c != constructed) {
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, nullptr);
break;
}
// compare and swap the update (some other thread may have
// mutated the index behind us)
grpc_avl updated =
- grpc_avl_remove(grpc_avl_ref(index, grpc_core::ExecCtx::Get()), key,
- grpc_core::ExecCtx::Get());
+ grpc_avl_remove(grpc_avl_ref(index, nullptr), key, nullptr);
gpr_mu_lock(&g_mu);
if (index.root == g_subchannel_index.root) {
@@ -245,8 +238,8 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
}
gpr_mu_unlock(&g_mu);
- grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(updated, nullptr);
+ grpc_avl_unref(index, nullptr);
}
}
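The register/unregister loops above are compare-and-swap loops over an immutable AVL: snapshot the index, build an updated tree, and install it only if the root has not moved in the meantime. The same pattern sketched with std::map copies and std::shared_ptr standing in for grpc_avl refcounts:

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    // Sketch only: shared_ptr plays the role of grpc_avl's refcounting.
    static std::shared_ptr<const std::map<std::string, int>> g_index =
        std::make_shared<std::map<std::string, int>>();
    static std::mutex g_mu;

    void Register(const std::string& key, int value) {
      for (;;) {
        std::shared_ptr<const std::map<std::string, int>> snapshot;
        {
          std::lock_guard<std::mutex> lock(g_mu);  // ref the current index
          snapshot = g_index;
        }
        auto updated =
            std::make_shared<std::map<std::string, int>>(*snapshot);
        (*updated)[key] = value;                   // copy-on-write insert
        std::lock_guard<std::mutex> lock(g_mu);
        if (g_index.get() == snapshot.get()) {     // nobody raced us
          g_index = std::move(updated);
          return;
        }
        // Another thread swapped the index first; retry with a new snapshot.
      }
    }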
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
deleted file mode 100644
index 0572034a9c..0000000000
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/ext/filters/client_channel/uri_parser.h"
-
-#include <string.h>
-
-#include <grpc/slice_buffer.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/lib/gpr/string.h"
-#include "src/core/lib/slice/percent_encoding.h"
-#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/slice/slice_string_helpers.h"
-
-/** a size_t default value... maps to all 1's */
-#define NOT_SET (~(size_t)0)
-
-static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
- bool suppress_errors) {
- char* line_prefix;
- size_t pfx_len;
-
- if (!suppress_errors) {
- gpr_asprintf(&line_prefix, "bad uri.%s: '", section);
- pfx_len = strlen(line_prefix) + pos;
- gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
- gpr_free(line_prefix);
-
- line_prefix = static_cast<char*>(gpr_malloc(pfx_len + 1));
- memset(line_prefix, ' ', pfx_len);
- line_prefix[pfx_len] = 0;
- gpr_log(GPR_ERROR, "%s^ here", line_prefix);
- gpr_free(line_prefix);
- }
-
- return nullptr;
-}
-
-/** Returns a copy of percent decoded \a src[begin, end) */
-static char* decode_and_copy_component(const char* src, size_t begin,
- size_t end) {
- grpc_slice component =
- (begin == NOT_SET || end == NOT_SET)
- ? grpc_empty_slice()
- : grpc_slice_from_copied_buffer(src + begin, end - begin);
- grpc_slice decoded_component =
- grpc_permissive_percent_decode_slice(component);
- char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
- grpc_slice_unref_internal(component);
- grpc_slice_unref_internal(decoded_component);
- return out;
-}
-
-static bool valid_hex(char c) {
- return ((c >= 'a') && (c <= 'f')) || ((c >= 'A') && (c <= 'F')) ||
- ((c >= '0') && (c <= '9'));
-}
-
-/** Returns how many chars to advance if \a uri_text[i] begins a valid \a pchar
- * production. If \a uri_text[i] introduces an invalid \a pchar (such as percent
- * sign not followed by two hex digits), NOT_SET is returned. */
-static size_t parse_pchar(const char* uri_text, size_t i) {
- /* pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
- * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
- * pct-encoded = "%" HEXDIG HEXDIG
- * sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
- / "*" / "+" / "," / ";" / "=" */
- char c = uri_text[i];
- switch (c) {
- default:
- if (((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) ||
- ((c >= '0') && (c <= '9'))) {
- return 1;
- }
- break;
- case ':':
- case '@':
- case '-':
- case '.':
- case '_':
- case '~':
- case '!':
- case '$':
- case '&':
- case '\'':
- case '(':
- case ')':
- case '*':
- case '+':
- case ',':
- case ';':
- case '=':
- return 1;
- case '%': /* pct-encoded */
- if (valid_hex(uri_text[i + 1]) && valid_hex(uri_text[i + 2])) {
- return 2;
- }
- return NOT_SET;
- }
- return 0;
-}
-
-/* *( pchar / "?" / "/" ) */
-static int parse_fragment_or_query(const char* uri_text, size_t* i) {
- char c;
- while ((c = uri_text[*i]) != 0) {
- const size_t advance = parse_pchar(uri_text, *i); /* pchar */
- switch (advance) {
- case 0: /* uri_text[i] isn't in pchar */
- /* maybe it's ? or / */
- if (uri_text[*i] == '?' || uri_text[*i] == '/') {
- (*i)++;
- break;
- } else {
- return 1;
- }
- GPR_UNREACHABLE_CODE(return 0);
- default:
- (*i) += advance;
- break;
- case NOT_SET: /* uri_text[i] introduces an invalid URI */
- return 0;
- }
- }
- /* *i is the first uri_text position past the \a query production, maybe \0 */
- return 1;
-}
-
-static void parse_query_parts(grpc_uri* uri) {
- static const char* QUERY_PARTS_SEPARATOR = "&";
- static const char* QUERY_PARTS_VALUE_SEPARATOR = "=";
- GPR_ASSERT(uri->query != nullptr);
- if (uri->query[0] == '\0') {
- uri->query_parts = nullptr;
- uri->query_parts_values = nullptr;
- uri->num_query_parts = 0;
- return;
- }
-
- gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
- &uri->num_query_parts);
- uri->query_parts_values =
- static_cast<char**>(gpr_malloc(uri->num_query_parts * sizeof(char**)));
- for (size_t i = 0; i < uri->num_query_parts; i++) {
- char** query_param_parts;
- size_t num_query_param_parts;
- char* full = uri->query_parts[i];
- gpr_string_split(full, QUERY_PARTS_VALUE_SEPARATOR, &query_param_parts,
- &num_query_param_parts);
- GPR_ASSERT(num_query_param_parts > 0);
- uri->query_parts[i] = query_param_parts[0];
- if (num_query_param_parts > 1) {
- /* TODO(dgq): only the first value after the separator is considered.
- * Perhaps all chars after the first separator for the query part should
- * be included, even if they include the separator. */
- uri->query_parts_values[i] = query_param_parts[1];
- } else {
- uri->query_parts_values[i] = nullptr;
- }
- for (size_t j = 2; j < num_query_param_parts; j++) {
- gpr_free(query_param_parts[j]);
- }
- gpr_free(query_param_parts);
- gpr_free(full);
- }
-}
-
-grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors) {
- grpc_uri* uri;
- size_t scheme_begin = 0;
- size_t scheme_end = NOT_SET;
- size_t authority_begin = NOT_SET;
- size_t authority_end = NOT_SET;
- size_t path_begin = NOT_SET;
- size_t path_end = NOT_SET;
- size_t query_begin = NOT_SET;
- size_t query_end = NOT_SET;
- size_t fragment_begin = NOT_SET;
- size_t fragment_end = NOT_SET;
- size_t i;
-
- for (i = scheme_begin; uri_text[i] != 0; i++) {
- if (uri_text[i] == ':') {
- scheme_end = i;
- break;
- }
- if (uri_text[i] >= 'a' && uri_text[i] <= 'z') continue;
- if (uri_text[i] >= 'A' && uri_text[i] <= 'Z') continue;
- if (i != scheme_begin) {
- if (uri_text[i] >= '0' && uri_text[i] <= '9') continue;
- if (uri_text[i] == '+') continue;
- if (uri_text[i] == '-') continue;
- if (uri_text[i] == '.') continue;
- }
- break;
- }
- if (scheme_end == NOT_SET) {
- return bad_uri(uri_text, i, "scheme", suppress_errors);
- }
-
- if (uri_text[scheme_end + 1] == '/' && uri_text[scheme_end + 2] == '/') {
- authority_begin = scheme_end + 3;
- for (i = authority_begin; uri_text[i] != 0 && authority_end == NOT_SET;
- i++) {
- if (uri_text[i] == '/' || uri_text[i] == '?' || uri_text[i] == '#') {
- authority_end = i;
- }
- }
- if (authority_end == NOT_SET && uri_text[i] == 0) {
- authority_end = i;
- }
- if (authority_end == NOT_SET) {
- return bad_uri(uri_text, i, "authority", suppress_errors);
- }
- /* TODO(ctiller): parse the authority correctly */
- path_begin = authority_end;
- } else {
- path_begin = scheme_end + 1;
- }
-
- for (i = path_begin; uri_text[i] != 0; i++) {
- if (uri_text[i] == '?' || uri_text[i] == '#') {
- path_end = i;
- break;
- }
- }
- if (path_end == NOT_SET && uri_text[i] == 0) {
- path_end = i;
- }
- if (path_end == NOT_SET) {
- return bad_uri(uri_text, i, "path", suppress_errors);
- }
-
- if (uri_text[i] == '?') {
- query_begin = ++i;
- if (!parse_fragment_or_query(uri_text, &i)) {
- return bad_uri(uri_text, i, "query", suppress_errors);
- } else if (uri_text[i] != 0 && uri_text[i] != '#') {
- /* We must be at the end or at the beginning of a fragment */
- return bad_uri(uri_text, i, "query", suppress_errors);
- }
- query_end = i;
- }
- if (uri_text[i] == '#') {
- fragment_begin = ++i;
- if (!parse_fragment_or_query(uri_text, &i)) {
-      return bad_uri(uri_text, i, "fragment", suppress_errors);
- } else if (uri_text[i] != 0) {
- /* We must be at the end */
- return bad_uri(uri_text, i, "fragment", suppress_errors);
- }
- fragment_end = i;
- }
-
- uri = static_cast<grpc_uri*>(gpr_zalloc(sizeof(*uri)));
- uri->scheme = decode_and_copy_component(uri_text, scheme_begin, scheme_end);
- uri->authority =
- decode_and_copy_component(uri_text, authority_begin, authority_end);
- uri->path = decode_and_copy_component(uri_text, path_begin, path_end);
- uri->query = decode_and_copy_component(uri_text, query_begin, query_end);
- uri->fragment =
- decode_and_copy_component(uri_text, fragment_begin, fragment_end);
- parse_query_parts(uri);
-
- return uri;
-}
-
-const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key) {
- GPR_ASSERT(key != nullptr);
- if (key[0] == '\0') return nullptr;
-
- for (size_t i = 0; i < uri->num_query_parts; ++i) {
- if (0 == strcmp(key, uri->query_parts[i])) {
- return uri->query_parts_values[i];
- }
- }
- return nullptr;
-}
-
-void grpc_uri_destroy(grpc_uri* uri) {
- if (!uri) return;
- gpr_free(uri->scheme);
- gpr_free(uri->authority);
- gpr_free(uri->path);
- gpr_free(uri->query);
- for (size_t i = 0; i < uri->num_query_parts; ++i) {
- gpr_free(uri->query_parts[i]);
- gpr_free(uri->query_parts_values[i]);
- }
- gpr_free(uri->query_parts);
- gpr_free(uri->query_parts_values);
- gpr_free(uri->fragment);
- gpr_free(uri);
-}
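
For reference, the parser deleted above implemented a small RFC 3986-style grammar by hand: scan the scheme up to ':', treat "//" as introducing an authority, split path/query/fragment on '?' and '#', percent-decode each component, and pre-split the query on '&' and '='. A hypothetical caller of this (now removed) API looked like:

  grpc_uri* uri =
      grpc_uri_parse("ipv4://127.0.0.1:1234?foo=bar", /*suppress_errors=*/false);
  if (uri != nullptr) {
    // uri->scheme == "ipv4", uri->authority == "127.0.0.1:1234"
    const char* foo = grpc_uri_get_query_arg(uri, "foo");  // "bar"; nullptr if absent
    grpc_uri_destroy(uri);  // frees all components and the query-part tables
  }
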
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
deleted file mode 100644
index d749f23308..0000000000
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H
-
-#include <grpc/support/port_platform.h>
-
-#include <stddef.h>
-
-typedef struct {
- char* scheme;
- char* authority;
- char* path;
- char* query;
- /** Query substrings separated by '&' */
- char** query_parts;
- /** Number of elements in \a query_parts and \a query_parts_values */
- size_t num_query_parts;
- /** Split each query part by '='. NULL if not present. */
- char** query_parts_values;
- char* fragment;
-} grpc_uri;
-
-/** parse a uri, return NULL on failure */
-grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors);
-
-/** return the part of a query string after the '=' in "?key=xxx&...", or NULL
- * if key is not present */
-const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key);
-
-/** destroy a uri */
-void grpc_uri_destroy(grpc_uri* uri);
-
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */
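
Note that this deletion (together with uri_parser.cc above) is a relocation rather than a removal: the server_load_reporting_filter.cc change later in this diff swaps

  #include "src/core/ext/filters/client_channel/uri_parser.h"

for

  #include "src/core/lib/uri/uri_parser.h"

so the URI parser now lives under src/core/lib/uri/ rather than in the client-channel filter tree.
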
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index d23ad67ad5..b4cb07f0f9 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -27,6 +27,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel_init.h"
@@ -152,7 +153,11 @@ static void inject_recv_trailing_metadata_ready(
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
- bool in_call_combiner;
+ start_timer_after_init_state(grpc_call_element* elem, grpc_millis deadline)
+ : elem(elem), deadline(deadline) {}
+ ~start_timer_after_init_state() { start_timer_if_needed(elem, deadline); }
+
+ bool in_call_combiner = false;
grpc_call_element* elem;
grpc_millis deadline;
grpc_closure closure;
@@ -171,20 +176,16 @@ static void start_timer_after_init(void* arg, grpc_error* error) {
"scheduling deadline timer");
return;
}
- start_timer_if_needed(state->elem, state->deadline);
- gpr_free(state);
+ grpc_core::Delete(state);
GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
"done scheduling deadline timer");
}
-void grpc_deadline_state_init(grpc_call_element* elem,
- grpc_call_stack* call_stack,
- grpc_call_combiner* call_combiner,
- grpc_millis deadline) {
- grpc_deadline_state* deadline_state =
- static_cast<grpc_deadline_state*>(elem->call_data);
- deadline_state->call_stack = call_stack;
- deadline_state->call_combiner = call_combiner;
+grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
+ grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
+ grpc_millis deadline)
+ : call_stack(call_stack), call_combiner(call_combiner) {
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
if (deadline != GRPC_MILLIS_INF_FUTURE) {
@@ -196,21 +197,14 @@ void grpc_deadline_state_init(grpc_call_element* elem,
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
struct start_timer_after_init_state* state =
- static_cast<struct start_timer_after_init_state*>(
- gpr_zalloc(sizeof(*state)));
- state->elem = elem;
- state->deadline = deadline;
+ grpc_core::New<start_timer_after_init_state>(elem, deadline);
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
}
}
-void grpc_deadline_state_destroy(grpc_call_element* elem) {
- grpc_deadline_state* deadline_state =
- static_cast<grpc_deadline_state*>(elem->call_data);
- cancel_timer_if_needed(deadline_state);
-}
+grpc_deadline_state::~grpc_deadline_state() { cancel_timer_if_needed(this); }
void grpc_deadline_state_reset(grpc_call_element* elem,
grpc_millis new_deadline) {
@@ -269,8 +263,8 @@ typedef struct server_call_data {
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
- args->deadline);
+ new (elem->call_data) grpc_deadline_state(
+ elem, args->call_stack, args->call_combiner, args->deadline);
return GRPC_ERROR_NONE;
}
@@ -278,7 +272,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- grpc_deadline_state_destroy(elem);
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
+ deadline_state->~grpc_deadline_state();
}
// Method for starting a call op for client filter.
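
This deadline-filter hunk sets the template for the rest of this diff: the C-style grpc_deadline_state_init()/grpc_deadline_state_destroy() pair becomes a real constructor and destructor, invoked via placement new in init_call_elem and an explicit destructor call in destroy_call_elem, because the channel stack owns the call_data storage and the filter must not allocate or free it. A minimal sketch of that lifecycle outside gRPC (all names here are illustrative):

  #include <new>

  struct call_state {
    explicit call_state(int deadline_ms) : deadline_ms(deadline_ms) {}
    ~call_state() { /* cancel timers etc.; cf. cancel_timer_if_needed(this) */ }
    int deadline_ms;
    bool timer_armed = false;  // in-class default instead of relying on zeroed memory
  };

  void init_call(void* storage) {
    new (storage) call_state(/*deadline_ms=*/1000);  // construct in place, no allocation
  }

  void destroy_call(void* storage) {
    static_cast<call_state*>(storage)->~call_state();  // run dtor only; owner frees storage
  }

The same hunk also uses RAII for control flow: ~start_timer_after_init_state() calls start_timer_if_needed(), so the grpc_core::Delete(state) in start_timer_after_init both arms the timer and frees the state in one step.
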
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 1d797f445a..e37032999c 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -22,19 +22,23 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
-typedef enum grpc_deadline_timer_state {
+enum grpc_deadline_timer_state {
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING,
GRPC_DEADLINE_STATE_FINISHED
-} grpc_deadline_timer_state;
+};
// State used for filters that enforce call deadlines.
// Must be the first field in the filter's call_data.
-typedef struct grpc_deadline_state {
+struct grpc_deadline_state {
+ grpc_deadline_state(grpc_call_element* elem, grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner, grpc_millis deadline);
+ ~grpc_deadline_state();
+
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
grpc_call_combiner* call_combiner;
- grpc_deadline_timer_state timer_state;
+ grpc_deadline_timer_state timer_state = GRPC_DEADLINE_STATE_INITIAL;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when we receive trailing metadata.
@@ -43,21 +47,13 @@ typedef struct grpc_deadline_state {
// The original recv_trailing_metadata_ready closure, which we chain to
// after our own closure is invoked.
grpc_closure* original_recv_trailing_metadata_ready;
-} grpc_deadline_state;
+};
//
// NOTE: All of these functions require that the first field in
// elem->call_data is a grpc_deadline_state.
//
-// assumes elem->call_data is zero'd
-void grpc_deadline_state_init(grpc_call_element* elem,
- grpc_call_stack* call_stack,
- grpc_call_combiner* call_combiner,
- grpc_millis deadline);
-
-void grpc_deadline_state_destroy(grpc_call_element* elem);
-
// Cancels the existing timer and starts a new one with new_deadline.
//
// Note: It is generally safe to call this with an earlier deadline
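
In the header, the in-class initializer timer_state = GRPC_DEADLINE_STATE_INITIAL takes over from the deleted "assumes elem->call_data is zero'd" contract: defaults are now spelled in the type itself, while members that need runtime values (call_stack, call_combiner) are set in the constructor's initializer list. This is what lets init_call_elem drop the zeroing assumption and simply placement-new the struct.
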
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index cd459e47cd..bf9a01f659 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -37,10 +37,31 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
/* default maximum size of payload eligible for GET request */
-static const size_t kMaxPayloadSizeForGet = 2048;
+static constexpr size_t kMaxPayloadSizeForGet = 2048;
+
+static void recv_initial_metadata_ready(void* user_data, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+static void on_send_message_next_done(void* arg, grpc_error* error);
+static void send_message_on_complete(void* arg, grpc_error* error);
namespace {
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+ ::recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&on_send_message_next_done, ::on_send_message_next_done,
+ elem, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&send_message_on_complete, ::send_message_on_complete,
+ elem, grpc_schedule_on_exec_ctx);
+ }
+
+ ~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
+
grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
@@ -51,18 +72,18 @@ struct call_data {
grpc_linked_mdelem user_agent;
// State for handling recv_initial_metadata ops.
grpc_metadata_batch* recv_initial_metadata;
- grpc_error* recv_initial_metadata_error;
- grpc_closure* original_recv_initial_metadata_ready;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
+ grpc_closure* original_recv_initial_metadata_ready = nullptr;
grpc_closure recv_initial_metadata_ready;
// State for handling recv_trailing_metadata ops.
grpc_metadata_batch* recv_trailing_metadata;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_closure recv_trailing_metadata_ready;
- grpc_error* recv_trailing_metadata_error;
- bool seen_recv_trailing_metadata_ready;
+ grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
+ bool seen_recv_trailing_metadata_ready = false;
// State for handling send_message ops.
grpc_transport_stream_op_batch* send_message_batch;
- size_t send_message_bytes_read;
+ size_t send_message_bytes_read = 0;
grpc_core::ManualConstructor<grpc_core::ByteStreamCache> send_message_cache;
grpc_core::ManualConstructor<grpc_core::ByteStreamCache::CachingByteStream>
send_message_caching_stream;
@@ -442,18 +463,7 @@ done:
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
- recv_initial_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
- elem, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
- on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -462,7 +472,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+ calld->~call_data();
}
static grpc_mdelem scheme_from_args(const grpc_channel_args* args) {
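
One wrinkle in the constructor above: the grpc_closure members and the file-scope callbacks now share names, so inside the struct an unqualified recv_initial_metadata_ready names the member. The forward declarations added before the anonymous namespace, plus the ::-qualified references in GRPC_CLOSURE_INIT, pick the functions instead. A self-contained illustration of the lookup rule (illustrative names, not gRPC code):

  static void on_done(void* /*arg*/) {}  // file-scope callback

  namespace {
  struct call_data {
    call_data() { on_done = ::on_done; }  // '::' selects the function, not the member
    void (*on_done)(void*) = nullptr;     // member shadows the function's name
  };
  }  // namespace
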
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 933fe3c77b..9c8c8d9e18 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -39,6 +39,10 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
+static void start_send_message_batch(void* arg, grpc_error* unused);
+static void send_message_on_complete(void* arg, grpc_error* error);
+static void on_send_message_next_done(void* arg, grpc_error* error);
+
namespace {
enum initial_metadata_state {
// Initial metadata not yet seen.
@@ -50,6 +54,23 @@ enum initial_metadata_state {
};
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner,
+ start_send_message_batch, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_slice_buffer_init(&slices);
+ GRPC_CLOSURE_INIT(&send_message_on_complete, ::send_message_on_complete,
+ elem, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&on_send_message_next_done, ::on_send_message_next_done,
+ elem, grpc_schedule_on_exec_ctx);
+ }
+
+ ~call_data() {
+ grpc_slice_buffer_destroy_internal(&slices);
+ GRPC_ERROR_UNREF(cancel_error);
+ }
+
grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
@@ -57,11 +78,12 @@ struct call_data {
grpc_linked_mdelem accept_stream_encoding_storage;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
- grpc_message_compression_algorithm message_compression_algorithm;
- initial_metadata_state send_initial_metadata_state;
- grpc_error* cancel_error;
+ grpc_message_compression_algorithm message_compression_algorithm =
+ GRPC_MESSAGE_COMPRESS_NONE;
+ initial_metadata_state send_initial_metadata_state = INITIAL_METADATA_UNSEEN;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
grpc_closure start_send_message_batch_in_call_combiner;
- grpc_transport_stream_op_batch* send_message_batch;
+ grpc_transport_stream_op_batch* send_message_batch = nullptr;
grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream>
replacement_stream;
@@ -424,16 +446,7 @@ static void compress_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- calld->cancel_error = GRPC_ERROR_NONE;
- grpc_slice_buffer_init(&calld->slices);
- GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
- start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
- on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
- elem, grpc_schedule_on_exec_ctx);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -442,8 +455,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- grpc_slice_buffer_destroy_internal(&calld->slices);
- GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->~call_data();
}
/* Constructor for channel_data */
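
The compress filter keeps replacement_stream behind grpc_core::ManualConstructor even after this change, presumably because that stream only comes to life once a send_message batch arrives, not at call_data construction. Roughly, and this is a from-memory sketch rather than the real gRPC implementation, such a helper is aligned storage with explicit construct/destroy calls:

  #include <new>
  #include <type_traits>
  #include <utility>

  template <typename T>
  class ManualConstructorSketch {
   public:
    template <typename... Args>
    void Init(Args&&... args) {  // construct on demand, mid-lifetime
      new (&space_) T(std::forward<Args>(args)...);
    }
    void Destroy() { get()->~T(); }  // caller tracks whether Init() ever ran
    T* get() { return reinterpret_cast<T*>(&space_); }

   private:
    typename std::aligned_storage<sizeof(T), alignof(T)>::type space_;
  };
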
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 436ea09d94..ce1be8370c 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -35,9 +35,32 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
+static void hs_recv_initial_metadata_ready(void* user_data, grpc_error* err);
+static void hs_recv_trailing_metadata_ready(void* user_data, grpc_error* err);
+static void hs_recv_message_ready(void* user_data, grpc_error* err);
+
namespace {
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+ hs_recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_message_ready, hs_recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ hs_recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ }
+
+ ~call_data() {
+ GRPC_ERROR_UNREF(recv_initial_metadata_ready_error);
+ if (have_read_stream) {
+ read_stream->Orphan();
+ }
+ }
+
grpc_call_combiner* call_combiner;
// Outgoing headers to add to send_initial_metadata.
@@ -47,27 +70,27 @@ struct call_data {
// If we see the recv_message contents in the GET query string, we
// store it here.
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> read_stream;
- bool have_read_stream;
+ bool have_read_stream = false;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
- grpc_error* recv_initial_metadata_ready_error;
+ grpc_error* recv_initial_metadata_ready_error = GRPC_ERROR_NONE;
grpc_closure* original_recv_initial_metadata_ready;
- grpc_metadata_batch* recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata = nullptr;
uint32_t* recv_initial_metadata_flags;
- bool seen_recv_initial_metadata_ready;
+ bool seen_recv_initial_metadata_ready = false;
// State for intercepting recv_message.
grpc_closure* original_recv_message_ready;
grpc_closure recv_message_ready;
grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
- bool seen_recv_message_ready;
+ bool seen_recv_message_ready = false;
// State for intercepting recv_trailing_metadata
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_error* recv_trailing_metadata_ready_error;
- bool seen_recv_trailing_metadata_ready;
+ bool seen_recv_trailing_metadata_ready = false;
};
struct channel_data {
@@ -431,16 +454,7 @@ static void hs_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* hs_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
- hs_recv_initial_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_message_ready, hs_recv_message_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- hs_recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -449,10 +463,7 @@ static void hs_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_ready_error);
- if (calld->have_read_stream) {
- calld->read_stream->Orphan();
- }
+ calld->~call_data();
}
/* Constructor for channel_data */
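
Note how ~call_data() above preserves the conditional cleanup that hs_destroy_call_elem used to perform: read_stream is Orphan()ed only when have_read_stream says it was ever initialized, since a ManualConstructor member is not constructed by default and must not be destroyed unless it was. Moving the flag and the cleanup into the same type keeps that invariant in one place.
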
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index 8ac34c629f..6a7231ff7d 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -25,7 +25,6 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/parse_address.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/ext/filters/load_reporting/registered_opencensus_objects.h"
#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
#include "src/core/lib/channel/channel_args.h"
@@ -36,6 +35,7 @@
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
+#include "src/core/lib/uri/uri_parser.h"
namespace grpc {
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 2d3b16d992..94d6942aa4 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -90,9 +90,53 @@ RefCountedPtr<MessageSizeLimits> MessageSizeLimits::CreateFromJson(
} // namespace
} // namespace grpc_core
+static void recv_message_ready(void* user_data, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+
namespace {
+struct channel_data {
+ message_size_limits limits;
+ // Maps path names to refcounted MessageSizeLimits objects.
+ grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
+ method_limit_table;
+};
+
struct call_data {
+ call_data(grpc_call_element* elem, const channel_data& chand,
+ const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner), limits(chand.limits) {
+ GRPC_CLOSURE_INIT(&recv_message_ready, ::recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ // Get max sizes from channel data, then merge in per-method config values.
+ // Note: Per-method config is only available on the client, so we
+ // apply the max request size to the send limit and the max response
+ // size to the receive limit.
+ if (chand.method_limit_table != nullptr) {
+ grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
+ grpc_core::ServiceConfig::MethodConfigTableLookup(
+ *chand.method_limit_table, args.path);
+ if (limits != nullptr) {
+ if (limits->limits().max_send_size >= 0 &&
+ (limits->limits().max_send_size < this->limits.max_send_size ||
+ this->limits.max_send_size < 0)) {
+ this->limits.max_send_size = limits->limits().max_send_size;
+ }
+ if (limits->limits().max_recv_size >= 0 &&
+ (limits->limits().max_recv_size < this->limits.max_recv_size ||
+ this->limits.max_recv_size < 0)) {
+ this->limits.max_recv_size = limits->limits().max_recv_size;
+ }
+ }
+ }
+ }
+
+ ~call_data() { GRPC_ERROR_UNREF(error); }
+
grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
@@ -101,25 +145,17 @@ struct call_data {
grpc_closure recv_message_ready;
grpc_closure recv_trailing_metadata_ready;
// The error caused by a message that is too large, or GRPC_ERROR_NONE
- grpc_error* error;
+ grpc_error* error = GRPC_ERROR_NONE;
// Used by recv_message_ready.
- grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message = nullptr;
// Original recv_message_ready callback, invoked after our own.
- grpc_closure* next_recv_message_ready;
+ grpc_closure* next_recv_message_ready = nullptr;
// Original recv_trailing_metadata callback, invoked after our own.
grpc_closure* original_recv_trailing_metadata_ready;
- bool seen_recv_trailing_metadata;
+ bool seen_recv_trailing_metadata = false;
grpc_error* recv_trailing_metadata_error;
};
-struct channel_data {
- message_size_limits limits;
- // Maps path names to refcounted_message_size_limits structs.
- grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
- grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
- method_limit_table;
-};
-
} // namespace
// Callback invoked when we receive a message. Here we check the max
@@ -228,38 +264,7 @@ static void start_transport_stream_op_batch(
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- calld->next_recv_message_ready = nullptr;
- calld->original_recv_trailing_metadata_ready = nullptr;
- calld->error = GRPC_ERROR_NONE;
- GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- // Get max sizes from channel data, then merge in per-method config values.
- // Note: Per-method config is only available on the client, so we
- // apply the max request size to the send limit and the max response
- // size to the receive limit.
- calld->limits = chand->limits;
- if (chand->method_limit_table != nullptr) {
- grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
- grpc_core::ServiceConfig::MethodConfigTableLookup(
- *chand->method_limit_table, args->path);
- if (limits != nullptr) {
- if (limits->limits().max_send_size >= 0 &&
- (limits->limits().max_send_size < calld->limits.max_send_size ||
- calld->limits.max_send_size < 0)) {
- calld->limits.max_send_size = limits->limits().max_send_size;
- }
- if (limits->limits().max_recv_size >= 0 &&
- (limits->limits().max_recv_size < calld->limits.max_recv_size ||
- calld->limits.max_recv_size < 0)) {
- calld->limits.max_recv_size = limits->limits().max_recv_size;
- }
- }
- }
+ new (elem->call_data) call_data(elem, *chand, *args);
return GRPC_ERROR_NONE;
}
@@ -268,7 +273,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = (call_data*)elem->call_data;
- GRPC_ERROR_UNREF(calld->error);
+ calld->~call_data();
}
static int default_size(const grpc_channel_args* args,
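
The limit-merging rule moved into the constructor above is worth spelling out: a negative limit means "no limit" (the code only ever tests < 0), and a per-method limit may tighten, but never loosen, the channel-level limit. Distilled into a hypothetical helper using the same comparisons:

  int merged_limit(int channel_limit, int method_limit) {
    if (method_limit >= 0 &&
        (method_limit < channel_limit || channel_limit < 0)) {
      return method_limit;  // per-method config tightens (or bounds) the limit
    }
    return channel_limit;  // otherwise the channel-level setting stands
  }
  // merged_limit(4096, 1024) == 1024  -- method tightens channel
  // merged_limit(1024, 4096) == 1024  -- method cannot loosen
  // merged_limit(-1, 2048)   == 2048  -- unbounded channel, bounded method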