Diffstat (limited to 'src')
-rw-r--r--src/core/ext/filters/client_channel/http_proxy.c28
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c422
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_factory.c2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_factory.h2
-rw-r--r--src/core/lib/debug/stats_data.c62
-rw-r--r--src/core/lib/debug/stats_data.h26
-rw-r--r--src/core/lib/debug/stats_data.yaml8
-rw-r--r--src/core/lib/debug/stats_data_bq_schema.sql2
-rw-r--r--src/core/lib/iomgr/executor.c69
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.c14
-rw-r--r--src/cpp/common/channel_arguments.cc4
-rw-r--r--src/python/grpcio/support.py10
-rw-r--r--src/ruby/lib/grpc/google_rpc_status_utils.rb9
-rw-r--r--src/ruby/spec/google_rpc_status_utils_spec.rb77
14 files changed, 416 insertions, 319 deletions
diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c
index c507a2750e..a16b44d3dc 100644
--- a/src/core/ext/filters/client_channel/http_proxy.c
+++ b/src/core/ext/filters/client_channel/http_proxy.c
@@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* user_cred = NULL;
*name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
if (*name_to_resolve == NULL) return false;
+ char* no_proxy_str = NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
if (uri == NULL || uri->path[0] == '\0') {
@@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
"'http_proxy' environment variable set, but cannot "
"parse server URI '%s' -- not using proxy",
server_uri);
- if (uri != NULL) {
- gpr_free(user_cred);
- grpc_uri_destroy(uri);
- }
- return false;
+ goto no_use_proxy;
}
if (strcmp(uri->scheme, "unix") == 0) {
gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
server_uri);
- gpr_free(user_cred);
- grpc_uri_destroy(uri);
- return false;
+ goto no_use_proxy;
}
- char* no_proxy_str = gpr_getenv("no_proxy");
+ no_proxy_str = gpr_getenv("no_proxy");
if (no_proxy_str != NULL) {
static const char* NO_PROXY_SEPARATOR = ",";
bool use_proxy = true;
@@ -147,12 +142,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
gpr_free(no_proxy_hosts);
gpr_free(server_host);
gpr_free(server_port);
- if (!use_proxy) {
- grpc_uri_destroy(uri);
- gpr_free(*name_to_resolve);
- *name_to_resolve = NULL;
- return false;
- }
+ if (!use_proxy) goto no_use_proxy;
}
}
grpc_arg args_to_add[2];
@@ -173,9 +163,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
} else {
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
}
- gpr_free(user_cred);
grpc_uri_destroy(uri);
+ gpr_free(user_cred);
return true;
+no_use_proxy:
+ if (uri != NULL) grpc_uri_destroy(uri);
+ gpr_free(*name_to_resolve);
+ *name_to_resolve = NULL;
+ gpr_free(user_cred);
+ return false;
}
static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
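
The rewrite above funnels every early-exit path through a single no_use_proxy label, so the cleanup of uri, user_cred, and *name_to_resolve lives in exactly one place. A minimal sketch of the idiom, assuming nothing beyond gpr_strdup/gpr_free (the helper and strings are illustrative, not gRPC APIs):

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include <stdbool.h>

/* Single-exit cleanup: failures jump to one label that releases whatever
 * has been allocated so far. Pointers start as NULL so the label can
 * free them unconditionally (gpr_free(NULL) is a no-op). */
static bool do_work(void) {
  char *a = NULL;
  char *b = NULL;
  a = gpr_strdup("first");
  if (a == NULL) goto fail;
  b = gpr_strdup("second");
  if (b == NULL) goto fail;
  gpr_free(b);
  gpr_free(a);
  return true;
fail:
  gpr_free(b);
  gpr_free(a);
  return false;
}
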
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index 85ef7894ea..8dc81b46d1 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -123,6 +123,7 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
+#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@@ -299,6 +300,10 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
+ /** timeout in milliseconds before using fallback backend addresses.
+ * 0 means not using fallback. */
+ int lb_fallback_timeout_ms;
+
/** for communicating with the LB server */
grpc_channel *lb_channel;
@@ -325,6 +330,9 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
+ /** stores the backend addresses from the resolver */
+ grpc_lb_addresses *fallback_backend_addresses;
+
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@@ -345,6 +353,9 @@ typedef struct glb_lb_policy {
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
+ /** is \a lb_fallback_timer active? */
+ bool fallback_timer_active;
+
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@@ -354,9 +365,6 @@ typedef struct glb_lb_policy {
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
- /* Finished sending initial request. */
- grpc_closure lb_on_sent_initial_request;
-
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
@@ -367,6 +375,9 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
+ /* LB fallback timer callback. */
+ grpc_closure lb_on_fallback;
+
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@@ -390,7 +401,9 @@ typedef struct glb_lb_policy {
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
- bool initial_request_sent;
+ /** LB fallback timer */
+ grpc_timer lb_fallback_timer;
+
bool seen_initial_response;
/* Stats for client-side load reporting. Should be unreffed and
@@ -536,6 +549,32 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
+/* Returns the backend addresses extracted from the given addresses */
+static grpc_lb_addresses *extract_backend_addresses_locked(
+ grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
+ /* first pass: count the number of backend addresses */
+ size_t num_backends = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) {
+ ++num_backends;
+ }
+ }
+ /* second pass: actually populate the addresses and (empty) LB tokens */
+ grpc_lb_addresses *backend_addresses =
+ grpc_lb_addresses_create(num_backends, &lb_token_vtable);
+ size_t num_copied = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) continue;
+ const grpc_resolved_address *addr = &addresses->addresses[i].address;
+ grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
+ addr->len, false /* is_balancer */,
+ NULL /* balancer_name */,
+ (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+ ++num_copied;
+ }
+ return backend_addresses;
+}
+
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@@ -603,35 +642,38 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
- // Look at the index into the serverlist to see if we should drop this call.
- grpc_grpclb_server *server =
- glb_policy->serverlist->servers[glb_policy->serverlist_index++];
- if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
- glb_policy->serverlist_index = 0; // Wrap-around.
- }
- if (server->drop) {
- // Not using the RR policy, so unref it.
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
- (intptr_t)wc_arg->rr_policy);
+ // Check for drops if we are not using fallback backend addresses.
+ if (glb_policy->serverlist != NULL) {
+ // Look at the index into the serverlist to see if we should drop this call.
+ grpc_grpclb_server *server =
+ glb_policy->serverlist->servers[glb_policy->serverlist_index++];
+ if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
+ glb_policy->serverlist_index = 0; // Wrap-around.
}
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
- // Update client load reporting stats to indicate the number of
- // dropped calls. Note that we have to do this here instead of in
- // the client_load_reporting filter, because we do not create a
- // subchannel call (and therefore no client_load_reporting filter)
- // for dropped calls.
- grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
- wc_arg->client_stats);
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
- if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ if (server->drop) {
+ // Not using the RR policy, so unref it.
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+ // Update client load reporting stats to indicate the number of
+ // dropped calls. Note that we have to do this here instead of in
+ // the client_load_reporting filter, because we do not create a
+ // subchannel call (and therefore no client_load_reporting filter)
+ // for dropped calls.
+ grpc_grpclb_client_stats_add_call_dropped_locked(
+ server->load_balance_token, wc_arg->client_stats);
+ grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ if (force_async) {
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ gpr_free(wc_arg->free_when_done);
+ return false;
+ }
gpr_free(wc_arg->free_when_done);
- return false;
+ return true;
}
- gpr_free(wc_arg->free_when_done);
- return true;
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@@ -669,8 +711,18 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- grpc_lb_addresses *addresses =
- process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+ grpc_lb_addresses *addresses;
+ if (glb_policy->serverlist != NULL) {
+ GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
+ addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+ } else {
+ // If rr_handover_locked() is invoked when we haven't received any
+ // serverlist from the balancer, we use the fallback backends returned by
+ // the resolver. Note that the fallback backend list may be empty, in which
+ // case the new round_robin policy will keep the requested picks pending.
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
+ }
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
@@ -776,8 +828,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- GPR_ASSERT(glb_policy->serverlist != NULL &&
- glb_policy->serverlist->num_servers > 0);
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@@ -926,6 +976,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
+ if (glb_policy->fallback_backend_addresses != NULL) {
+ grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+ }
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
@@ -1067,10 +1120,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
+ /* start a timer to fall back */
+ if (glb_policy->lb_fallback_timeout_ms > 0 &&
+ glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec deadline = gpr_time_add(
+ now,
+ gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
+ glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ glb_policy->fallback_timer_active = true;
+ grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
+ &glb_policy->lb_on_fallback, now);
+ }
+
glb_policy->started_picking = true;
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
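
start_picking_locked() now arms the fallback timer using the standard core recipe: take a weak ref that the callback releases, point the closure at the policy's combiner, then grpc_timer_init() with an absolute deadline. The matching callback contract (sketched below, and mirrored by lb_on_fallback_timer_locked() later in this diff) is that a cancelled timer still runs its closure, just with a non-OK error:

/* Sketch of the one-shot timer callback shape (names mirror this diff). */
static void my_on_fallback_timer(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
  glb_policy->fallback_timer_active = false;
  if (error == GRPC_ERROR_NONE && !glb_policy->shutting_down) {
    /* The timer genuinely expired: perform the deferred fallback work. */
  }
  /* Always drop the ref taken when the timer was armed. */
  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                            "grpclb_fallback_timer");
}
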
@@ -1173,6 +1244,58 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &glb_policy->state_tracker, current, notify);
}
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+ glb_policy->retry_timer_active = false;
+ if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->lb_call == NULL);
+ query_for_backends_locked(exec_ctx, glb_policy);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+}
+
+static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
+ if (glb_policy->started_picking && glb_policy->updating_lb_call) {
+ if (glb_policy->retry_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+ }
+ if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
+ glb_policy->updating_lb_call = false;
+ } else if (!glb_policy->shutting_down) {
+ /* if we aren't shutting down, restart the LB client call after some time */
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec next_try =
+ gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
+ (void *)glb_policy);
+ gpr_timespec timeout = gpr_time_sub(next_try, now);
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG,
+ "... retry_timer_active in %" PRId64 ".%09d seconds.",
+ timeout.tv_sec, timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
+ }
+ }
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
+ lb_call_on_retry_timer_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ glb_policy->retry_timer_active = true;
+ grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+ &glb_policy->lb_on_call_retry, now);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_server_status_received_locked");
+}
+
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
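
maybe_restart_lb_call() hoists the retry logic that previously lived inline in lb_on_server_status_received_locked(), so it can also be reached from the load-report path below. Its backed-off branch uses the reconnect constants defined at the top of this file (multiplier 1.6, jitter 0.2, 120 s cap). A rough sketch of the schedule those constants imply, with an assumed 1 s initial delay (the initial value is not part of this diff):

#include <stdio.h>

int main(void) {
  /* delay_n = min(initial * 1.6^n, 120 s), each attempt jittered +/-20%.
   * The 1.0 s starting point is an assumption for illustration only. */
  double delay = 1.0;
  for (int i = 0; i < 6; i++) {
    printf("retry %d: ~%.2f s (+/- 20%% jitter)\n", i, delay);
    delay *= 1.6;
    if (delay > 120.0) delay = 120.0;
  }
  return 0;
}
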
@@ -1203,21 +1326,6 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
-static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = glb_policy->client_load_report_payload;
- GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
- client_load_report_done_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, &op, 1,
- &glb_policy->client_load_report_closure);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
@@ -1237,6 +1345,9 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
+ if (glb_policy->lb_call == NULL) {
+ maybe_restart_lb_call(exec_ctx, glb_policy);
+ }
return;
}
// Construct message payload.
@@ -1260,17 +1371,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- // If we've already sent the initial request, then we can go ahead and
- // sent the load report. Otherwise, we need to wait until the initial
- // request has been sent to send this
- // (see lb_on_sent_initial_request_locked() below).
- if (glb_policy->initial_request_sent) {
- do_send_client_load_report_locked(exec_ctx, glb_policy);
+ // Send load report message.
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_SEND_MESSAGE;
+ op.data.send_message.send_message = glb_policy->client_load_report_payload;
+ GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
+ client_load_report_done_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ grpc_call_error call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, &op, 1,
+ &glb_policy->client_load_report_closure);
+ if (call_error != GRPC_CALL_OK) {
+ gpr_log(GPR_ERROR, "call_error=%d", call_error);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
}
}
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1315,9 +1432,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
- lb_on_sent_initial_request_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1332,7 +1446,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- glb_policy->initial_request_sent = false;
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
}
@@ -1349,7 +1462,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
- if (!glb_policy->client_load_report_timer_pending) {
+ if (glb_policy->client_load_report_timer_pending) {
grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
}
}
@@ -1373,7 +1486,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_error call_error;
- grpc_op ops[4];
+ grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1394,13 +1507,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->flags = 0;
op->reserved = NULL;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_on_sent_initial_request);
+ call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
+ ops, (size_t)(op - ops), NULL);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@@ -1437,19 +1545,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
- glb_policy->initial_request_sent = true;
- // If we attempted to send a client load report before the initial
- // request was sent, send the load report now.
- if (glb_policy->client_load_report_payload != NULL) {
- do_send_client_load_report_locked(exec_ctx, glb_policy);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
- "lb_on_sent_initial_request_locked");
-}
-
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@@ -1525,6 +1620,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+ } else {
+ /* or dispose of the fallback */
+ grpc_lb_addresses_destroy(exec_ctx,
+ glb_policy->fallback_backend_addresses);
+ glb_policy->fallback_backend_addresses = NULL;
+ if (glb_policy->fallback_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+ glb_policy->fallback_timer_active = false;
+ }
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@@ -1535,9 +1639,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO,
- "Received empty server list. Picks will stay pending until "
- "a response with > 0 servers is received");
+ gpr_log(GPR_INFO, "Received empty server list, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@@ -1572,19 +1674,25 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
- glb_policy->retry_timer_active = false;
- if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
- (void *)glb_policy);
+ glb_policy->fallback_timer_active = false;
+ /* If we receive a serverlist after the timer fires but before this callback
+ * actually runs, don't fall back. */
+ if (glb_policy->serverlist == NULL) {
+ if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO,
+ "Falling back to use backends from resolver (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ rr_handover_locked(exec_ctx, glb_policy);
}
- GPR_ASSERT(glb_policy->lb_call == NULL);
- query_for_backends_locked(exec_ctx, glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
@@ -1603,66 +1711,30 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
/* We need to perform cleanups no matter what. */
lb_call_destroy_locked(exec_ctx, glb_policy);
- if (glb_policy->started_picking && glb_policy->updating_lb_call) {
- if (glb_policy->retry_timer_active) {
- grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
- }
- if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
- glb_policy->updating_lb_call = false;
- } else if (!glb_policy->shutting_down) {
- /* if we aren't shutting down, restart the LB client call after some time */
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try =
- gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
- (void *)glb_policy);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG,
- "... retry_timer_active in %" PRId64 ".%09d seconds.",
- timeout.tv_sec, timeout.tv_nsec);
- } else {
- gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
- }
- }
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
- lb_call_on_retry_timer_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->retry_timer_active = true;
- grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
- &glb_policy->lb_on_call_retry, now);
+ // If the load report timer is still pending, we wait for it to be
+ // called before restarting the call. Otherwise, we restart the call
+ // here.
+ if (!glb_policy->client_load_report_timer_pending) {
+ maybe_restart_lb_call(exec_ctx, glb_policy);
+ }
+}
+
+static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy,
+ const grpc_lb_addresses *addresses) {
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+ glb_policy->fallback_backend_addresses =
+ extract_backend_addresses_locked(exec_ctx, addresses);
+ if (glb_policy->lb_fallback_timeout_ms > 0 &&
+ !glb_policy->fallback_timer_active) {
+ rr_handover_locked(exec_ctx, glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
- "lb_on_server_status_received_locked");
}
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
- if (glb_policy->updating_lb_channel) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO,
- "Update already in progress for grpclb %p. Deferring update.",
- (void *)glb_policy);
- }
- if (glb_policy->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx,
- glb_policy->pending_update_args->args);
- gpr_free(glb_policy->pending_update_args);
- }
- glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
- sizeof(*glb_policy->pending_update_args));
- glb_policy->pending_update_args->client_channel_factory =
- args->client_channel_factory;
- glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
- glb_policy->pending_update_args->combiner = args->combiner;
- return;
- }
-
- glb_policy->updating_lb_channel = true;
- // Propagate update to lb_channel (pick first).
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1680,13 +1752,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
"ignoring.",
(void *)glb_policy);
}
+ return;
}
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
+
+ if (glb_policy->serverlist == NULL) {
+ // If a non-empty serverlist hasn't been received from the balancer,
+ // propagate the update to fallback_backend_addresses.
+ fallback_update_locked(exec_ctx, glb_policy, addresses);
+ } else if (glb_policy->updating_lb_channel) {
+ // If we have received a serverlist from the balancer, we need to defer
+ // the update when there is one already in progress.
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO,
+ "Update already in progress for grpclb %p. Deferring update.",
+ (void *)glb_policy);
+ }
+ if (glb_policy->pending_update_args != NULL) {
+ grpc_channel_args_destroy(exec_ctx,
+ glb_policy->pending_update_args->args);
+ gpr_free(glb_policy->pending_update_args);
+ }
+ glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
+ sizeof(*glb_policy->pending_update_args));
+ glb_policy->pending_update_args->client_channel_factory =
+ args->client_channel_factory;
+ glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
+ glb_policy->pending_update_args->combiner = args->combiner;
+ return;
+ }
+
+ glb_policy->updating_lb_channel = true;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
- /* Propagate updates to the LB channel through the fake resolver */
+ /* Propagate updates to the LB channel (pick first) through the fake resolver
+ */
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
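
Condensed, the reordered glb_update_locked() above now proceeds in three steps; a comment-only summary of the control flow:

/* glb_update_locked(), condensed from the hunk above:
 * 1. serverlist == NULL      -> fallback_update_locked(): refresh
 *      fallback_backend_addresses and, if the fallback timer has already
 *      fired, hand the new addresses straight to RR; then fall through.
 * 2. else if updating_lb_channel -> stash the args in pending_update_args
 *      and return (the update is applied once the current one finishes).
 * 3. finally -> set updating_lb_channel and push the addresses into the
 *      LB channel through the fake resolver response generator. */
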
@@ -1789,13 +1891,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- /* Count the number of gRPC-LB addresses. There must be at least one.
- * TODO(roth): For now, we ignore non-balancer addresses, but in the
- * future, we may change the behavior such that we fall back to using
- * the non-balancer addresses if we cannot reach any balancers. In the
- * fallback case, we should use the LB policy indicated by
- * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
- * unset, we should default to pick_first). */
+ /* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1831,6 +1927,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
+ glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
+ arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
+ INT_MAX});
+
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg = grpc_channel_arg_string_create(
@@ -1839,6 +1940,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+ /* Extract the backend addresses (may be empty) from the resolver for
+ * fallback. */
+ glb_policy->fallback_backend_addresses =
+ extract_backend_addresses_locked(exec_ctx, addresses);
+
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c
index 4d1405454c..05ab43d0b6 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.c
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.c
@@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
- void* address, size_t address_len,
+ const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 9d9fb143df..cf0f8cb615 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
- void *address, size_t address_len,
+ const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);
diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c
index fb6055f795..c0aec63c1d 100644
--- a/src/core/lib/debug/stats_data.c
+++ b/src/core/lib/debug/stats_data.c
@@ -109,8 +109,6 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
- "executor_threads_created",
- "executor_threads_used",
"server_requested_calls",
"server_slowpath_requests_queued",
};
@@ -219,8 +217,6 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
- "Size of the backing thread pool for overflow gRPC Core work",
- "How many executor threads actually got used",
"How many calls were requested (not necessarily received) by the server",
"How many times was the server slow path taken (indicates too few "
"outstanding requests)",
@@ -238,7 +234,6 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
- "executor_closures_per_wakeup",
"server_cqs_checked",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
@@ -254,7 +249,6 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
- "Number of closures executed each time an executor wakes up",
"How many completion queues were checked looking for a CQ that had "
"requested the incoming call",
};
@@ -326,7 +320,6 @@ const uint8_t grpc_stats_table_7[102] = {
const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 262144);
if (value < 6) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
@@ -352,7 +345,6 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_0, 64));
}
void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 29) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -379,7 +371,6 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_2, 128));
}
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
@@ -405,7 +396,6 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -431,7 +421,6 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@@ -457,7 +446,6 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
@@ -484,7 +472,6 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
}
void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -512,7 +499,6 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(
@@ -540,7 +526,6 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -570,7 +555,6 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
}
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -598,7 +582,6 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -628,7 +611,6 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
}
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -654,36 +636,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
-void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
- int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
- value = GPR_CLAMP(value, 0, 1024);
- if (value < 13) {
- GRPC_STATS_INC_HISTOGRAM(
- (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
- return;
- }
- union {
- double dbl;
- uint64_t uint;
- } _val, _bkt;
- _val.dbl = value;
- if (_val.uint < 4637863191261478912ull) {
- int bucket =
- grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
- _bkt.dbl = grpc_stats_table_6[bucket];
- bucket -= (_val.uint < _bkt.uint);
- GRPC_STATS_INC_HISTOGRAM(
- (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
- return;
- }
- GRPC_STATS_INC_HISTOGRAM((exec_ctx),
- GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
- grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_6, 64));
-}
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
- /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 64);
if (value < 3) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -708,17 +661,17 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_8, 8));
}
-const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 8};
-const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
- 512, 576, 640, 704, 768, 832, 896};
-const int *const grpc_stats_histo_bucket_boundaries[14] = {
+const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 8};
+const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448,
+ 512, 576, 640, 704, 768, 832};
+const int *const grpc_stats_histo_bucket_boundaries[13] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
- grpc_stats_table_6, grpc_stats_table_8};
-void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
+ grpc_stats_table_8};
+void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
@@ -731,5 +684,4 @@ void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write,
- grpc_stats_inc_executor_closures_per_wakeup,
grpc_stats_inc_server_cqs_checked};
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index 6c0ad30543..28dab00117 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -111,8 +111,6 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
- GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
- GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
GRPC_STATS_COUNTER_COUNT
@@ -132,7 +130,6 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
- GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
@@ -163,11 +160,9 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
- GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
- GRPC_STATS_HISTOGRAM_BUCKETS = 904
+ GRPC_STATS_HISTOGRAM_BUCKETS = 840
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@@ -417,11 +412,6 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
-#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
- GRPC_STATS_INC_COUNTER((exec_ctx), \
- GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
-#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
- GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
@@ -468,17 +458,13 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
-#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
- grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
-void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
- int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
-extern const int grpc_stats_histo_buckets[14];
-extern const int grpc_stats_histo_start[14];
-extern const int *const grpc_stats_histo_bucket_boundaries[14];
-extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
+extern const int grpc_stats_histo_buckets[13];
+extern const int grpc_stats_histo_start[13];
+extern const int *const grpc_stats_histo_bucket_boundaries[13];
+extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
index de575f01c7..b5c15ff55c 100644
--- a/src/core/lib/debug/stats_data.yaml
+++ b/src/core/lib/debug/stats_data.yaml
@@ -259,14 +259,6 @@
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor
-- counter: executor_threads_created
- doc: Size of the backing thread pool for overflow gRPC Core work
-- counter: executor_threads_used
- doc: How many executor threads actually got used
-- histogram: executor_closures_per_wakeup
- max: 1024
- buckets: 64
- doc: Number of closures executed each time an executor wakes up
# server
- counter: server_requested_calls
doc: How many calls were requested (not necessarily received) by the server
diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql
index 0611ccaff0..f96e40c00e 100644
--- a/src/core/lib/debug/stats_data_bq_schema.sql
+++ b/src/core/lib/debug/stats_data_bq_schema.sql
@@ -84,7 +84,5 @@ executor_scheduled_to_self_per_iteration:FLOAT,
executor_wakeup_initiated_per_iteration:FLOAT,
executor_queue_drained_per_iteration:FLOAT,
executor_push_retries_per_iteration:FLOAT,
-executor_threads_created_per_iteration:FLOAT,
-executor_threads_used_per_iteration:FLOAT,
server_requested_calls_per_iteration:FLOAT,
server_slowpath_requests_queued_per_iteration:FLOAT
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 2439f15a8a..892385d7d7 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -32,14 +32,16 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
+#define MAX_DEPTH 2
+
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_closure_list elems;
+ size_t depth;
bool shutdown;
bool queued_long_job;
gpr_thd_id id;
- grpc_closure_list local_elems;
} thread_state;
static thread_state *g_thread_state;
@@ -54,35 +56,32 @@ static grpc_tracer_flag executor_trace =
static void executor_thread(void *arg);
-static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
- int n = 0; // number of closures executed
+static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
+ size_t n = 0;
- while (!grpc_closure_list_empty(*list)) {
- grpc_closure *c = list->head;
- grpc_closure_list_init(list);
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
- if (GRPC_TRACER_ON(executor_trace)) {
+ grpc_closure *c = list.head;
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
- gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
- c->file_created, c->line_created);
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
+ c->file_created, c->line_created);
#else
- gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
- }
+ }
#ifndef NDEBUG
- c->scheduled = false;
+ c->scheduled = false;
#endif
- n++;
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- grpc_exec_ctx_flush(exec_ctx);
- }
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ n++;
+ grpc_exec_ctx_flush(exec_ctx);
}
- GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
+ return n;
}
bool grpc_executor_is_threaded() {
@@ -127,7 +126,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
- run_closures(exec_ctx, &g_thread_state[i].elems);
+ run_closures(exec_ctx, g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
@@ -151,14 +150,14 @@ static void executor_thread(void *arg) {
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
- GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
-
- bool used = false;
+ size_t subtract_depth = 0;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
- gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state));
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
+ (int)(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
+ ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -171,20 +170,15 @@ static void executor_thread(void *arg) {
gpr_mu_unlock(&ts->mu);
break;
}
- if (!used) {
- GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
- used = true;
- }
GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
- GPR_ASSERT(grpc_closure_list_empty(ts->local_elems));
- ts->local_elems = ts->elems;
+ grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
- run_closures(&exec_ctx, &ts->local_elems);
+ subtract_depth = run_closures(&exec_ctx, exec);
}
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -217,10 +211,6 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
- if (is_short) {
- grpc_closure_list_append(&ts->local_elems, closure, error);
- return;
- }
}
thread_state *orig_ts = ts;
@@ -260,7 +250,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
- try_new_thread = ts->elems.head != closure &&
+ ts->depth++;
+ try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (!is_short) ts->queued_long_job = true;
gpr_mu_unlock(&ts->mu);
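
Taken together, the executor hunks above replace the old grow heuristic (spawn whenever a pushed closure is not alone in the queue) with explicit backlog accounting against MAX_DEPTH, and drop the thread-local local_elems fast path along with the executor thread stats. A condensed sketch of the new accounting, using the names from this diff:

/* Push side: every queued closure bumps ts->depth; ask for another
 * thread only once the backlog exceeds MAX_DEPTH (2). */
grpc_closure_list_append(&ts->elems, closure, error);
ts->depth++;
try_new_thread = ts->depth > MAX_DEPTH &&
                 cur_thread_count < g_max_threads && !ts->shutdown;

/* Worker side: steal the whole list under the lock, run it unlocked,
 * then credit back the number of closures run on the next iteration. */
ts->depth -= subtract_depth;                     /* previous batch */
grpc_closure_list exec = ts->elems;              /* steal the queue */
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
/* ... gpr_mu_unlock(&ts->mu), then ... */
subtract_depth = run_closures(&exec_ctx, exec);  /* returns the count */
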
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c
index 09fd60a12c..b67ff48d0f 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.c
@@ -87,6 +87,7 @@ static bool composite_call_get_request_metadata(
ctx->on_request_metadata = on_request_metadata;
GRPC_CLOSURE_INIT(&ctx->internal_on_request_metadata,
composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx);
+ bool synchronous = true;
while (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
grpc_call_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
@@ -95,19 +96,12 @@ static bool composite_call_get_request_metadata(
ctx->md_array, &ctx->internal_on_request_metadata, error)) {
if (*error != GRPC_ERROR_NONE) break;
} else {
+ synchronous = false; // Async return.
break;
}
}
- // If we got through all creds synchronously or we got a synchronous
- // error on one of them, return synchronously.
- if (ctx->creds_index == ctx->composite_creds->inner.num_creds ||
- *error != GRPC_ERROR_NONE) {
- gpr_free(ctx);
- return true;
- }
- // At least one inner cred is returning asynchronously, so we'll
- // return asynchronously as well.
- return false;
+ if (synchronous) gpr_free(ctx);
+ return synchronous;
}
static void composite_call_cancel_get_request_metadata(
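
The synchronous flag keeps the function's return convention intact: true means every inner credential produced its metadata (or a synchronous error) inline, so the ctx can be freed immediately; false means at least one inner credential completes later through internal_on_request_metadata, which then owns the ctx. A self-contained sketch of that bool convention with hypothetical stand-ins (not the real gRPC signatures):

#include <stdbool.h>
#include <stdio.h>

typedef void (*md_callback)(void *arg, int error);

/* Hypothetical: returns true if it completed synchronously (the callback
 * will NOT run), false if the callback fires later with the result. */
static bool get_metadata(md_callback cb, void *arg, int *error) {
  (void)cb;
  (void)arg;
  *error = 0;
  return true; /* pretend everything resolved synchronously */
}

static void on_metadata(void *arg, int error) {
  (void)arg;
  printf("async completion, error=%d\n", error);
}

int main(void) {
  int error = 0;
  if (get_metadata(on_metadata, NULL, &error)) {
    printf("sync completion, error=%d\n", error); /* callback never runs */
  }
  return 0;
}
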
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index f130aecd4b..f89f5f1f03 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -86,6 +86,10 @@ void ChannelArguments::SetCompressionAlgorithm(
SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
+void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
+ SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
+}
+
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
if (!mutator) {
return;
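
SetGrpclbFallbackTimeout() is a thin wrapper around SetInt() using the GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS key that glb_create() reads above (default 10000 ms, clamped to [0, INT_MAX]). A hedged sketch of the core-level equivalent, assuming the channel-arg helpers from this revision's channel_args.h:

/* Attach the fallback timeout as an integer channel arg (sketch). */
static grpc_channel_args *add_fallback_timeout(const grpc_channel_args *base) {
  grpc_arg arg = grpc_channel_arg_integer_create(
      (char *)GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, 10000 /* ms */);
  return grpc_channel_args_copy_and_add(base, &arg, 1);
}
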
diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py
index 510bf422a0..f2395eb26c 100644
--- a/src/python/grpcio/support.py
+++ b/src/python/grpcio/support.py
@@ -94,7 +94,7 @@ def diagnose_attribute_error(build_ext, error):
_ERROR_DIAGNOSES = {
errors.CompileError: diagnose_compile_error,
- AttributeError: diagnose_attribute_error
+ AttributeError: diagnose_attribute_error,
}
@@ -102,8 +102,10 @@ def diagnose_build_ext_error(build_ext, error, formatted):
diagnostic = _ERROR_DIAGNOSES.get(type(error))
if diagnostic is None:
raise commands.CommandError(
- "\n\nWe could not diagnose your build failure. Please file an issue at "
- "http://www.github.com/grpc/grpc with `[Python install]` in the title."
- "\n\n{}".format(formatted))
+ "\n\nWe could not diagnose your build failure. If you are unable to "
+ "proceed, please file an issue at http://www.github.com/grpc/grpc "
+ "with `[Python install]` in the title; please attach the whole log "
+ "(including everything that may have appeared above the Python "
+ "backtrace).\n\n{}".format(formatted))
else:
diagnostic(build_ext, error)
diff --git a/src/ruby/lib/grpc/google_rpc_status_utils.rb b/src/ruby/lib/grpc/google_rpc_status_utils.rb
index fdadd6b76e..f253b082b6 100644
--- a/src/ruby/lib/grpc/google_rpc_status_utils.rb
+++ b/src/ruby/lib/grpc/google_rpc_status_utils.rb
@@ -19,10 +19,17 @@ require 'google/rpc/status_pb'
module GRPC
# GoogleRpcStatusUtils provides utilities to convert between a
# GRPC::Core::Status and a deserialized Google::Rpc::Status proto
+ # Returns nil if the grpc-status-details-bin trailer could not be
+ # converted to a GoogleRpcStatus due to the server not providing
+ # the necessary trailers.
+ # Raises an error if the server did provide the necessary trailers
+ # but they fail to deserialize into a GoogleRpcStatus protobuf.
class GoogleRpcStatusUtils
def self.extract_google_rpc_status(status)
fail ArgumentError, 'bad type' unless status.is_a? Struct::Status
- Google::Rpc::Status.decode(status.metadata['grpc-status-details-bin'])
+ grpc_status_details_bin_trailer = 'grpc-status-details-bin'
+ return nil if status.metadata[grpc_status_details_bin_trailer].nil?
+ Google::Rpc::Status.decode(status.metadata[grpc_status_details_bin_trailer])
end
end
end
diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb
index fe221c30dd..6f2a06b1d9 100644
--- a/src/ruby/spec/google_rpc_status_utils_spec.rb
+++ b/src/ruby/spec/google_rpc_status_utils_spec.rb
@@ -31,12 +31,11 @@ describe 'conversion from a status struct to a google protobuf status' do
expect(exception.message.include?('bad type')).to be true
end
- it 'fails with some error if the header key is missing' do
+ it 'returns nil if the header key is missing' do
status = Struct::Status.new(1, 'details', key: 'val')
expect(status.metadata.nil?).to be false
- expect do
- GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
- end.to raise_error(StandardError)
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ status)).to be(nil)
end
it 'fails with some error if the header key fails to deserialize' do
@@ -221,3 +220,73 @@ describe 'receving a google rpc status from a remote endpoint' do
status_from_exception)).to eq(rpc_status)
end
end
+
+# A test service that fails without explicitly setting the
+# grpc-status-details-bin trailer. Tests assumptions about the value
+# of grpc-status-details-bin on the client side when the trailer wasn't
+# set explicitly.
+class NoStatusDetailsBinTestService
+ include GRPC::GenericService
+ rpc :an_rpc, EchoMsg, EchoMsg
+
+ def an_rpc(_, _)
+ fail GRPC::Unknown
+ end
+end
+
+NoStatusDetailsBinTestServiceStub = NoStatusDetailsBinTestService.rpc_stub_class
+
+describe 'when the endpoint doesnt send grpc-status-details-bin' do
+ def start_server
+ @srv = GRPC::RpcServer.new(pool_size: 1)
+ @server_port = @srv.add_http2_port('localhost:0',
+ :this_port_is_insecure)
+ @srv.handle(NoStatusDetailsBinTestService)
+ @server_thd = Thread.new { @srv.run }
+ @srv.wait_till_running
+ end
+
+ def stop_server
+ expect(@srv.stopped?).to be(false)
+ @srv.stop
+ @server_thd.join
+ expect(@srv.stopped?).to be(true)
+ end
+
+ before(:each) do
+ start_server
+ end
+
+ after(:each) do
+ stop_server
+ end
+
+ it 'should receive nil when we try to extract a google '\
+ 'rpc status from a BadStatus exception that didnt have it' do
+ stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ begin
+ stub.an_rpc(EchoMsg.new)
+ rescue GRPC::Unknown => e
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ e.to_status)
+ end
+ expect(rpc_status).to be(nil)
+ end
+
+ it 'should receive nil when we try to extract a google '\
+ 'rpc status from an op views status object that didnt have it' do
+ stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ op = stub.an_rpc(EchoMsg.new, return_op: true)
+ begin
+ op.execute
+ rescue GRPC::Unknown => e
+ status_from_exception = e.to_status
+ end
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ status_from_exception)).to be(nil)
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ op.status)).to be nil
+ end
+end