-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc | 4
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 14
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc | 5
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h | 5
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 67
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc | 9
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h | 1
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c | 7
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h | 21
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 9
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 10
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc | 6
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 3
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.cc | 5
-rw-r--r--  src/core/lib/surface/channel_ping.cc | 2
-rw-r--r--  src/core/lib/surface/lame_client.cc | 9
-rw-r--r--  src/core/lib/transport/transport.h | 10
-rw-r--r--  src/core/lib/transport/transport_op_string.cc | 2
-rw-r--r--  src/proto/grpc/lb/v1/load_balancer.proto | 7
-rwxr-xr-x  src/ruby/end2end/multiple_killed_watching_threads_driver.rb | 23
-rw-r--r--  test/cpp/end2end/client_lb_end2end_test.cc | 33
-rw-r--r--  test/cpp/grpclb/grpclb_api_test.cc | 8
-rw-r--r--  test/cpp/grpclb/grpclb_test.cc | 22
-rw-r--r--  test/cpp/qps/client_async.cc | 1
-rw-r--r--  tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh (renamed from tools/internal_ci/linux/grpc_bazel_on_foundry.sh) | 1
-rw-r--r--  tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh | 56
-rw-r--r--  tools/interop_matrix/client_matrix.py | 139
-rwxr-xr-x  tools/interop_matrix/create_matrix_images.py | 39
-rw-r--r--  tools/interop_matrix/patches/README.md | 38
-rw-r--r--  tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch | 34
-rwxr-xr-x  tools/interop_matrix/run_interop_matrix_tests.py | 6
-rwxr-xr-x  tools/interop_matrix/testcases/ruby__v1.0.1 | 20
32 files changed, 433 insertions(+), 183 deletions(-)
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index ed437d255c..5b86e8631f 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -83,8 +83,8 @@ static void done_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
}
static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
+ gpr_mu_lock(&g_poller_mu);
if (gpr_unref(&g_poller->refs)) {
- gpr_mu_lock(&g_poller_mu);
backup_poller* p = g_poller;
g_poller = nullptr;
gpr_mu_unlock(&g_poller_mu);
@@ -95,6 +95,8 @@ static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
p, grpc_schedule_on_exec_ctx));
gpr_mu_unlock(p->pollset_mu);
grpc_timer_cancel(exec_ctx, &p->polling_timer);
+ } else {
+ gpr_mu_unlock(&g_poller_mu);
}
}
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index aced9adf9f..fc8210569e 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -643,16 +643,22 @@ static void start_transport_op_locked(grpc_exec_ctx* exec_ctx, void* arg,
op->connectivity_state = nullptr;
}
- if (op->send_ping != nullptr) {
+ if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
if (chand->lb_policy == nullptr) {
GRPC_CLOSURE_SCHED(
- exec_ctx, op->send_ping,
+ exec_ctx, op->send_ping.on_initiate,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, op->send_ping.on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
- grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
+ grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy,
+ op->send_ping.on_initiate,
+ op->send_ping.on_ack);
op->bind_pollset = nullptr;
}
- op->send_ping = nullptr;
+ op->send_ping.on_initiate = nullptr;
+ op->send_ping.on_ack = nullptr;
}
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index db566f1b56..6b6022c247 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -138,8 +138,9 @@ void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
- grpc_closure* closure) {
- policy->vtable->ping_one_locked(exec_ctx, policy, closure);
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
+ policy->vtable->ping_one_locked(exec_ctx, policy, on_initiate, on_ack);
}
void grpc_lb_policy_notify_on_state_change_locked(
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index d3159eebf3..38cc26422f 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -78,7 +78,7 @@ struct grpc_lb_policy_vtable {
/** \see grpc_lb_policy_ping_one */
void (*ping_one_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
- grpc_closure* closure);
+ grpc_closure* on_initiate, grpc_closure* on_ack);
/** Try to enter a READY connectivity state */
void (*exit_idle_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
@@ -171,7 +171,8 @@ int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
against one of the connected subchannels managed by \a policy. */
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
grpc_lb_policy* policy,
- grpc_closure* closure);
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
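For orientation, here is a minimal sketch of what an LB policy's ping_one_locked looks like under the new two-closure signature. It mirrors the pick_first change further down in this diff; `example_lb_policy` and its `selected` field are hypothetical stand-ins, not code from this commit.
```
// Sketch only: mirrors the pick_first pattern below. "example_lb_policy" and
// its "selected" subchannel field are illustrative stand-ins.
static void example_ping_one_locked(grpc_exec_ctx* exec_ctx,
                                    grpc_lb_policy* pol,
                                    grpc_closure* on_initiate,
                                    grpc_closure* on_ack) {
  example_lb_policy* p = (example_lb_policy*)pol;
  if (p->selected != nullptr) {
    // Forward both closures; the connected subchannel turns them into a
    // transport op's send_ping.on_initiate / send_ping.on_ack.
    grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
                                   on_initiate, on_ack);
  } else {
    // No connection: fail both callbacks immediately.
    GRPC_CLOSURE_SCHED(exec_ctx, on_initiate,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
    GRPC_CLOSURE_SCHED(exec_ctx, on_ack,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
  }
}
```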
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index db06fc20b6..704cbd8a3b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -275,18 +275,30 @@ static void add_pending_pick(pending_pick** root,
typedef struct pending_ping {
struct pending_ping* next;
- /* args for wrapped_notify */
- wrapped_rr_closure_arg wrapped_notify_arg;
+ /* args for sending the ping */
+ wrapped_rr_closure_arg* on_initiate;
+ wrapped_rr_closure_arg* on_ack;
} pending_ping;
-static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
+static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
- pping->wrapped_notify_arg.wrapped_closure = notify;
- pping->wrapped_notify_arg.free_when_done = pping;
+ if (on_initiate != nullptr) {
+ pping->on_initiate =
+ (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
+ pping->on_initiate->wrapped_closure = on_initiate;
+ pping->on_initiate->free_when_done = pping->on_initiate;
+ GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
+ pping->on_initiate, grpc_schedule_on_exec_ctx);
+ }
+ if (on_ack != nullptr) {
+ pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
+ pping->on_ack->wrapped_closure = on_ack;
+ pping->on_ack->free_when_done = pping->on_ack;
+ GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
+ pping->on_ack, grpc_schedule_on_exec_ctx);
+ }
pping->next = *root;
- GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
- wrapped_rr_closure, &pping->wrapped_notify_arg,
- grpc_schedule_on_exec_ctx);
*root = pping;
}
@@ -822,14 +834,25 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
pending_ping* pping;
while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next;
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
- pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
+ grpc_closure* on_initiate = nullptr;
+ grpc_closure* on_ack = nullptr;
+ if (pping->on_initiate != nullptr) {
+ GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
+ pping->on_initiate->rr_policy = glb_policy->rr_policy;
+ on_initiate = &pping->on_initiate->wrapper_closure;
+ }
+ if (pping->on_ack != nullptr) {
+ GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
+ pping->on_ack->rr_policy = glb_policy->rr_policy;
+ on_ack = &pping->on_ack->wrapper_closure;
+ }
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
glb_policy, glb_policy->rr_policy);
}
- grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
- &pping->wrapped_notify_arg.wrapper_closure);
+ grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, on_initiate,
+ on_ack);
+ gpr_free(pping);
}
}
@@ -1052,8 +1075,16 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
while (pping != nullptr) {
pending_ping* next = pping->next;
- GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
- GRPC_ERROR_REF(error));
+ if (pping->on_initiate != nullptr) {
+ GRPC_CLOSURE_SCHED(exec_ctx, &pping->on_initiate->wrapper_closure,
+ GRPC_ERROR_REF(error));
+ gpr_free(pping->on_initiate);
+ }
+ if (pping->on_ack != nullptr) {
+ GRPC_CLOSURE_SCHED(exec_ctx, &pping->on_ack->wrapper_closure,
+ GRPC_ERROR_REF(error));
+ gpr_free(pping->on_ack);
+ }
gpr_free(pping);
pping = next;
}
@@ -1251,12 +1282,14 @@ static grpc_connectivity_state glb_check_connectivity_locked(
}
static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
- grpc_closure* closure) {
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
if (glb_policy->rr_policy) {
- grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
+ grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, on_initiate,
+ on_ack);
} else {
- add_pending_ping(&glb_policy->pending_pings, closure);
+ add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 2c8d7f4291..fc781da330 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -215,9 +215,6 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
return nullptr;
}
}
- if (res.server_list.has_expiration_interval) {
- sl->expiration_interval = res.server_list.expiration_interval;
- }
return sl;
}
@@ -237,8 +234,6 @@ grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
grpc_grpclb_serverlist* copy =
(grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
- memcpy(&copy->expiration_interval, &sl->expiration_interval,
- sizeof(grpc_grpclb_duration));
copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
@@ -257,10 +252,6 @@ bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
if (lhs->num_servers != rhs->num_servers) {
return false;
}
- if (grpc_grpclb_duration_compare(&lhs->expiration_interval,
- &rhs->expiration_interval) != 0) {
- return false;
- }
for (size_t i = 0; i < lhs->num_servers; i++) {
if (!grpc_grpclb_server_equals(lhs->servers[i], rhs->servers[i])) {
return false;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index 017c40ec1a..ccb0212643 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -35,7 +35,6 @@ typedef grpc_lb_v1_Duration grpc_grpclb_duration;
typedef struct {
grpc_grpclb_server** servers;
size_t num_servers;
- grpc_grpclb_duration expiration_interval;
} grpc_grpclb_serverlist;
/** Create a request for a gRPC LB service under \a lb_service_name */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
index 6a5d54c82a..4e6c5cc832 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
@@ -61,9 +61,8 @@ const pb_field_t grpc_lb_v1_InitialLoadBalanceResponse_fields[3] = {
PB_LAST_FIELD
};
-const pb_field_t grpc_lb_v1_ServerList_fields[3] = {
+const pb_field_t grpc_lb_v1_ServerList_fields[2] = {
PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, grpc_lb_v1_ServerList, servers, servers, &grpc_lb_v1_Server_fields),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v1_ServerList, expiration_interval, servers, &grpc_lb_v1_Duration_fields),
PB_LAST_FIELD
};
@@ -85,7 +84,7 @@ const pb_field_t grpc_lb_v1_Server_fields[5] = {
* numbers or field sizes that are larger than what can fit in 8 or 16 bit
* field descriptors.
*/
-PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v1_ServerList, servers) < 65536 && pb_membersize(grpc_lb_v1_ServerList, expiration_interval) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v1_ServerList, servers) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
#endif
#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
@@ -96,7 +95,7 @@ PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request)
* numbers or field sizes that are larger than what can fit in the default
* 8 bit descriptors.
*/
-PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 256 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v1_ServerList, servers) < 256 && pb_membersize(grpc_lb_v1_ServerList, expiration_interval) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 256 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v1_ServerList, servers) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
#endif
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index 93333d1aed..066c076202 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -14,6 +14,11 @@ extern "C" {
#endif
/* Struct definitions */
+typedef struct _grpc_lb_v1_ServerList {
+ pb_callback_t servers;
+/* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */
+} grpc_lb_v1_ServerList;
+
typedef struct _grpc_lb_v1_ClientStatsPerToken {
pb_callback_t load_balance_token;
bool has_num_calls;
@@ -79,13 +84,6 @@ typedef struct _grpc_lb_v1_InitialLoadBalanceResponse {
/* @@protoc_insertion_point(struct:grpc_lb_v1_InitialLoadBalanceResponse) */
} grpc_lb_v1_InitialLoadBalanceResponse;
-typedef struct _grpc_lb_v1_ServerList {
- pb_callback_t servers;
- bool has_expiration_interval;
- grpc_lb_v1_Duration expiration_interval;
-/* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */
-} grpc_lb_v1_ServerList;
-
typedef struct _grpc_lb_v1_LoadBalanceRequest {
bool has_initial_request;
grpc_lb_v1_InitialLoadBalanceRequest initial_request;
@@ -113,7 +111,7 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse {
#define grpc_lb_v1_ClientStats_init_default {false, grpc_lb_v1_Timestamp_init_default, false, 0, false, 0, false, 0, false, 0, {{NULL}, NULL}}
#define grpc_lb_v1_LoadBalanceResponse_init_default {false, grpc_lb_v1_InitialLoadBalanceResponse_init_default, false, grpc_lb_v1_ServerList_init_default}
#define grpc_lb_v1_InitialLoadBalanceResponse_init_default {false, "", false, grpc_lb_v1_Duration_init_default}
-#define grpc_lb_v1_ServerList_init_default {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_default}
+#define grpc_lb_v1_ServerList_init_default {{{NULL}, NULL}}
#define grpc_lb_v1_Server_init_default {false, {0, {0}}, false, 0, false, "", false, 0}
#define grpc_lb_v1_Duration_init_zero {false, 0, false, 0}
#define grpc_lb_v1_Timestamp_init_zero {false, 0, false, 0}
@@ -123,10 +121,11 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse {
#define grpc_lb_v1_ClientStats_init_zero {false, grpc_lb_v1_Timestamp_init_zero, false, 0, false, 0, false, 0, false, 0, {{NULL}, NULL}}
#define grpc_lb_v1_LoadBalanceResponse_init_zero {false, grpc_lb_v1_InitialLoadBalanceResponse_init_zero, false, grpc_lb_v1_ServerList_init_zero}
#define grpc_lb_v1_InitialLoadBalanceResponse_init_zero {false, "", false, grpc_lb_v1_Duration_init_zero}
-#define grpc_lb_v1_ServerList_init_zero {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_zero}
+#define grpc_lb_v1_ServerList_init_zero {{{NULL}, NULL}}
#define grpc_lb_v1_Server_init_zero {false, {0, {0}}, false, 0, false, "", false, 0}
/* Field tags (for use in manual encoding/decoding) */
+#define grpc_lb_v1_ServerList_servers_tag 1
#define grpc_lb_v1_ClientStatsPerToken_load_balance_token_tag 1
#define grpc_lb_v1_ClientStatsPerToken_num_calls_tag 2
#define grpc_lb_v1_Duration_seconds_tag 1
@@ -146,8 +145,6 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse {
#define grpc_lb_v1_ClientStats_calls_finished_with_drop_tag 8
#define grpc_lb_v1_InitialLoadBalanceResponse_load_balancer_delegate_tag 1
#define grpc_lb_v1_InitialLoadBalanceResponse_client_stats_report_interval_tag 2
-#define grpc_lb_v1_ServerList_servers_tag 1
-#define grpc_lb_v1_ServerList_expiration_interval_tag 3
#define grpc_lb_v1_LoadBalanceRequest_initial_request_tag 1
#define grpc_lb_v1_LoadBalanceRequest_client_stats_tag 2
#define grpc_lb_v1_LoadBalanceResponse_initial_response_tag 1
@@ -162,7 +159,7 @@ extern const pb_field_t grpc_lb_v1_ClientStatsPerToken_fields[3];
extern const pb_field_t grpc_lb_v1_ClientStats_fields[7];
extern const pb_field_t grpc_lb_v1_LoadBalanceResponse_fields[3];
extern const pb_field_t grpc_lb_v1_InitialLoadBalanceResponse_fields[3];
-extern const pb_field_t grpc_lb_v1_ServerList_fields[3];
+extern const pb_field_t grpc_lb_v1_ServerList_fields[2];
extern const pb_field_t grpc_lb_v1_Server_fields[5];
/* Maximum encoded size of messages (where known) */
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 228a77d9db..b2007ca301 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -226,13 +226,16 @@ static void pf_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
}
static void pf_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
- grpc_closure* closure) {
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
- closure);
+ on_initiate, on_ack);
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, on_initiate,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
+ GRPC_CLOSURE_SCHED(exec_ctx, on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index c05557ba6f..df458d339d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -540,7 +540,8 @@ static void rr_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
}
static void rr_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
- grpc_closure* closure) {
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
@@ -548,11 +549,14 @@ static void rr_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
&p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_ping");
- grpc_connected_subchannel_ping(exec_ctx, target, closure);
+ grpc_connected_subchannel_ping(exec_ctx, target, on_initiate, on_ack);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(
- exec_ctx, closure,
+ exec_ctx, on_initiate,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
}
}
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 58e294d597..bff2ae487d 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -584,10 +584,12 @@ void grpc_connected_subchannel_notify_on_state_change(
void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
grpc_connected_subchannel* con,
- grpc_closure* closure) {
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
grpc_channel_element* elem;
- op->send_ping = closure;
+ op->send_ping.on_initiate = on_initiate;
+ op->send_ping.on_ack = on_ack;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
elem->filter->start_transport_op(exec_ctx, elem, op);
}
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 1f326fc1d2..3916ea00ca 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -135,7 +135,8 @@ void grpc_connected_subchannel_notify_on_state_change(
grpc_closure* notify);
void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
grpc_connected_subchannel* channel,
- grpc_closure* notify);
+ grpc_closure* on_initiate,
+ grpc_closure* on_ack);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 63ac65ac78..ea637e6bec 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -1815,8 +1815,9 @@ static void perform_transport_op_locked(grpc_exec_ctx* exec_ctx,
grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, op->bind_pollset_set);
}
- if (op->send_ping) {
- send_ping_locked(exec_ctx, t, nullptr, op->send_ping);
+ if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
+ send_ping_locked(exec_ctx, t, op->send_ping.on_initiate,
+ op->send_ping.on_ack);
grpc_chttp2_initiate_write(exec_ctx, t,
GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
}
diff --git a/src/core/lib/surface/channel_ping.cc b/src/core/lib/surface/channel_ping.cc
index e8f47f01cf..7b1964fd55 100644
--- a/src/core/lib/surface/channel_ping.cc
+++ b/src/core/lib/surface/channel_ping.cc
@@ -57,7 +57,7 @@ void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
pr->tag = tag;
pr->cq = cq;
GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
- op->send_ping = &pr->closure;
+ op->send_ping.on_ack = &pr->closure;
op->bind_pollset = grpc_cq_pollset(cq);
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
top_elem->filter->start_transport_op(&exec_ctx, top_elem, op);
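From application code the behavior is unchanged: grpc_channel_ping still posts a tag to a completion queue, but with this change the tag is wired to send_ping.on_ack, so it completes only once the ping ack comes back. A rough usage sketch of the public API follows; the helper name is made up for illustration.
```
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

// Hypothetical helper: issue one ping and block until its ack (or failure)
// is surfaced on the completion queue.
static void ping_channel_once(grpc_channel* channel,
                              grpc_completion_queue* cq) {
  void* tag = (void*)0x1;
  grpc_channel_ping(channel, cq, tag, nullptr /* reserved */);
  grpc_event ev = grpc_completion_queue_next(
      cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
  // ev.success reflects whether the ping ack arrived without error.
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE && ev.tag == tag);
}
```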
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index c32c9af50e..559d7af43e 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -104,9 +104,14 @@ static void lame_start_transport_op(grpc_exec_ctx* exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, op->on_connectivity_state_change,
GRPC_ERROR_NONE);
}
- if (op->send_ping != nullptr) {
+ if (op->send_ping.on_initiate != nullptr) {
GRPC_CLOSURE_SCHED(
- exec_ctx, op->send_ping,
+ exec_ctx, op->send_ping.on_initiate,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
+ }
+ if (op->send_ping.on_ack != nullptr) {
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, op->send_ping.on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index b3cf04c22d..73264142d9 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -245,8 +245,14 @@ typedef struct grpc_transport_op {
grpc_pollset* bind_pollset;
/** add this transport to a pollset_set */
grpc_pollset_set* bind_pollset_set;
- /** send a ping, call this back if not NULL */
- grpc_closure* send_ping;
+ /** send a ping if either on_initiate or on_ack is not NULL */
+ struct {
+ /** The ping may be delayed by the transport; the on_initiate callback
+ will be called when the ping is actually sent. */
+ grpc_closure* on_initiate;
+ /** Called when the ping ack is received */
+ grpc_closure* on_ack;
+ } send_ping;
/***************************************************************************
* remaining fields are initialized and used at the discretion of the
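As the subchannel change above illustrates, a filter that wants a ping now fills in the new pair instead of the single closure. A minimal sketch, assuming the internal transport headers and an `elem` already pointing at the appropriate channel element:
```
// Sketch of issuing a ping through the new transport op fields. Either
// closure may be left NULL; the op counts as a ping request if at least one
// of them is set (see perform_transport_op_locked above).
static void send_ping_on_elem(grpc_exec_ctx* exec_ctx,
                              grpc_channel_element* elem,
                              grpc_closure* on_initiate, grpc_closure* on_ack) {
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  op->send_ping.on_initiate = on_initiate;  // runs when the ping is written
  op->send_ping.on_ack = on_ack;            // runs when the ping ack arrives
  elem->filter->start_transport_op(exec_ctx, elem, op);
}
```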
diff --git a/src/core/lib/transport/transport_op_string.cc b/src/core/lib/transport/transport_op_string.cc
index e69ab02570..c0f82fea0d 100644
--- a/src/core/lib/transport/transport_op_string.cc
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -187,7 +187,7 @@ char* grpc_transport_op_string(grpc_transport_op* op) {
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
}
- if (op->send_ping != nullptr) {
+ if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
// first = false;
gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
diff --git a/src/proto/grpc/lb/v1/load_balancer.proto b/src/proto/grpc/lb/v1/load_balancer.proto
index 0a33568bd6..75c916defa 100644
--- a/src/proto/grpc/lb/v1/load_balancer.proto
+++ b/src/proto/grpc/lb/v1/load_balancer.proto
@@ -133,11 +133,8 @@ message ServerList {
// unless instructed otherwise via the client_config.
repeated Server servers = 1;
- // Indicates the amount of time that the client should consider this server
- // list as valid. It may be considered stale after waiting this interval of
- // time after receiving the list. If the interval is not positive, the
- // client can assume the list is valid until the next list is received.
- Duration expiration_interval = 3;
+ // Was google.protobuf.Duration expiration_interval.
+ reserved 3;
}
// Contains server information. When the drop field is not true, use the other
diff --git a/src/ruby/end2end/multiple_killed_watching_threads_driver.rb b/src/ruby/end2end/multiple_killed_watching_threads_driver.rb
index 59f6f275e4..8f078cfbed 100755
--- a/src/ruby/end2end/multiple_killed_watching_threads_driver.rb
+++ b/src/ruby/end2end/multiple_killed_watching_threads_driver.rb
@@ -20,7 +20,7 @@ Thread.abort_on_exception = true
include GRPC::Core::ConnectivityStates
-def watch_state(ch)
+def watch_state(ch, sleep_time)
thd = Thread.new do
state = ch.connectivity_state(false)
fail "non-idle state: #{state}" unless state == IDLE
@@ -28,23 +28,34 @@ def watch_state(ch)
end
# sleep to get the thread into the middle of a
# "watch connectivity state" call
- sleep 0.1
+ sleep sleep_time
thd.kill
end
-def main
+def run_multiple_killed_watches(num_threads, sleep_time)
channels = []
- 10.times do
+ num_threads.times do
ch = GRPC::Core::Channel.new('dummy_host',
nil, :this_channel_is_insecure)
- watch_state(ch)
+ watch_state(ch, sleep_time)
channels << ch
end
# checking state should still be safe to call
channels.each do |c|
- fail unless c.connectivity_state(false) == FATAL_FAILURE
+ connectivity_state = c.connectivity_state(false)
+ # The state should be FATAL_FAILURE in the case that it was interrupted
+ # while watching connectivity state, and IDLE if it we never started
+ # watching the channel's connectivity state
+ unless [FATAL_FAILURE, IDLE].include?(connectivity_state)
+ fail "unexpected connectivity state: #{connectivity_state}"
+ end
end
end
+def main
+ run_multiple_killed_watches(10, 0.1)
+ run_multiple_killed_watches(1000, 0.001)
+end
+
main
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index f8bb12fde1..2696826ed8 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -156,7 +156,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
}
- Status SendRpc(EchoResponse* response = nullptr) {
+ bool SendRpc(EchoResponse* response = nullptr) {
const bool local_response = (response == nullptr);
if (local_response) response = new EchoResponse;
EchoRequest request;
@@ -164,19 +164,19 @@ class ClientLbEnd2endTest : public ::testing::Test {
ClientContext context;
Status status = stub_->Echo(&context, request, response);
if (local_response) delete response;
- return status;
+ return status.ok();
}
void CheckRpcSendOk() {
EchoResponse response;
- const Status status = SendRpc(&response);
- EXPECT_TRUE(status.ok());
+ const bool success = SendRpc(&response);
+ EXPECT_TRUE(success);
EXPECT_EQ(response.message(), kRequestMessage_);
}
void CheckRpcSendFailure() {
- const Status status = SendRpc();
- EXPECT_FALSE(status.ok());
+ const bool success = SendRpc();
+ EXPECT_FALSE(success);
}
struct ServerData {
@@ -573,15 +573,28 @@ TEST_F(ClientLbEnd2endTest, RoundRobinReresolve) {
CheckRpcSendOk();
}
// Kill all servers
+ gpr_log(GPR_INFO, "****** ABOUT TO KILL SERVERS *******");
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown(false);
}
- // Client request should fail.
- CheckRpcSendFailure();
+ gpr_log(GPR_INFO, "****** SERVERS KILLED *******");
+ gpr_log(GPR_INFO, "****** SENDING DOOMED REQUESTS *******");
+ // Client requests should fail. Send enough to tickle all subchannels.
+ for (size_t i = 0; i < servers_.size(); ++i) CheckRpcSendFailure();
+ gpr_log(GPR_INFO, "****** DOOMED REQUESTS SENT *******");
// Bring servers back up on the same port (we aren't recreating the channel).
+ gpr_log(GPR_INFO, "****** RESTARTING SERVERS *******");
StartServers(kNumServers, ports);
- // Client request should succeed.
- CheckRpcSendOk();
+ gpr_log(GPR_INFO, "****** SERVERS RESTARTED *******");
+ gpr_log(GPR_INFO, "****** SENDING REQUEST TO SUCCEED *******");
+ // Client request should eventually (but still fairly soon) succeed.
+ const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ while (gpr_time_cmp(deadline, now) > 0) {
+ if (SendRpc()) break;
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ }
+ GPR_ASSERT(gpr_time_cmp(deadline, now) > 0);
}
} // namespace
diff --git a/test/cpp/grpclb/grpclb_api_test.cc b/test/cpp/grpclb/grpclb_api_test.cc
index 7b62080b49..03778e1fc5 100644
--- a/test/cpp/grpclb/grpclb_api_test.cc
+++ b/test/cpp/grpclb/grpclb_api_test.cc
@@ -98,9 +98,6 @@ TEST_F(GrpclbTest, ParseResponseServerList) {
server->set_port(54321);
server->set_load_balance_token("load_balancing");
server->set_drop(true);
- auto* expiration_interval = serverlist->mutable_expiration_interval();
- expiration_interval->set_seconds(888);
- expiration_interval->set_nanos(999);
const grpc::string encoded_response = response.SerializeAsString();
const grpc_slice encoded_slice = grpc_slice_from_copied_buffer(
@@ -121,11 +118,6 @@ TEST_F(GrpclbTest, ParseResponseServerList) {
EXPECT_STREQ(c_serverlist->servers[1]->load_balance_token, "load_balancing");
EXPECT_TRUE(c_serverlist->servers[1]->drop);
- EXPECT_TRUE(c_serverlist->expiration_interval.has_seconds);
- EXPECT_EQ(c_serverlist->expiration_interval.seconds, 888);
- EXPECT_TRUE(c_serverlist->expiration_interval.has_nanos);
- EXPECT_EQ(c_serverlist->expiration_interval.nanos, 999);
-
grpc_slice_unref(encoded_slice);
grpc_grpclb_destroy_serverlist(c_serverlist);
}
diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc
index a469fbb7e3..62ae495177 100644
--- a/test/cpp/grpclb/grpclb_test.cc
+++ b/test/cpp/grpclb/grpclb_test.cc
@@ -113,10 +113,9 @@ typedef struct test_fixture {
static void* tag(intptr_t t) { return (void*)t; }
-static grpc_slice build_response_payload_slice(
- const char* host, int* ports, size_t nports,
- int64_t expiration_interval_secs, int32_t expiration_interval_nanos,
- const char* token_prefix) {
+static grpc_slice build_response_payload_slice(const char* host, int* ports,
+ size_t nports,
+ const char* token_prefix) {
// server_list {
// servers {
// ip_address: <in_addr/6 bytes of an IP>
@@ -128,15 +127,6 @@ static grpc_slice build_response_payload_slice(
grpc::lb::v1::LoadBalanceResponse response;
auto* serverlist = response.mutable_server_list();
- if (expiration_interval_secs > 0 || expiration_interval_nanos > 0) {
- auto* expiration_interval = serverlist->mutable_expiration_interval();
- if (expiration_interval_secs > 0) {
- expiration_interval->set_seconds(expiration_interval_secs);
- }
- if (expiration_interval_nanos > 0) {
- expiration_interval->set_nanos(expiration_interval_nanos);
- }
- }
for (size_t i = 0; i < nports; i++) {
auto* server = serverlist->add_servers();
// TODO(dgq): test ipv6
@@ -248,13 +238,13 @@ static void start_lb_server(server_fixture* sf, int* ports, size_t nports,
if (i == 0) {
// First half of the ports.
response_payload_slice = build_response_payload_slice(
- "127.0.0.1", ports, nports / 2, -1, -1, sf->lb_token_prefix);
+ "127.0.0.1", ports, nports / 2, sf->lb_token_prefix);
} else {
// Second half of the ports.
sleep_ms(update_delay_ms);
response_payload_slice = build_response_payload_slice(
- "127.0.0.1", ports + (nports / 2), (nports + 1) / 2 /* ceil */, -1,
- -1, "" /* this half doesn't get to receive an LB token */);
+ "127.0.0.1", ports + (nports / 2), (nports + 1) / 2 /* ceil */,
+ "" /* this half doesn't get to receive an LB token */);
}
response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1);
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 07888214e7..7cf9d3ea7e 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -280,6 +280,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
},
&got_tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME))) {
t->UpdateHistogram(entry_ptr);
+ entry = HistogramEntry();
shutdown_mu->lock();
ctx = ProcessTag(thread_idx, got_tag);
if (ctx == nullptr) {
diff --git a/tools/internal_ci/linux/grpc_bazel_on_foundry.sh b/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
index e328be8aab..c43ac0e708 100644
--- a/tools/internal_ci/linux/grpc_bazel_on_foundry.sh
+++ b/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
@@ -52,4 +52,5 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-fl@sha256:aa20628a902f06a11a015caa94b0432eb60690de2d2525bd046b9eea046f5d8a" }' \
--crosstool_top=@bazel_toolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
--define GRPC_PORT_ISOLATED_RUNTIME=1 \
+ -c dbg \
-- //test/...
diff --git a/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh b/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
new file mode 100644
index 0000000000..b106b71a85
--- /dev/null
+++ b/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# A temporary solution to give Kokoro credentials.
+# The file name 4321_grpc-testing-service needs to match auth_credential in
+# the build config.
+mkdir -p ${KOKORO_KEYSTORE_DIR}
+cp ${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json ${KOKORO_KEYSTORE_DIR}/4321_grpc-testing-service
+
+mkdir -p /tmpfs/tmp/bazel-canary
+ln -f "${KOKORO_GFILE_DIR}/bazel-canary" /tmpfs/tmp/bazel-canary/bazel
+chmod 755 "${KOKORO_GFILE_DIR}/bazel-canary"
+export PATH="/tmpfs/tmp/bazel-canary:${PATH}"
+# This should show /tmpfs/tmp/bazel-canary/bazel
+which bazel
+chmod +x "${KOKORO_GFILE_DIR}/bazel_wrapper.py"
+
+# change to grpc repo root
+cd $(dirname $0)/../../..
+
+source tools/internal_ci/helper_scripts/prepare_build_linux_rc
+
+"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
+ --host_jvm_args=-Dbazel.DigestFunction=SHA1 \
+ test --jobs="50" \
+ --test_timeout="300,450,1200,3600" \
+ --test_output=errors \
+ --verbose_failures=true \
+ --keep_going \
+ --remote_accept_cached=true \
+ --spawn_strategy=remote \
+ --remote_local_fallback=false \
+ --remote_timeout=3600 \
+ --strategy=Javac=remote \
+ --strategy=Closure=remote \
+ --genrule_strategy=remote \
+ --experimental_strict_action_env=true \
+ --experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-fl@sha256:aa20628a902f06a11a015caa94b0432eb60690de2d2525bd046b9eea046f5d8a" }' \
+ --crosstool_top=@bazel_toolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
+ --define GRPC_PORT_ISOLATED_RUNTIME=1 \
+ -c opt \
+ -- //test/...
diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py
index c9a4996029..7b02f51725 100644
--- a/tools/interop_matrix/client_matrix.py
+++ b/tools/interop_matrix/client_matrix.py
@@ -23,6 +23,18 @@ def get_github_repo(lang):
# all other languages use the grpc.git repo.
}.get(lang, 'git@github.com:grpc/grpc.git')
+def get_release_tags(lang):
+ return map(lambda r: get_release_tag_name(r), LANG_RELEASE_MATRIX[lang])
+
+def get_release_tag_name(release_info):
+ assert len(release_info.keys()) == 1
+ return release_info.keys()[0]
+
+def should_build_docker_interop_image_from_release_tag(lang):
+ if lang in ['go', 'java', 'node']:
+ return False
+ return True
+
# Dictionary of runtimes per language
LANG_RUNTIME_MATRIX = {
'cxx': ['cxx'], # This is actually debian8.
@@ -39,81 +51,84 @@ LANG_RUNTIME_MATRIX = {
# a release tag pointing to the latest build of the branch.
LANG_RELEASE_MATRIX = {
'cxx': [
- 'v1.0.1',
- 'v1.1.4',
- 'v1.2.5',
- 'v1.3.9',
- 'v1.4.2',
- 'v1.6.6',
- 'v1.7.2',
+ {'v1.0.1': None},
+ {'v1.1.4': None},
+ {'v1.2.5': None},
+ {'v1.3.9': None},
+ {'v1.4.2': None},
+ {'v1.6.6': None},
+ {'v1.7.2': None},
],
'go': [
- 'v1.0.5',
- 'v1.2.1',
- 'v1.3.0',
- 'v1.4.2',
- 'v1.5.2',
- 'v1.6.0',
- 'v1.7.0',
- 'v1.7.1',
- 'v1.7.2',
- 'v1.7.3',
- 'v1.8.0',
+ {'v1.0.5': None},
+ {'v1.2.1': None},
+ {'v1.3.0': None},
+ {'v1.4.2': None},
+ {'v1.5.2': None},
+ {'v1.6.0': None},
+ {'v1.7.0': None},
+ {'v1.7.1': None},
+ {'v1.7.2': None},
+ {'v1.7.3': None},
+ {'v1.8.0': None},
],
'java': [
- 'v1.0.3',
- 'v1.1.2',
- 'v1.2.0',
- 'v1.3.1',
- 'v1.4.0',
- 'v1.5.0',
- 'v1.6.1',
- 'v1.7.0',
- 'v1.8.0',
+ {'v1.0.3': None},
+ {'v1.1.2': None},
+ {'v1.2.0': None},
+ {'v1.3.1': None},
+ {'v1.4.0': None},
+ {'v1.5.0': None},
+ {'v1.6.1': None},
+ {'v1.7.0': None},
+ {'v1.8.0': None},
],
'python': [
- 'v1.0.x',
- 'v1.1.4',
- 'v1.2.5',
- 'v1.3.9',
- 'v1.4.2',
- 'v1.6.6',
- 'v1.7.2',
+ {'v1.0.x': None},
+ {'v1.1.4': None},
+ {'v1.2.5': None},
+ {'v1.3.9': None},
+ {'v1.4.2': None},
+ {'v1.6.6': None},
+ {'v1.7.2': None},
],
'node': [
- 'v1.0.1',
- 'v1.1.4',
- 'v1.2.5',
- 'v1.3.9',
- 'v1.4.2',
- 'v1.6.6',
- #'v1.7.1', Failing tests.
+ {'v1.0.1': None},
+ {'v1.1.4': None},
+ {'v1.2.5': None},
+ {'v1.3.9': None},
+ {'v1.4.2': None},
+ {'v1.6.6': None},
+ #{'v1.7.1': None}, Failing tests
],
'ruby': [
- # Ruby v1.0.x doesn't have the fix #8914, therefore not supported.
- 'v1.1.4',
- 'v1.2.5',
- 'v1.3.9',
- 'v1.4.2',
- 'v1.6.6',
- 'v1.7.2',
+ {'v1.0.1': {'patch': [
+ 'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
+ 'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
+ ]}},
+ {'v1.1.4': None},
+ {'v1.2.5': None},
+ {'v1.3.9': None},
+ {'v1.4.2': None},
+ {'v1.6.6': None},
+ {'v1.7.2': None},
],
'php': [
- 'v1.0.1',
- 'v1.1.4',
- 'v1.2.5',
- 'v1.3.9',
- 'v1.4.2',
- 'v1.6.6',
- 'v1.7.2',
+ {'v1.0.1': None},
+ {'v1.1.4': None},
+ {'v1.2.5': None},
+ {'v1.3.9': None},
+ {'v1.4.2': None},
+ {'v1.6.6': None},
+ {'v1.7.2': None},
],
'csharp': [
- #'v1.0.1',
- 'v1.1.4',
- 'v1.2.5',
- 'v1.3.9',
- 'v1.4.2',
- 'v1.6.6',
- 'v1.7.2',
+ #{'v1.0.1': None},
+ {'v1.1.4': None},
+ {'v1.2.5': None},
+ {'v1.3.9': None},
+ {'v1.4.2': None},
+ {'v1.6.6': None},
+ {'v1.7.2': None},
],
}
diff --git a/tools/interop_matrix/create_matrix_images.py b/tools/interop_matrix/create_matrix_images.py
index 493a7d5364..a292368131 100755
--- a/tools/interop_matrix/create_matrix_images.py
+++ b/tools/interop_matrix/create_matrix_images.py
@@ -39,7 +39,7 @@ _IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh'
_LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(list(set(
- i for l in client_matrix.LANG_RELEASE_MATRIX.values() for i in l)))
+ client_matrix.get_release_tag_name(info) for lang in client_matrix.LANG_RELEASE_MATRIX.values() for info in lang)))
# Destination directory inside docker image to keep extra info from build time.
_BUILD_INFO = '/var/local/build_info'
@@ -141,8 +141,11 @@ def build_image_jobspec(runtime, env, gcr_tag, stack_base):
'TTY_FLAG': '-t'
}
build_env.update(env)
+ image_builder_path = _IMAGE_BUILDER
+ if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
+ image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
build_job = jobset.JobSpec(
- cmdline=[_IMAGE_BUILDER],
+ cmdline=[image_builder_path],
environ=build_env,
shortname='build_docker_%s' % runtime,
timeout_seconds=30*60)
@@ -157,10 +160,10 @@ def build_all_images_for_lang(lang):
releases = ['master']
else:
if args.release == 'all':
- releases = client_matrix.LANG_RELEASE_MATRIX[lang]
+ releases = client_matrix.get_release_tags(lang)
else:
# Build a particular release.
- if args.release not in ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]:
+ if args.release not in ['master'] + client_matrix.get_release_tags(lang):
jobset.message('SKIPPED',
'%s for %s is not defined' % (args.release, lang),
do_newline=True)
@@ -223,6 +226,33 @@ def cleanup():
docker_images_cleanup = []
atexit.register(cleanup)
+def maybe_apply_patches_on_git_tag(stack_base, lang, release):
+ files_to_patch = []
+ for release_info in client_matrix.LANG_RELEASE_MATRIX[lang]:
+ if client_matrix.get_release_tag_name(release_info) == release:
+ files_to_patch = release_info[release].get('patch')
+ break
+ if not files_to_patch:
+ return
+ patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
+ patch_file = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ patch_file_relative_path))
+ if not os.path.exists(patch_file):
+ jobset.message('FAILED', 'expected patch file |%s| to exist' % patch_file)
+ sys.exit(1)
+ subprocess.check_output(
+ ['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT)
+ for repo_relative_path in files_to_patch:
+ subprocess.check_output(
+ ['git', 'add', repo_relative_path],
+ cwd=stack_base,
+ stderr=subprocess.STDOUT)
+ subprocess.check_output(
+ ['git', 'commit', '-m', ('Hack performed on top of %s git '
+ 'tag in order to build and run the %s '
+ 'interop tests on that tag.' % (lang, release))],
+ cwd=stack_base, stderr=subprocess.STDOUT)
+
def checkout_grpc_stack(lang, release):
"""Invokes 'git check' for the lang/release and returns directory created."""
assert args.git_checkout and args.git_checkout_root
@@ -252,6 +282,7 @@ def checkout_grpc_stack(lang, release):
assert not os.path.dirname(__file__).startswith(stack_base)
output = subprocess.check_output(
['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT)
+ maybe_apply_patches_on_git_tag(stack_base, lang, release)
commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
jobset.message('SUCCESS', 'git checkout',
'%s: %s' % (str(output), commit_log),
diff --git a/tools/interop_matrix/patches/README.md b/tools/interop_matrix/patches/README.md
new file mode 100644
index 0000000000..0c0893f6f2
--- /dev/null
+++ b/tools/interop_matrix/patches/README.md
@@ -0,0 +1,38 @@
+# Patches to grpc repo tags for the backwards compatibility interop tests
+
+This directory has patch files that can be applied to different tags
+of the grpc git repo in order to run the interop tests for a specific
+language based on that tag.
+
+For example, the ruby interop tests do not run on the v1.0.1 tag out of the
+box, but we still want to test compatibility of the 1.0.1 ruby release with
+other versions. To do that, we can apply a patch from this directory to the
+v1.0.1 tag that makes the changes needed to run the ruby interop tests from
+that tag, and then use the patched checkout to build the docker image for
+the ruby v1.0.1 interop tests.
+
+## How to add a new patch to this directory
+
+Patch files in this directory are meant to be applied to a git tag
+with a `git apply` command.
+
+1. Under the `patches` directory, create a new subdirectory
+titled `<language>_<git_tag>` for the git tag being modified.
+
+2. `git checkout <git_tag>`
+
+3. Make necessary modifications to the git repo at that tag.
+
+4. Generate the patch file and copy it into the new directory:
+
+```
+git diff > ~/git_repo.patch
+git checkout <current working branch>
+cp ~/git_repo.patch tools/interop_matrix/patches/<language>_<git_tag>/
+```
+
+5. Edit `LANG_RELEASE_MATRIX` in `client_matrix.py` for your language/tag
+and add a `'patch': [<files>, ...]` entry to its dictionary.
+
+After doing this, the interop image creation script can apply that patch to the
+tag with `git apply` before uploading to the test image repo.
diff --git a/tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch b/tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch
new file mode 100644
index 0000000000..0cd92d691d
--- /dev/null
+++ b/tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch
@@ -0,0 +1,34 @@
+diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+index 88b5130..7ae9f7d 100644
+--- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
++++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+@@ -70,12 +70,12 @@ RUN apt-get update && apt-get install -y time && apt-get clean
+ RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+ RUN \curl -sSL https://get.rvm.io | bash -s stable
+
+-# Install Ruby 2.1
+-RUN /bin/bash -l -c "rvm install ruby-2.1"
+-RUN /bin/bash -l -c "rvm use --default ruby-2.1"
++# Install Ruby 2.1.8
++RUN /bin/bash -l -c "rvm install ruby-2.1.8"
++RUN /bin/bash -l -c "rvm use --default ruby-2.1.8"
+ RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
+ RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
+-RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
++RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1.8' >> ~/.bashrc"
+ RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
+
+ # Prepare ccache
+diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh b/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
+index 97b3860..cec046d 100755
+--- a/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
++++ b/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
+@@ -38,7 +38,7 @@ git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
+ cp -r /var/local/jenkins/service_account $HOME || true
+
+ cd /var/local/git/grpc
+-rvm --default use ruby-2.1
++rvm --default use ruby-2.1.8
+
+ # build Ruby interop client and server
+ (cd src/ruby && gem update bundler && bundle && rake compile)
diff --git a/tools/interop_matrix/run_interop_matrix_tests.py b/tools/interop_matrix/run_interop_matrix_tests.py
index 4265bc5355..1bc35d9e02 100755
--- a/tools/interop_matrix/run_interop_matrix_tests.py
+++ b/tools/interop_matrix/run_interop_matrix_tests.py
@@ -41,7 +41,7 @@ import upload_test_results
_LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(list(set(
- i for l in client_matrix.LANG_RELEASE_MATRIX.values() for i in l)))
+ client_matrix.get_release_tag_name(info) for lang in client_matrix.LANG_RELEASE_MATRIX.values() for info in lang)))
_TEST_TIMEOUT = 30
argp = argparse.ArgumentParser(description='Run interop tests.')
@@ -93,10 +93,10 @@ def find_all_images_for_lang(lang):
"""
# Find all defined releases.
if args.release == 'all':
- releases = ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]
+ releases = ['master'] + client_matrix.get_release_tags(lang)
else:
# Look for a particular release.
- if args.release not in ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]:
+ if args.release not in ['master'] + client_matrix.get_release_tags(lang):
jobset.message('SKIPPED',
'%s for %s is not defined' % (args.release, lang),
do_newline=True)
diff --git a/tools/interop_matrix/testcases/ruby__v1.0.1 b/tools/interop_matrix/testcases/ruby__v1.0.1
new file mode 100755
index 0000000000..effbef1d18
--- /dev/null
+++ b/tools/interop_matrix/testcases/ruby__v1.0.1
@@ -0,0 +1,20 @@
+#!/bin/bash
+echo "Testing ${docker_image:=grpc_interop_ruby:6bd1f0eb-51a4-4ad8-861c-1cbd7a929f33}"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=large_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=ping_pong"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_stream"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=client_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=server_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_begin"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_first_response"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=timeout_on_sleeping_server"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=large_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=ping_pong"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_stream"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=client_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=server_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_begin"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_first_response"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=timeout_on_sleeping_server"