author     Craig Tiller <ctiller@google.com>   2016-03-16 07:56:37 -0700
committer  Craig Tiller <ctiller@google.com>   2016-03-16 07:56:37 -0700
commit     5ee157cd1184e34209d840947c3595864da891b4 (patch)
tree       dfc222fdd97fdbdc67b58053bd406468a27f155b /test
parent     69f57b4672f1c14f39921cbe0b9419a2fc04e4ba (diff)
parent     66e3b02d8a970f978c7907903f04094f802c7b44 (diff)
Merge github.com:grpc/grpc into backoff_lib
Diffstat (limited to 'test')
-rw-r--r--  test/core/client_config/resolvers/dns_resolver_connectivity_test.c  148
-rw-r--r--  test/core/support/thd_test.c                                          15
-rw-r--r--  test/core/surface/concurrent_connectivity_test.c                      83
-rw-r--r--  test/core/tsi/transport_security_test.c                              195
-rw-r--r--  test/cpp/end2end/async_end2end_test.cc                               232
-rw-r--r--  test/cpp/end2end/end2end_test.cc                                      34
-rw-r--r--  test/cpp/end2end/test_service_impl.cc                                 15
-rw-r--r--  test/cpp/qps/client.h                                                 35
-rw-r--r--  test/cpp/qps/driver.cc                                                32
-rw-r--r--  test/cpp/util/test_credentials_provider.h                              5
10 files changed, 591 insertions, 203 deletions
diff --git a/test/core/client_config/resolvers/dns_resolver_connectivity_test.c b/test/core/client_config/resolvers/dns_resolver_connectivity_test.c
new file mode 100644
index 0000000000..75d1eb674f
--- /dev/null
+++ b/test/core/client_config/resolvers/dns_resolver_connectivity_test.c
@@ -0,0 +1,148 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/client_config/resolvers/dns_resolver.h"
+
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/timer.h"
+#include "test/core/util/test_config.h"
+
+static void subchannel_factory_ref(grpc_subchannel_factory *scv) {}
+static void subchannel_factory_unref(grpc_exec_ctx *exec_ctx,
+ grpc_subchannel_factory *scv) {}
+static grpc_subchannel *subchannel_factory_create_subchannel(
+ grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *factory,
+ grpc_subchannel_args *args) {
+ return NULL;
+}
+
+static const grpc_subchannel_factory_vtable sc_vtable = {
+ subchannel_factory_ref, subchannel_factory_unref,
+ subchannel_factory_create_subchannel};
+
+static grpc_subchannel_factory sc_factory = {&sc_vtable};
+
+static gpr_mu g_mu;
+static bool g_fail_resolution = true;
+
+static grpc_resolved_addresses *my_resolve_address(const char *name,
+ const char *addr) {
+ gpr_mu_lock(&g_mu);
+ GPR_ASSERT(0 == strcmp("test", name));
+ if (g_fail_resolution) {
+ g_fail_resolution = false;
+ gpr_mu_unlock(&g_mu);
+ return NULL;
+ } else {
+ gpr_mu_unlock(&g_mu);
+ grpc_resolved_addresses *addrs = gpr_malloc(sizeof(*addrs));
+ addrs->naddrs = 1;
+ addrs->addrs = gpr_malloc(sizeof(*addrs->addrs));
+ addrs->addrs[0].len = 123;
+ return addrs;
+ }
+}
+
+static grpc_resolver *create_resolver(const char *name) {
+ grpc_resolver_factory *factory = grpc_dns_resolver_factory_create();
+ grpc_uri *uri = grpc_uri_parse(name, 0);
+ GPR_ASSERT(uri);
+ grpc_resolver_args args;
+ memset(&args, 0, sizeof(args));
+ args.uri = uri;
+ args.subchannel_factory = &sc_factory;
+ grpc_resolver *resolver =
+ grpc_resolver_factory_create_resolver(factory, &args);
+ grpc_resolver_factory_unref(factory);
+ grpc_uri_destroy(uri);
+ return resolver;
+}
+
+static void on_done(grpc_exec_ctx *exec_ctx, void *ev, bool success) {
+ gpr_event_set(ev, (void *)1);
+}
+
+// interleave waiting for an event with a timer check
+static bool wait_loop(int deadline_seconds, gpr_event *ev) {
+ while (deadline_seconds) {
+ gpr_log(GPR_DEBUG, "Test: waiting for %d more seconds", deadline_seconds);
+ if (gpr_event_wait(ev, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1))) return true;
+ deadline_seconds--;
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_timer_check(&exec_ctx, gpr_now(GPR_CLOCK_MONOTONIC), NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
+ return false;
+}
+
+int main(int argc, char **argv) {
+ grpc_test_init(argc, argv);
+
+ grpc_init();
+ gpr_mu_init(&g_mu);
+ grpc_blocking_resolve_address = my_resolve_address;
+
+ grpc_resolver *resolver = create_resolver("dns:test");
+
+ grpc_client_config *config = (grpc_client_config *)1;
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ gpr_event ev1;
+ gpr_event_init(&ev1);
+ grpc_resolver_next(&exec_ctx, resolver, &config,
+ grpc_closure_create(on_done, &ev1));
+ grpc_exec_ctx_flush(&exec_ctx);
+ GPR_ASSERT(wait_loop(5, &ev1));
+ GPR_ASSERT(config == NULL);
+
+ gpr_event ev2;
+ gpr_event_init(&ev2);
+ grpc_resolver_next(&exec_ctx, resolver, &config,
+ grpc_closure_create(on_done, &ev2));
+ grpc_exec_ctx_flush(&exec_ctx);
+ GPR_ASSERT(wait_loop(30, &ev2));
+ GPR_ASSERT(config != NULL);
+
+ grpc_client_config_unref(&exec_ctx, config);
+ GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test");
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ grpc_shutdown();
+ gpr_mu_destroy(&g_mu);
+}
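
This test never runs a polling thread, so the DNS resolver's retry/backoff timer would not fire on its own; wait_loop therefore alternates a one-second wait on the gpr_event with an explicit grpc_timer_check. A commented sketch of that pattern, using only calls already present in this file (the helper name wait_for_event is made up for illustration):

static bool wait_for_event(gpr_event *ev, int deadline_seconds) {
  while (deadline_seconds-- > 0) {
    /* Block for at most one second waiting for on_done to set the event. */
    if (gpr_event_wait(ev, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)) != NULL) {
      return true;
    }
    /* Nothing is polling in this test, so pump pending grpc timers by hand;
       this is what lets the resolver's backoff timer eventually fire. */
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_timer_check(&exec_ctx, gpr_now(GPR_CLOCK_MONOTONIC), NULL);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  return false; /* deadline exceeded without the event being set */
}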
diff --git a/test/core/support/thd_test.c b/test/core/support/thd_test.c
index f7807d280a..0c176da2d3 100644
--- a/test/core/support/thd_test.c
+++ b/test/core/support/thd_test.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,6 +41,8 @@
#include <grpc/support/time.h>
#include "test/core/util/test_config.h"
+#define NUM_THREADS 300
+
struct test {
gpr_mu mu;
int n;
@@ -79,15 +81,14 @@ static void test_options(void) {
static void test(void) {
int i;
gpr_thd_id thd;
- gpr_thd_id thds[1000];
+ gpr_thd_id thds[NUM_THREADS];
struct test t;
- int n = 1000;
gpr_thd_options options = gpr_thd_options_default();
gpr_mu_init(&t.mu);
gpr_cv_init(&t.done_cv);
- t.n = n;
+ t.n = NUM_THREADS;
t.is_done = 0;
- for (i = 0; i != n; i++) {
+ for (i = 0; i < NUM_THREADS; i++) {
GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, NULL));
}
gpr_mu_lock(&t.mu);
@@ -97,10 +98,10 @@ static void test(void) {
gpr_mu_unlock(&t.mu);
GPR_ASSERT(t.n == 0);
gpr_thd_options_set_joinable(&options);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < NUM_THREADS; i++) {
GPR_ASSERT(gpr_thd_new(&thds[i], &thd_body_joinable, NULL, &options));
}
- for (i = 0; i < n; i++) {
+ for (i = 0; i < NUM_THREADS; i++) {
gpr_thd_join(thds[i]);
}
}
diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c
new file mode 100644
index 0000000000..4d3b7bf22a
--- /dev/null
+++ b/test/core/surface/concurrent_connectivity_test.c
@@ -0,0 +1,83 @@
+/*
+*
+* Copyright 2016, Google Inc.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+* * Neither the name of Google Inc. nor the names of its
+* contributors may be used to endorse or promote products derived from
+* this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include <stdio.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/thd.h>
+#include "test/core/util/test_config.h"
+
+#define NUM_THREADS 100
+static grpc_channel* channels[NUM_THREADS];
+static grpc_completion_queue* queues[NUM_THREADS];
+
+void create_loop_destroy(void* actually_an_int) {
+ int thread_index = (int)(intptr_t)(actually_an_int);
+ for (int i = 0; i < 10; ++i) {
+ grpc_completion_queue* cq = grpc_completion_queue_create(NULL);
+ grpc_channel* chan = grpc_insecure_channel_create("localhost", NULL, NULL);
+
+ channels[thread_index] = chan;
+ queues[thread_index] = cq;
+
+ for (int j = 0; j < 10; ++j) {
+ gpr_timespec later_time = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
+ grpc_connectivity_state state =
+ grpc_channel_check_connectivity_state(chan, 1);
+ grpc_channel_watch_connectivity_state(chan, state, later_time, cq, NULL);
+ GPR_ASSERT(grpc_completion_queue_next(cq,
+ GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
+ NULL).type == GRPC_OP_COMPLETE);
+ }
+ grpc_channel_destroy(channels[thread_index]);
+ grpc_completion_queue_destroy(queues[thread_index]);
+ }
+}
+
+int main(int argc, char** argv) {
+ grpc_test_init(argc, argv);
+ grpc_init();
+ gpr_thd_id threads[NUM_THREADS];
+ for (intptr_t i = 0; i < NUM_THREADS; ++i) {
+ gpr_thd_options options = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&options);
+ gpr_thd_new(&threads[i], create_loop_destroy, (void*)i, &options);
+ }
+ for (int i = 0; i < NUM_THREADS; ++i) {
+ gpr_thd_join(threads[i]);
+ }
+ grpc_shutdown();
+ return 0;
+}
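
Stripped of the threading harness, each iteration above performs one watch cycle on one channel. A minimal sketch using only the calls already present in this file; the completion arrives as GRPC_OP_COMPLETE whether or not the connectivity state changed before the 10ms deadline (the event's success flag distinguishes the two, and this test deliberately ignores it):

static void watch_once(void) {
  grpc_completion_queue* cq = grpc_completion_queue_create(NULL);
  grpc_channel* chan = grpc_insecure_channel_create("localhost", NULL, NULL);

  /* Read the current state; the second argument asks the channel to try to
     connect if it is currently idle. */
  grpc_connectivity_state state = grpc_channel_check_connectivity_state(chan, 1);
  /* Request a completion when the state changes, or after 10ms at the latest. */
  grpc_channel_watch_connectivity_state(
      chan, state, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10), cq, NULL);
  /* Either way, an event should show up well inside 3 seconds. */
  grpc_event ev = grpc_completion_queue_next(
      cq, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3), NULL);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);

  grpc_channel_destroy(chan);
  grpc_completion_queue_destroy(cq);
}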
diff --git a/test/core/tsi/transport_security_test.c b/test/core/tsi/transport_security_test.c
index 7ce343987b..667d3f0349 100644
--- a/test/core/tsi/transport_security_test.c
+++ b/test/core/tsi/transport_security_test.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,35 +61,37 @@ typedef struct {
of '#' will be replaced with a null character before processing. */
const char *dns_names;
+ /* Comma-separated list of IP SANs to match against */
+ const char *ip_names;
} cert_name_test_entry;
/* Largely inspired from:
chromium/src/net/cert/x509_certificate_unittest.cc.
TODO(jboeuf) uncomment test cases as we fix tsi_ssl_peer_matches_name. */
const cert_name_test_entry cert_name_test_entries[] = {
- {1, "foo.com", "foo.com", NULL},
- {1, "f", "f", NULL},
- {0, "h", "i", NULL},
- {1, "bar.foo.com", "*.foo.com", NULL},
+ {1, "foo.com", "foo.com", NULL, NULL},
+ {1, "f", "f", NULL, NULL},
+ {0, "h", "i", NULL, NULL},
+ {1, "bar.foo.com", "*.foo.com", NULL, NULL},
{1, "www.test.fr", "common.name",
- "*.test.com,*.test.co.uk,*.test.de,*.test.fr"},
+ "*.test.com,*.test.co.uk,*.test.de,*.test.fr", NULL},
/*
{1, "wwW.tESt.fr", "common.name", ",*.*,*.test.de,*.test.FR,www"},
*/
- {0, "f.uk", ".uk", NULL},
- {0, "w.bar.foo.com", "?.bar.foo.com", NULL},
- {0, "www.foo.com", "(www|ftp).foo.com", NULL},
- {0, "www.foo.com", "www.foo.com#", NULL}, /* # = null char. */
- {0, "www.foo.com", "", "www.foo.com#*.foo.com,#,#"},
- {0, "www.house.example", "ww.house.example", NULL},
- {0, "test.org", "", "www.test.org,*.test.org,*.org"},
- {0, "w.bar.foo.com", "w*.bar.foo.com", NULL},
- {0, "www.bar.foo.com", "ww*ww.bar.foo.com", NULL},
- {0, "wwww.bar.foo.com", "ww*ww.bar.foo.com", NULL},
- {0, "wwww.bar.foo.com", "w*w.bar.foo.com", NULL},
- {0, "wwww.bar.foo.com", "w*w.bar.foo.c0m", NULL},
- {0, "WALLY.bar.foo.com", "wa*.bar.foo.com", NULL},
- {0, "wally.bar.foo.com", "*Ly.bar.foo.com", NULL},
+ {0, "f.uk", ".uk", NULL, NULL},
+ {0, "w.bar.foo.com", "?.bar.foo.com", NULL, NULL},
+ {0, "www.foo.com", "(www|ftp).foo.com", NULL, NULL},
+ {0, "www.foo.com", "www.foo.com#", NULL, NULL}, /* # = null char. */
+ {0, "www.foo.com", "", "www.foo.com#*.foo.com,#,#", NULL},
+ {0, "www.house.example", "ww.house.example", NULL, NULL},
+ {0, "test.org", "", "www.test.org,*.test.org,*.org", NULL},
+ {0, "w.bar.foo.com", "w*.bar.foo.com", NULL, NULL},
+ {0, "www.bar.foo.com", "ww*ww.bar.foo.com", NULL, NULL},
+ {0, "wwww.bar.foo.com", "ww*ww.bar.foo.com", NULL, NULL},
+ {0, "wwww.bar.foo.com", "w*w.bar.foo.com", NULL, NULL},
+ {0, "wwww.bar.foo.com", "w*w.bar.foo.c0m", NULL, NULL},
+ {0, "WALLY.bar.foo.com", "wa*.bar.foo.com", NULL, NULL},
+ {0, "wally.bar.foo.com", "*Ly.bar.foo.com", NULL, NULL},
/*
{1, "ww%57.foo.com", "", "www.foo.com"},
{1, "www&.foo.com", "www%26.foo.com", NULL},
@@ -97,94 +99,108 @@ const cert_name_test_entry cert_name_test_entries[] = {
/* Common name must not be used if subject alternative name was provided. */
{0, "www.test.co.jp", "www.test.co.jp",
- "*.test.de,*.jp,www.test.co.uk,www.*.co.jp"},
+ "*.test.de,*.jp,www.test.co.uk,www.*.co.jp", NULL},
{0, "www.bar.foo.com", "www.bar.foo.com",
- "*.foo.com,*.*.foo.com,*.*.bar.foo.com,*..bar.foo.com,"},
+ "*.foo.com,*.*.foo.com,*.*.bar.foo.com,*..bar.foo.com,", NULL},
/* IDN tests */
- {1, "xn--poema-9qae5a.com.br", "xn--poema-9qae5a.com.br", NULL},
- {1, "www.xn--poema-9qae5a.com.br", "*.xn--poema-9qae5a.com.br", NULL},
+ {1, "xn--poema-9qae5a.com.br", "xn--poema-9qae5a.com.br", NULL, NULL},
+ {1, "www.xn--poema-9qae5a.com.br", "*.xn--poema-9qae5a.com.br", NULL, NULL},
{0, "xn--poema-9qae5a.com.br", "",
"*.xn--poema-9qae5a.com.br,"
"xn--poema-*.com.br,"
"xn--*-9qae5a.com.br,"
- "*--poema-9qae5a.com.br"},
+ "*--poema-9qae5a.com.br",
+ NULL},
/* The following are adapted from the examples quoted from
http://tools.ietf.org/html/rfc6125#section-6.4.3
(e.g., *.example.com would match foo.example.com but
not bar.foo.example.com or example.com). */
- {1, "foo.example.com", "*.example.com", NULL},
- {0, "bar.foo.example.com", "*.example.com", NULL},
- {0, "example.com", "*.example.com", NULL},
+ {1, "foo.example.com", "*.example.com", NULL, NULL},
+ {0, "bar.foo.example.com", "*.example.com", NULL, NULL},
+ {0, "example.com", "*.example.com", NULL, NULL},
/* Partial wildcards are disallowed, though RFC 2818 rules allow them.
That is, forms such as baz*.example.net, *baz.example.net, and
b*z.example.net should NOT match domains. Instead, the wildcard must
always be the left-most label, and only a single label. */
- {0, "baz1.example.net", "baz*.example.net", NULL},
- {0, "foobaz.example.net", "*baz.example.net", NULL},
- {0, "buzz.example.net", "b*z.example.net", NULL},
- {0, "www.test.example.net", "www.*.example.net", NULL},
+ {0, "baz1.example.net", "baz*.example.net", NULL, NULL},
+ {0, "foobaz.example.net", "*baz.example.net", NULL, NULL},
+ {0, "buzz.example.net", "b*z.example.net", NULL, NULL},
+ {0, "www.test.example.net", "www.*.example.net", NULL, NULL},
/* Wildcards should not be valid for public registry controlled domains,
and unknown/unrecognized domains, at least three domain components must
be present. */
- {1, "www.test.example", "*.test.example", NULL},
- {1, "test.example.co.uk", "*.example.co.uk", NULL},
- {0, "test.example", "*.example", NULL},
+ {1, "www.test.example", "*.test.example", NULL, NULL},
+ {1, "test.example.co.uk", "*.example.co.uk", NULL, NULL},
+ {0, "test.example", "*.example", NULL, NULL},
/*
{0, "example.co.uk", "*.co.uk", NULL},
*/
- {0, "foo.com", "*.com", NULL},
- {0, "foo.us", "*.us", NULL},
- {0, "foo", "*", NULL},
+ {0, "foo.com", "*.com", NULL, NULL},
+ {0, "foo.us", "*.us", NULL, NULL},
+ {0, "foo", "*", NULL, NULL},
/* IDN variants of wildcards and registry controlled domains. */
- {1, "www.xn--poema-9qae5a.com.br", "*.xn--poema-9qae5a.com.br", NULL},
- {1, "test.example.xn--mgbaam7a8h", "*.example.xn--mgbaam7a8h", NULL},
+ {1, "www.xn--poema-9qae5a.com.br", "*.xn--poema-9qae5a.com.br", NULL, NULL},
+ {1, "test.example.xn--mgbaam7a8h", "*.example.xn--mgbaam7a8h", NULL, NULL},
/*
{0, "xn--poema-9qae5a.com.br", "*.com.br", NULL},
*/
- {0, "example.xn--mgbaam7a8h", "*.xn--mgbaam7a8h", NULL},
+ {0, "example.xn--mgbaam7a8h", "*.xn--mgbaam7a8h", NULL, NULL},
/* Wildcards should be permissible for 'private' registry controlled
domains. */
- {1, "www.appspot.com", "*.appspot.com", NULL},
- {1, "foo.s3.amazonaws.com", "*.s3.amazonaws.com", NULL},
+ {1, "www.appspot.com", "*.appspot.com", NULL, NULL},
+ {1, "foo.s3.amazonaws.com", "*.s3.amazonaws.com", NULL, NULL},
/* Multiple wildcards are not valid. */
- {0, "foo.example.com", "*.*.com", NULL},
- {0, "foo.bar.example.com", "*.bar.*.com", NULL},
+ {0, "foo.example.com", "*.*.com", NULL, NULL},
+ {0, "foo.bar.example.com", "*.bar.*.com", NULL, NULL},
/* Absolute vs relative DNS name tests. Although not explicitly specified
in RFC 6125, absolute reference names (those ending in a .) should
match either absolute or relative presented names. */
- {1, "foo.com", "foo.com.", NULL},
- {1, "foo.com.", "foo.com", NULL},
- {1, "foo.com.", "foo.com.", NULL},
- {1, "f", "f.", NULL},
- {1, "f.", "f", NULL},
- {1, "f.", "f.", NULL},
- {1, "www-3.bar.foo.com", "*.bar.foo.com.", NULL},
- {1, "www-3.bar.foo.com.", "*.bar.foo.com", NULL},
- {1, "www-3.bar.foo.com.", "*.bar.foo.com.", NULL},
- {0, ".", ".", NULL},
- {0, "example.com", "*.com.", NULL},
- {0, "example.com.", "*.com", NULL},
- {0, "example.com.", "*.com.", NULL},
- {0, "foo.", "*.", NULL},
- {0, "foo", "*.", NULL},
+ {1, "foo.com", "foo.com.", NULL, NULL},
+ {1, "foo.com.", "foo.com", NULL, NULL},
+ {1, "foo.com.", "foo.com.", NULL, NULL},
+ {1, "f", "f.", NULL, NULL},
+ {1, "f.", "f", NULL, NULL},
+ {1, "f.", "f.", NULL, NULL},
+ {1, "www-3.bar.foo.com", "*.bar.foo.com.", NULL, NULL},
+ {1, "www-3.bar.foo.com.", "*.bar.foo.com", NULL, NULL},
+ {1, "www-3.bar.foo.com.", "*.bar.foo.com.", NULL, NULL},
+ {0, ".", ".", NULL, NULL},
+ {0, "example.com", "*.com.", NULL, NULL},
+ {0, "example.com.", "*.com", NULL, NULL},
+ {0, "example.com.", "*.com.", NULL, NULL},
+ {0, "foo.", "*.", NULL, NULL},
+ {0, "foo", "*.", NULL, NULL},
/*
{0, "foo.co.uk", "*.co.uk.", NULL},
{0, "foo.co.uk.", "*.co.uk.", NULL},
*/
/* An empty CN is OK. */
- {1, "test.foo.com", "", "test.foo.com"},
+ {1, "test.foo.com", "", "test.foo.com", NULL},
/* An IP should not be used for the CN. */
- {0, "173.194.195.139", "173.194.195.139", NULL},
+ {0, "173.194.195.139", "173.194.195.139", NULL, NULL},
+ /* An IP can be used if the SAN IP is present */
+ {1, "173.194.195.139", "foo.example.com", NULL, "173.194.195.139"},
+ {0, "173.194.195.139", "foo.example.com", NULL, "8.8.8.8"},
+ {0, "173.194.195.139", "foo.example.com", NULL, "8.8.8.8,8.8.4.4"},
+ {1, "173.194.195.139", "foo.example.com", NULL, "8.8.8.8,173.194.195.139"},
+ {0, "173.194.195.139", "foo.example.com", NULL, "173.194.195.13"},
+ {0, "2001:db8:a0b:12f0::1", "foo.example.com", NULL, "173.194.195.13"},
+ {1, "2001:db8:a0b:12f0::1", "foo.example.com", NULL,
+ "2001:db8:a0b:12f0::1"},
+ {0, "2001:db8:a0b:12f0::1", "foo.example.com", NULL,
+ "2001:db8:a0b:12f0::2"},
+ {1, "2001:db8:a0b:12f0::1", "foo.example.com", NULL,
+ "2001:db8:a0b:12f0::2,2001:db8:a0b:12f0::1,8.8.8.8"},
};
typedef struct name_list {
@@ -196,7 +212,7 @@ typedef struct {
size_t name_count;
char *buffer;
name_list *names;
-} parsed_dns_names;
+} parsed_names;
name_list *name_list_add(const char *n) {
name_list *result = gpr_malloc(sizeof(name_list));
@@ -205,18 +221,18 @@ name_list *name_list_add(const char *n) {
return result;
}
-static parsed_dns_names parse_dns_names(const char *dns_names_str) {
- parsed_dns_names result;
+static parsed_names parse_names(const char *names_str) {
+ parsed_names result;
name_list *current_nl;
size_t i;
- memset(&result, 0, sizeof(parsed_dns_names));
- if (dns_names_str == 0) return result;
+ memset(&result, 0, sizeof(parsed_names));
+ if (names_str == 0) return result;
result.name_count = 1;
- result.buffer = gpr_strdup(dns_names_str);
+ result.buffer = gpr_strdup(names_str);
result.names = name_list_add(result.buffer);
current_nl = result.names;
- for (i = 0; i < strlen(dns_names_str); i++) {
- if (dns_names_str[i] == ',') {
+ for (i = 0; i < strlen(names_str); i++) {
+ if (names_str[i] == ',') {
result.buffer[i] = '\0';
result.name_count++;
i++;
@@ -227,7 +243,7 @@ static parsed_dns_names parse_dns_names(const char *dns_names_str) {
return result;
}
-static void destruct_parsed_dns_names(parsed_dns_names *pdn) {
+static void destruct_parsed_names(parsed_names *pdn) {
name_list *nl = pdn->names;
if (pdn->buffer != NULL) gpr_free(pdn->buffer);
while (nl != NULL) {
@@ -237,8 +253,8 @@ static void destruct_parsed_dns_names(parsed_dns_names *pdn) {
}
}
-static char *processed_dns_name(const char *dns_name) {
- char *result = gpr_strdup(dns_name);
+static char *processed_name(const char *name) {
+ char *result = gpr_strdup(name);
size_t i;
for (i = 0; i < strlen(result); i++) {
if (result[i] == '#') {
@@ -253,31 +269,48 @@ static tsi_peer peer_from_cert_name_test_entry(
size_t i;
tsi_peer peer;
name_list *nl;
- parsed_dns_names dns_entries = parse_dns_names(entry->dns_names);
+ parsed_names dns_entries = parse_names(entry->dns_names);
+ parsed_names ip_entries = parse_names(entry->ip_names);
nl = dns_entries.names;
- GPR_ASSERT(tsi_construct_peer(1 + dns_entries.name_count, &peer) == TSI_OK);
+ GPR_ASSERT(tsi_construct_peer(
+ 1 + dns_entries.name_count + ip_entries.name_count, &peer) ==
+ TSI_OK);
GPR_ASSERT(tsi_construct_string_peer_property_from_cstring(
TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, entry->common_name,
&peer.properties[0]) == TSI_OK);
i = 1;
while (nl != NULL) {
- char *processed = processed_dns_name(nl->name);
+ char *processed = processed_name(nl->name);
GPR_ASSERT(tsi_construct_string_peer_property(
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, processed,
strlen(nl->name), &peer.properties[i++]) == TSI_OK);
nl = nl->next;
gpr_free(processed);
}
- destruct_parsed_dns_names(&dns_entries);
+
+ nl = ip_entries.names;
+ while (nl != NULL) {
+ char *processed = processed_name(nl->name);
+ GPR_ASSERT(tsi_construct_string_peer_property(
+ TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, processed,
+ strlen(nl->name), &peer.properties[i++]) == TSI_OK);
+ nl = nl->next;
+ gpr_free(processed);
+ }
+ destruct_parsed_names(&dns_entries);
+ destruct_parsed_names(&ip_entries);
return peer;
}
char *cert_name_test_entry_to_string(const cert_name_test_entry *entry) {
char *s;
- gpr_asprintf(
- &s, "{ success = %s, host_name = %s, common_name = %s, dns_names = %s}",
- entry->expected ? "true" : "false", entry->host_name, entry->common_name,
- entry->dns_names != NULL ? entry->dns_names : "");
+ gpr_asprintf(&s,
+ "{ success = %s, host_name = %s, common_name = %s, dns_names = "
+ "%s, ip_names = %s}",
+ entry->expected ? "true" : "false", entry->host_name,
+ entry->common_name,
+ entry->dns_names != NULL ? entry->dns_names : "",
+ entry->ip_names != NULL ? entry->ip_names : "");
return s;
}
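
The driver that walks cert_name_test_entries is unchanged and so does not appear in this diff, but with the new ip_names column it amounts to something like the sketch below (check_one_entry is an invented name; tsi_ssl_peer_matches_name is the function referenced in the TODO above):

static void check_one_entry(const cert_name_test_entry *entry) {
  /* Build a peer carrying the CN plus all DNS and IP SANs from the entry. */
  tsi_peer peer = peer_from_cert_name_test_entry(entry);
  int result = tsi_ssl_peer_matches_name(&peer, entry->host_name);
  if (result != entry->expected) {
    char *entry_str = cert_name_test_entry_to_string(entry);
    gpr_log(GPR_ERROR, "unexpected result for %s", entry_str);
    gpr_free(entry_str);
  }
  GPR_ASSERT(result == entry->expected);
  tsi_peer_destruct(&peer);
}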
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index 9ca3bf98f8..dc8c2bb6e5 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -68,6 +68,7 @@ namespace testing {
namespace {
void* tag(int i) { return (void*)(intptr_t)i; }
+int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
#ifdef GPR_POSIX_SOCKET
static int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
@@ -106,37 +107,50 @@ class PollingOverrider {
class Verifier {
public:
explicit Verifier(bool spin) : spin_(spin) {}
+ // Expect sets the expected ok value for a specific tag
Verifier& Expect(int i, bool expect_ok) {
expectations_[tag(i)] = expect_ok;
return *this;
}
+ // Next waits for 1 async tag to complete, checks its
+ // expectations, and returns the tag
+ int Next(CompletionQueue* cq, bool ignore_ok) {
+ bool ok;
+ void* got_tag;
+ if (spin_) {
+ for (;;) {
+ auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
+ if (r == CompletionQueue::TIMEOUT) continue;
+ if (r == CompletionQueue::GOT_EVENT) break;
+ gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
+ abort();
+ }
+ } else {
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ }
+ auto it = expectations_.find(got_tag);
+ EXPECT_TRUE(it != expectations_.end());
+ if (!ignore_ok) {
+ EXPECT_EQ(it->second, ok);
+ }
+ expectations_.erase(it);
+ return detag(got_tag);
+ }
+
+ // Verify keeps calling Next until all currently set
+ // expected tags are complete
void Verify(CompletionQueue* cq) { Verify(cq, false); }
+ // This version of Verify allows optionally ignoring the
+ // outcome of the expectation
void Verify(CompletionQueue* cq, bool ignore_ok) {
GPR_ASSERT(!expectations_.empty());
while (!expectations_.empty()) {
- bool ok;
- void* got_tag;
- if (spin_) {
- for (;;) {
- auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
- if (r == CompletionQueue::TIMEOUT) continue;
- if (r == CompletionQueue::GOT_EVENT) break;
- gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
- abort();
- }
- } else {
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- }
- auto it = expectations_.find(got_tag);
- EXPECT_TRUE(it != expectations_.end());
- if (!ignore_ok) {
- EXPECT_EQ(it->second, ok);
- }
- expectations_.erase(it);
+ Next(cq, ignore_ok);
}
}
+ // This version of Verify stops after a certain deadline
void Verify(CompletionQueue* cq,
std::chrono::system_clock::time_point deadline) {
if (expectations_.empty()) {
@@ -793,7 +807,8 @@ TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
}
// This class is for testing scenarios where RPCs are cancelled on the server
-// by calling ServerContext::TryCancel()
+// by calling ServerContext::TryCancel(). Server uses AsyncNotifyWhenDone
+// API to check for cancellation
class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
protected:
typedef enum {
@@ -803,13 +818,6 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
CANCEL_AFTER_PROCESSING
} ServerTryCancelRequestPhase;
- void ServerTryCancel(ServerContext* context) {
- EXPECT_FALSE(context->IsCancelled());
- context->TryCancel();
- gpr_log(GPR_INFO, "Server called TryCancel()");
- EXPECT_TRUE(context->IsCancelled());
- }
-
// Helper for testing client-streaming RPCs which are cancelled on the server.
// Depending on the value of server_try_cancel parameter, this will test one
// of the following three scenarios:
@@ -843,6 +851,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
// On the server, request to be notified of 'RequestStream' calls
// and receive the 'RequestStream' call just made by the client
+ srv_ctx.AsyncNotifyWhenDone(tag(11));
service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
tag(2));
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
@@ -858,9 +867,12 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
bool expected_server_cq_result = true;
bool ignore_cq_result = false;
+ bool want_done_tag = false;
if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- ServerTryCancel(&srv_ctx);
+ srv_ctx.TryCancel();
+ Verifier(GetParam()).Expect(11, true).Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
// Since cancellation is done before server reads any results, we know
// for sure that all cq results will return false from this point forward
@@ -868,22 +880,39 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
std::thread* server_try_cancel_thd = NULL;
+
+ auto verif = Verifier(GetParam());
+
if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd = new std::thread(
- &AsyncEnd2endServerTryCancelTest::ServerTryCancel, this, &srv_ctx);
+ server_try_cancel_thd =
+ new std::thread(&ServerContext::TryCancel, &srv_ctx);
// Server will cancel the RPC in a parallel thread while reading the
// requests from the client. Since the cancellation can happen at anytime,
// some of the cq results (i.e those until cancellation) might be true but
// its non deterministic. So better to ignore the cq results
ignore_cq_result = true;
+ // Expect that we might possibly see the done tag that
+ // indicates cancellation completion in this case
+ want_done_tag = true;
+ verif.Expect(11, true);
}
// Server reads 3 messages (tags 6, 7 and 8)
+ // But if want_done_tag is true, we might also see tag 11
for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
srv_stream.Read(&recv_request, tag(tag_idx));
- Verifier(GetParam())
- .Expect(tag_idx, expected_server_cq_result)
- .Verify(cq_.get(), ignore_cq_result);
+ // Note that we'll add something to the verifier and verify that
+ // something was seen, but it might be tag 11 and not what we
+ // just added
+ int got_tag = verif.Expect(tag_idx, expected_server_cq_result)
+ .Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
+ }
}
if (server_try_cancel_thd != NULL) {
@@ -892,7 +921,15 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- ServerTryCancel(&srv_ctx);
+ srv_ctx.TryCancel();
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ if (want_done_tag) {
+ verif.Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
}
// The RPC has been cancelled at this point for sure (i.e irrespective of
@@ -945,6 +982,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
// On the server, request to be notified of 'ResponseStream' calls and
// receive the call just made by the client
+ srv_ctx.AsyncNotifyWhenDone(tag(11));
service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
cq_.get(), cq_.get(), tag(2));
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
@@ -952,9 +990,12 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
bool expected_cq_result = true;
bool ignore_cq_result = false;
+ bool want_done_tag = false;
if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- ServerTryCancel(&srv_ctx);
+ srv_ctx.TryCancel();
+ Verifier(GetParam()).Expect(11, true).Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
// We know for sure that all cq results will be false from this point
// since the server cancelled the RPC
@@ -962,24 +1003,41 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
std::thread* server_try_cancel_thd = NULL;
+
+ auto verif = Verifier(GetParam());
+
if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd = new std::thread(
- &AsyncEnd2endServerTryCancelTest::ServerTryCancel, this, &srv_ctx);
+ server_try_cancel_thd =
+ new std::thread(&ServerContext::TryCancel, &srv_ctx);
// Server will cancel the RPC in a parallel thread while writing responses
+ // to the client. Since the cancellation can happen at any time, some of
// the cq results (i.e those until cancellation) might be true but it is
+ // non-deterministic. So it is better to ignore the cq results
ignore_cq_result = true;
+ // Expect that we might possibly see the done tag that
+ // indicates cancellation completion in this case
+ want_done_tag = true;
+ verif.Expect(11, true);
}
// Server sends three messages (tags 3, 4 and 5)
+ // But if want_done_tag is true, we might also see tag 11
for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
send_response.set_message("Pong " + std::to_string(tag_idx));
srv_stream.Write(send_response, tag(tag_idx));
- Verifier(GetParam())
- .Expect(tag_idx, expected_cq_result)
- .Verify(cq_.get(), ignore_cq_result);
+ // Note that we'll add something to the verifier and verify that
+ // something was seen, but it might be tag 11 and not what we
+ // just added
+ int got_tag = verif.Expect(tag_idx, expected_cq_result)
+ .Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
+ }
}
if (server_try_cancel_thd != NULL) {
@@ -988,13 +1046,21 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- ServerTryCancel(&srv_ctx);
+ srv_ctx.TryCancel();
+ want_done_tag = true;
+ verif.Expect(11, true);
// Client reads may fail because it is notified that the stream is
// cancelled.
ignore_cq_result = true;
}
+ if (want_done_tag) {
+ verif.Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ }
+
// Client attempts to read the three messages from the server
for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
cli_stream->Read(&recv_response, tag(tag_idx));
@@ -1052,6 +1118,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
// On the server, request to be notified of the 'BidiStream' call and
// receive the call just made by the client
+ srv_ctx.AsyncNotifyWhenDone(tag(11));
service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
tag(2));
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
@@ -1063,9 +1130,12 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
bool expected_cq_result = true;
bool ignore_cq_result = false;
+ bool want_done_tag = false;
if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- ServerTryCancel(&srv_ctx);
+ srv_ctx.TryCancel();
+ Verifier(GetParam()).Expect(11, true).Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
// We know for sure that all cq results will be false from this point
// since the server cancelled the RPC
@@ -1073,42 +1143,84 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
std::thread* server_try_cancel_thd = NULL;
+
+ auto verif = Verifier(GetParam());
+
if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd = new std::thread(
- &AsyncEnd2endServerTryCancelTest::ServerTryCancel, this, &srv_ctx);
+ server_try_cancel_thd =
+ new std::thread(&ServerContext::TryCancel, &srv_ctx);
// Since server is going to cancel the RPC in a parallel thread, some of
// the cq results (i.e those until the cancellation) might be true. Since
// that number is non-deterministic, it is better to ignore the cq results
ignore_cq_result = true;
+ // Expect that we might possibly see the done tag that
+ // indicates cancellation completion in this case
+ want_done_tag = true;
+ verif.Expect(11, true);
}
+ int got_tag;
srv_stream.Read(&recv_request, tag(4));
- Verifier(GetParam())
- .Expect(4, expected_cq_result)
- .Verify(cq_.get(), ignore_cq_result);
+ verif.Expect(4, expected_cq_result);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 4) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 4);
+ }
send_response.set_message("Pong");
srv_stream.Write(send_response, tag(5));
- Verifier(GetParam())
- .Expect(5, expected_cq_result)
- .Verify(cq_.get(), ignore_cq_result);
+ verif.Expect(5, expected_cq_result);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 5) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 5);
+ }
cli_stream->Read(&recv_response, tag(6));
- Verifier(GetParam())
- .Expect(6, expected_cq_result)
- .Verify(cq_.get(), ignore_cq_result);
+ verif.Expect(6, expected_cq_result);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 6) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 6);
+ }
// This is expected to succeed in all cases
cli_stream->WritesDone(tag(7));
- Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
+ verif.Expect(7, true);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 7) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 7);
+ }
// This is expected to fail in all cases i.e for all values of
// server_try_cancel. This is because at this point, either there are no
// more msgs from the client (because client called WritesDone) or the RPC
// is cancelled on the server
srv_stream.Read(&recv_request, tag(8));
- Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
+ verif.Expect(8, false);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 8) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 8);
+ }
if (server_try_cancel_thd != NULL) {
server_try_cancel_thd->join();
@@ -1116,7 +1228,15 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- ServerTryCancel(&srv_ctx);
+ srv_ctx.TryCancel();
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ if (want_done_tag) {
+ verif.Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
}
// The RPC has been cancelled at this point for sure (i.e irrespective of
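
The deleted ServerTryCancel helper asserted IsCancelled() immediately after TryCancel(); the rewritten tests instead register AsyncNotifyWhenDone(tag(11)) before the RPC starts and only assert IsCancelled() once that done tag has been drained from the completion queue, interleaving it with the data tags via Verifier::Next when the ordering is not deterministic. Condensed to the "cancel before processing" case, and with every name taken from the fixture above, the handshake is:

srv_ctx.AsyncNotifyWhenDone(tag(11));  // must be registered before the RPC starts
service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                              tag(2));
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());

srv_ctx.TryCancel();                                      // ask for cancellation
Verifier(GetParam()).Expect(11, true).Verify(cq_.get());  // wait for the done tag
EXPECT_TRUE(srv_ctx.IsCancelled());                       // only now is this safe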
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index dc2c4f6426..4759818322 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -59,6 +59,7 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
+using grpc::testing::kTlsCredentialsType;
using std::chrono::system_clock;
namespace grpc {
@@ -1194,6 +1195,8 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
request.mutable_param()->set_echo_metadata(true);
request.mutable_param()->set_expected_client_identity(
TestAuthMetadataProcessor::kGoodGuy);
+ request.mutable_param()->set_expected_transport_security_type(
+ GetParam().credentials_type);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(request.message(), response.message());
@@ -1301,6 +1304,8 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
request.mutable_param()->set_echo_metadata(true);
request.mutable_param()->set_expected_client_identity(
TestAuthMetadataProcessor::kGoodGuy);
+ request.mutable_param()->set_expected_transport_security_type(
+ GetParam().credentials_type);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(request.message(), response.message());
@@ -1349,25 +1354,30 @@ TEST_P(SecureEnd2endTest, ClientAuthContext) {
EchoRequest request;
EchoResponse response;
request.set_message("Hello");
- request.mutable_param()->set_check_auth_context(true);
-
+ request.mutable_param()->set_check_auth_context(GetParam().credentials_type ==
+ kTlsCredentialsType);
+ request.mutable_param()->set_expected_transport_security_type(
+ GetParam().credentials_type);
ClientContext context;
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.ok());
std::shared_ptr<const AuthContext> auth_ctx = context.auth_context();
- std::vector<grpc::string_ref> ssl =
+ std::vector<grpc::string_ref> tst =
auth_ctx->FindPropertyValues("transport_security_type");
- EXPECT_EQ(1u, ssl.size());
- EXPECT_EQ("ssl", ToString(ssl[0]));
- EXPECT_EQ("x509_subject_alternative_name",
- auth_ctx->GetPeerIdentityPropertyName());
- EXPECT_EQ(3u, auth_ctx->GetPeerIdentity().size());
- EXPECT_EQ("*.test.google.fr", ToString(auth_ctx->GetPeerIdentity()[0]));
- EXPECT_EQ("waterzooi.test.google.be",
- ToString(auth_ctx->GetPeerIdentity()[1]));
- EXPECT_EQ("*.test.youtube.com", ToString(auth_ctx->GetPeerIdentity()[2]));
+ EXPECT_EQ(1u, tst.size());
+ EXPECT_EQ(GetParam().credentials_type, ToString(tst[0]));
+ if (GetParam().credentials_type == kTlsCredentialsType) {
+ EXPECT_EQ("x509_subject_alternative_name",
+ auth_ctx->GetPeerIdentityPropertyName());
+ EXPECT_EQ(4u, auth_ctx->GetPeerIdentity().size());
+ EXPECT_EQ("*.test.google.fr", ToString(auth_ctx->GetPeerIdentity()[0]));
+ EXPECT_EQ("waterzooi.test.google.be",
+ ToString(auth_ctx->GetPeerIdentity()[1]));
+ EXPECT_EQ("*.test.youtube.com", ToString(auth_ctx->GetPeerIdentity()[2]));
+ EXPECT_EQ("192.168.1.3", ToString(auth_ctx->GetPeerIdentity()[3]));
+ }
}
std::vector<TestScenario> CreateTestScenarios(bool use_proxy,
diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc
index 7c3e514eff..fe29c4afe9 100644
--- a/test/cpp/end2end/test_service_impl.cc
+++ b/test/cpp/end2end/test_service_impl.cc
@@ -62,14 +62,16 @@ void MaybeEchoDeadline(ServerContext* context, const EchoRequest* request,
}
}
-void CheckServerAuthContext(const ServerContext* context,
- const grpc::string& expected_client_identity) {
+void CheckServerAuthContext(
+ const ServerContext* context,
+ const grpc::string& expected_transport_security_type,
+ const grpc::string& expected_client_identity) {
std::shared_ptr<const AuthContext> auth_ctx = context->auth_context();
- std::vector<grpc::string_ref> ssl =
+ std::vector<grpc::string_ref> tst =
auth_ctx->FindPropertyValues("transport_security_type");
- EXPECT_EQ(1u, ssl.size());
- EXPECT_EQ("ssl", ToString(ssl[0]));
- if (expected_client_identity.length() == 0) {
+ EXPECT_EQ(1u, tst.size());
+ EXPECT_EQ(expected_transport_security_type, ToString(tst[0]));
+ if (expected_client_identity.empty()) {
EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty());
EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty());
EXPECT_FALSE(auth_ctx->IsPeerAuthenticated());
@@ -139,6 +141,7 @@ Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request,
(request->param().expected_client_identity().length() > 0 ||
request->param().check_auth_context())) {
CheckServerAuthContext(context,
+ request->param().expected_transport_security_type(),
request->param().expected_client_identity());
}
if (request->has_param() && request->param().response_message_length() > 0) {
diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index 2dc83f0f29..92e77eed9b 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -123,15 +123,13 @@ class Client {
if (reset) {
Histogram* to_merge = new Histogram[threads_.size()];
for (size_t i = 0; i < threads_.size(); i++) {
- threads_[i]->BeginSwap(&to_merge[i]);
- }
- std::unique_ptr<UsageTimer> timer(new UsageTimer);
- timer_.swap(timer);
- for (size_t i = 0; i < threads_.size(); i++) {
- threads_[i]->EndSwap();
+ threads_[i]->Swap(&to_merge[i]);
latencies.Merge(to_merge[i]);
}
delete[] to_merge;
+
+ std::unique_ptr<UsageTimer> timer(new UsageTimer);
+ timer_.swap(timer);
timer_result = timer->Mark();
} else {
// merge snapshots of each thread histogram
@@ -227,7 +225,6 @@ class Client {
public:
Thread(Client* client, size_t idx)
: done_(false),
- new_stats_(nullptr),
client_(client),
idx_(idx),
impl_(&Thread::ThreadFunc, this) {}
@@ -240,16 +237,9 @@ class Client {
impl_.join();
}
- void BeginSwap(Histogram* n) {
+ void Swap(Histogram* n) {
std::lock_guard<std::mutex> g(mu_);
- new_stats_ = n;
- }
-
- void EndSwap() {
- std::unique_lock<std::mutex> g(mu_);
- while (new_stats_ != nullptr) {
- cv_.wait(g);
- };
+ n->Swap(&histogram_);
}
void MergeStatsInto(Histogram* hist) {
@@ -263,10 +253,11 @@ class Client {
void ThreadFunc() {
for (;;) {
+ // lock since the thread should only be doing one thing at a time
+ std::lock_guard<std::mutex> g(mu_);
// run the loop body
const bool thread_still_ok = client_->ThreadFunc(&histogram_, idx_);
- // lock, see if we're done
- std::lock_guard<std::mutex> g(mu_);
+ // see if we're done
if (!thread_still_ok) {
gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
done_ = true;
@@ -274,19 +265,11 @@ class Client {
if (done_) {
return;
}
- // check if we're resetting stats, swap out the histogram if so
- if (new_stats_) {
- new_stats_->Swap(&histogram_);
- new_stats_ = nullptr;
- cv_.notify_one();
- }
}
}
std::mutex mu_;
- std::condition_variable cv_;
bool done_;
- Histogram* new_stats_;
Histogram histogram_;
Client* client_;
const size_t idx_;
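
The BeginSwap/EndSwap/condition-variable handshake is gone: the worker thread now holds the mutex for each loop iteration, so a single Swap under the same mutex always exchanges a consistent snapshot and leaves the worker running with an emptied histogram. A self-contained toy version of that pattern (Histogram here is a stand-in, not the real qps type):

#include <chrono>
#include <mutex>
#include <thread>
#include <vector>

// Toy stand-in for the qps Histogram: just enough to demonstrate the swap.
struct Histogram {
  std::vector<double> values;
  void Add(double v) { values.push_back(v); }
  void Swap(Histogram* other) { values.swap(other->values); }
};

class Worker {
 public:
  // Caller passes in an empty histogram and receives the accumulated one.
  void Swap(Histogram* fresh) {
    std::lock_guard<std::mutex> g(mu_);
    fresh->Swap(&histogram_);
  }
  void Stop() {
    std::lock_guard<std::mutex> g(mu_);
    done_ = true;
  }
  void Loop() {
    for (;;) {
      {
        // Hold the lock for the whole iteration: at any instant we are either
        // recording into histogram_ or swapping it out, never both.
        std::lock_guard<std::mutex> g(mu_);
        if (done_) return;
        histogram_.Add(1.0);  // stand-in for timing one RPC
      }
      // Release the lock between iterations so Swap()/Stop() can get in.
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }

 private:
  std::mutex mu_;
  bool done_ = false;
  Histogram histogram_;
};

int main() {
  Worker w;
  std::thread t(&Worker::Loop, &w);
  std::this_thread::sleep_for(std::chrono::milliseconds(20));
  Histogram snapshot;
  w.Swap(&snapshot);  // consistent snapshot; worker continues with empty data
  w.Stop();
  t.join();
  return snapshot.values.empty() ? 1 : 0;
}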
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index 1c7fdf8796..bc8780f74d 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -348,19 +348,10 @@ std::unique_ptr<ScenarioResult> RunScenario(
std::unique_ptr<ScenarioResult> result(new ScenarioResult);
result->client_config = result_client_config;
result->server_config = result_server_config;
- gpr_log(GPR_INFO, "Finishing");
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Write(server_mark));
- }
+ gpr_log(GPR_INFO, "Finishing clients");
for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
GPR_ASSERT(client->stream->Write(client_mark));
- }
- for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
- GPR_ASSERT(server->stream->Read(&server_status));
- const auto& stats = server_status.stats();
- result->server_resources.emplace_back(
- stats.time_elapsed(), stats.time_user(), stats.time_system(),
- server_status.cores());
+ GPR_ASSERT(client->stream->WritesDone());
}
for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
GPR_ASSERT(client->stream->Read(&client_status));
@@ -368,17 +359,30 @@ std::unique_ptr<ScenarioResult> RunScenario(
result->latencies.MergeProto(stats.latencies());
result->client_resources.emplace_back(
stats.time_elapsed(), stats.time_user(), stats.time_system(), -1);
+ GPR_ASSERT(!client->stream->Read(&client_status));
}
-
for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
- GPR_ASSERT(client->stream->WritesDone());
GPR_ASSERT(client->stream->Finish().ok());
}
+ delete[] clients;
+
+ gpr_log(GPR_INFO, "Finishing servers");
for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
+ GPR_ASSERT(server->stream->Write(server_mark));
GPR_ASSERT(server->stream->WritesDone());
+ }
+ for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
+ GPR_ASSERT(server->stream->Read(&server_status));
+ const auto& stats = server_status.stats();
+ result->server_resources.emplace_back(
+ stats.time_elapsed(), stats.time_user(), stats.time_system(),
+ server_status.cores());
+ GPR_ASSERT(!server->stream->Read(&server_status));
+ }
+ for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
GPR_ASSERT(server->stream->Finish().ok());
}
- delete[] clients;
+
delete[] servers;
return result;
}
diff --git a/test/cpp/util/test_credentials_provider.h b/test/cpp/util/test_credentials_provider.h
index 50fadb53a2..1fb311e556 100644
--- a/test/cpp/util/test_credentials_provider.h
+++ b/test/cpp/util/test_credentials_provider.h
@@ -44,7 +44,10 @@ namespace grpc {
namespace testing {
const char kInsecureCredentialsType[] = "INSECURE_CREDENTIALS";
-const char kTlsCredentialsType[] = "TLS_CREDENTIALS";
+
+// For real credentials, like tls/ssl, this name should match the AuthContext
+// property "transport_security_type".
+const char kTlsCredentialsType[] = "ssl";
// Provide test credentials of a particular type.
class CredentialTypeProvider {