author    | Yuchen Zeng <zyc@google.com> | 2017-06-06 13:26:36 -0700
committer | Yuchen Zeng <zyc@google.com> | 2017-06-06 13:26:36 -0700
commit    | b89b5011ecfe5135e03e98fb058e0ad345747007 (patch)
tree      | c49d57c3c752150148840b672474ccfeb81d6294 /test
parent    | 4ebace71b071d9dd38a089c88b737b52fe57dfd5 (diff)
parent    | b1332047d049d788a59284a561f4d6c2c2488792 (diff)
Merge remote-tracking branch 'upstream/master' into srv_record
Diffstat (limited to 'test')
-rw-r--r-- | test/core/census/intrusive_hash_map_test.c | 299
-rw-r--r-- | test/core/surface/concurrent_connectivity_test.c | 69
-rw-r--r-- | test/core/surface/num_external_connectivity_watchers_test.c | 214
-rw-r--r-- | test/core/surface/sequential_connectivity_test.c | 3
-rw-r--r-- | test/cpp/end2end/grpclb_end2end_test.cc | 6
-rw-r--r-- | test/cpp/end2end/round_robin_end2end_test.cc | 31
-rw-r--r-- | test/cpp/microbenchmarks/bm_fullstack_trickle.cc | 12

7 files changed, 601 insertions(+), 33 deletions(-)
diff --git a/test/core/census/intrusive_hash_map_test.c b/test/core/census/intrusive_hash_map_test.c new file mode 100644 index 0000000000..552546f9a3 --- /dev/null +++ b/test/core/census/intrusive_hash_map_test.c @@ -0,0 +1,299 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/ext/census/intrusive_hash_map.h" + +#include <grpc/support/log.h> +#include <grpc/support/useful.h> +#include "test/core/util/test_config.h" + +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +/* The initial size of an intrusive hash map will be 2 to this power. */ +static const uint32_t kInitialLog2Size = 4; + +/* Simple object used for testing intrusive_hash_map. */ +typedef struct object { uint64_t val; } object; + +/* Helper function to allocate and initialize object. */ +static __inline object *make_new_object(uint64_t val) { + object *obj = (object *)gpr_malloc(sizeof(object)); + obj->val = val; + return obj; +} + +/* Wrapper struct for object. */ +typedef struct ptr_item { + INTRUSIVE_HASH_MAP_HEADER; + object *obj; +} ptr_item; + +/* Helper function that creates a new hash map item. It is up to the user to + * free the item that was allocated. */ +static __inline ptr_item *make_ptr_item(uint64_t key, uint64_t value) { + ptr_item *new_item = (ptr_item *)gpr_malloc(sizeof(ptr_item)); + new_item->IHM_key = key; + new_item->IHM_hash_link = NULL; + new_item->obj = make_new_object(value); + return new_item; +} + +/* Helper function to deallocate ptr_item. */ +static void free_ptr_item(void *ptr) { gpr_free(((ptr_item *)ptr)->obj); } + +/* Simple string object used for testing intrusive_hash_map. */ +typedef struct string_item { + INTRUSIVE_HASH_MAP_HEADER; + // User data. + char buf[32]; + uint16_t len; +} string_item; + +/* Helper function to allocate and initialize string object. 
*/ +static string_item *make_string_item(uint64_t key, const char *buf, + uint16_t len) { + string_item *item = (string_item *)gpr_malloc(sizeof(string_item)); + item->IHM_key = key; + item->IHM_hash_link = NULL; + item->len = len; + memcpy(item->buf, buf, sizeof(char) * len); + return item; +} + +/* Helper function for comparing two string objects. */ +static bool compare_string_item(const string_item *A, const string_item *B) { + if (A->IHM_key != B->IHM_key || A->len != B->len) + return false; + else { + for (int i = 0; i < A->len; ++i) { + if (A->buf[i] != B->buf[i]) return false; + } + } + + return true; +} + +void test_empty() { + intrusive_hash_map hash_map; + intrusive_hash_map_init(&hash_map, kInitialLog2Size); + GPR_ASSERT(0 == intrusive_hash_map_size(&hash_map)); + GPR_ASSERT(intrusive_hash_map_empty(&hash_map)); + intrusive_hash_map_free(&hash_map, NULL); +} + +void test_single_item() { + intrusive_hash_map hash_map; + intrusive_hash_map_init(&hash_map, kInitialLog2Size); + + ptr_item *new_item = make_ptr_item(10, 20); + bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item); + GPR_ASSERT(ok); + + ptr_item *item1 = + (ptr_item *)intrusive_hash_map_find(&hash_map, (uint64_t)10); + GPR_ASSERT(item1->obj->val == 20); + GPR_ASSERT(item1 == new_item); + + ptr_item *item2 = + (ptr_item *)intrusive_hash_map_erase(&hash_map, (uint64_t)10); + GPR_ASSERT(item2 == new_item); + + gpr_free(new_item->obj); + gpr_free(new_item); + GPR_ASSERT(0 == intrusive_hash_map_size(&hash_map)); + intrusive_hash_map_free(&hash_map, &free_ptr_item); +} + +void test_two_items() { + intrusive_hash_map hash_map; + intrusive_hash_map_init(&hash_map, kInitialLog2Size); + + string_item *new_item1 = make_string_item(10, "test1", 5); + bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item1); + GPR_ASSERT(ok); + string_item *new_item2 = make_string_item(20, "test2", 5); + ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item2); + GPR_ASSERT(ok); + + string_item *item1 = + (string_item *)intrusive_hash_map_find(&hash_map, (uint64_t)10); + GPR_ASSERT(compare_string_item(new_item1, item1)); + GPR_ASSERT(item1 == new_item1); + string_item *item2 = + (string_item *)intrusive_hash_map_find(&hash_map, (uint64_t)20); + GPR_ASSERT(compare_string_item(new_item2, item2)); + GPR_ASSERT(item2 == new_item2); + + item1 = (string_item *)intrusive_hash_map_erase(&hash_map, (uint64_t)10); + GPR_ASSERT(item1 == new_item1); + item2 = (string_item *)intrusive_hash_map_erase(&hash_map, (uint64_t)20); + GPR_ASSERT(item2 == new_item2); + + gpr_free(new_item1); + gpr_free(new_item2); + GPR_ASSERT(0 == intrusive_hash_map_size(&hash_map)); + intrusive_hash_map_free(&hash_map, NULL); +} + +// Test resetting and clearing the hash map. +void test_reset_clear() { + intrusive_hash_map hash_map; + intrusive_hash_map_init(&hash_map, kInitialLog2Size); + + // Add some data to the hash_map. + for (uint64_t i = 0; i < 3; ++i) { + intrusive_hash_map_insert(&hash_map, (hm_item *)make_ptr_item(i, i)); + } + GPR_ASSERT(3 == intrusive_hash_map_size(&hash_map)); + + // Test find. 
+ for (uint64_t i = 0; i < 3; ++i) { + ptr_item *item = (ptr_item *)intrusive_hash_map_find(&hash_map, i); + GPR_ASSERT(item != NULL); + GPR_ASSERT(item->IHM_key == i && item->obj->val == i); + } + + intrusive_hash_map_clear(&hash_map, &free_ptr_item); + GPR_ASSERT(intrusive_hash_map_empty(&hash_map)); + intrusive_hash_map_free(&hash_map, &free_ptr_item); +} + +// Check that the hash_map contains every key between [min_value, max_value] +// (inclusive). +void check_hash_map_values(intrusive_hash_map *hash_map, uint64_t min_value, + uint64_t max_value) { + GPR_ASSERT(intrusive_hash_map_size(hash_map) == max_value - min_value + 1); + + for (uint64_t i = min_value; i <= max_value; ++i) { + ptr_item *item = (ptr_item *)intrusive_hash_map_find(hash_map, i); + GPR_ASSERT(item != NULL); + GPR_ASSERT(item->obj->val == i); + } +} + +// Add many items and cause the hash_map to extend. +void test_extend() { + intrusive_hash_map hash_map; + intrusive_hash_map_init(&hash_map, kInitialLog2Size); + + const uint64_t kNumValues = (1 << 16); + + for (uint64_t i = 0; i < kNumValues; ++i) { + ptr_item *item = make_ptr_item(i, i); + bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)item); + GPR_ASSERT(ok); + if (i % 1000 == 0) { + check_hash_map_values(&hash_map, 0, i); + } + } + + for (uint64_t i = 0; i < kNumValues; ++i) { + ptr_item *item = (ptr_item *)intrusive_hash_map_find(&hash_map, i); + GPR_ASSERT(item != NULL); + GPR_ASSERT(item->IHM_key == i && item->obj->val == i); + ptr_item *item2 = (ptr_item *)intrusive_hash_map_erase(&hash_map, i); + GPR_ASSERT(item == item2); + gpr_free(item->obj); + gpr_free(item); + } + + GPR_ASSERT(intrusive_hash_map_empty(&hash_map)); + intrusive_hash_map_free(&hash_map, &free_ptr_item); +} + +void test_stress() { + intrusive_hash_map hash_map; + intrusive_hash_map_init(&hash_map, kInitialLog2Size); + size_t n = 0; + + // Randomly add and insert entries 1000000 times. + for (uint64_t i = 0; i < 1000000; ++i) { + int op = rand() & 0x1; + + switch (op) { + // Case 0 is insertion of entry. + case 0: { + uint64_t key = (uint64_t)(rand() % 10000); + ptr_item *item = make_ptr_item(key, key); + bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)item); + if (ok) { + n++; + } else { + gpr_free(item->obj); + gpr_free(item); + } + break; + } + // Case 1 is removal of entry. + case 1: { + uint64_t key = (uint64_t)(rand() % 10000); + ptr_item *item = (ptr_item *)intrusive_hash_map_find(&hash_map, key); + if (item != NULL) { + n--; + GPR_ASSERT(key == item->obj->val); + ptr_item *item2 = + (ptr_item *)intrusive_hash_map_erase(&hash_map, key); + GPR_ASSERT(item == item2); + gpr_free(item->obj); + gpr_free(item); + } + break; + } + } + } + // Check size + GPR_ASSERT(n == intrusive_hash_map_size(&hash_map)); + + // Clean the hash_map up. 
+ intrusive_hash_map_clear(&hash_map, &free_ptr_item); + GPR_ASSERT(intrusive_hash_map_empty(&hash_map)); + intrusive_hash_map_free(&hash_map, &free_ptr_item); +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + gpr_time_init(); + srand((unsigned)gpr_now(GPR_CLOCK_REALTIME).tv_nsec); + + test_empty(); + test_single_item(); + test_two_items(); + test_reset_clear(); + test_extend(); + test_stress(); + + return 0; +} diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c index 87ad095170..7614696cae 100644 --- a/test/core/surface/concurrent_connectivity_test.c +++ b/test/core/surface/concurrent_connectivity_test.c @@ -61,6 +61,14 @@ #define DELAY_MILLIS 10 #define POLL_MILLIS 3000 +#define NUM_OUTER_LOOPS_SHORT_TIMEOUTS 10 +#define NUM_INNER_LOOPS_SHORT_TIMEOUTS 100 +#define DELAY_MILLIS_SHORT_TIMEOUTS 1 +// in a successful test run, POLL_MILLIS should never be reached beause all runs +// should +// end after the shorter delay_millis +#define POLL_MILLIS_SHORT_TIMEOUTS 30000 + static void *tag(int n) { return (void *)(uintptr_t)n; } static int detag(void *p) { return (int)(uintptr_t)p; } @@ -79,6 +87,8 @@ void create_loop_destroy(void *addr) { grpc_timeout_milliseconds_to_deadline(POLL_MILLIS); GPR_ASSERT(grpc_completion_queue_next(cq, poll_time, NULL).type == GRPC_OP_COMPLETE); + /* check that the watcher from "watch state" was free'd */ + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(chan) == 0); } grpc_channel_destroy(chan); grpc_completion_queue_destroy(cq); @@ -168,11 +178,10 @@ static void done_pollset_shutdown(grpc_exec_ctx *exec_ctx, void *pollset, gpr_free(pollset); } -int main(int argc, char **argv) { +int run_concurrent_connectivity_test() { struct server_thread_args args; memset(&args, 0, sizeof(args)); - grpc_test_init(argc, argv); grpc_init(); gpr_thd_id threads[NUM_THREADS]; @@ -242,3 +251,59 @@ int main(int argc, char **argv) { grpc_shutdown(); return 0; } + +void watches_with_short_timeouts(void *addr) { + for (int i = 0; i < NUM_OUTER_LOOPS_SHORT_TIMEOUTS; ++i) { + grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL); + grpc_channel *chan = grpc_insecure_channel_create((char *)addr, NULL, NULL); + + for (int j = 0; j < NUM_INNER_LOOPS_SHORT_TIMEOUTS; ++j) { + gpr_timespec later_time = + grpc_timeout_milliseconds_to_deadline(DELAY_MILLIS_SHORT_TIMEOUTS); + grpc_connectivity_state state = + grpc_channel_check_connectivity_state(chan, 0); + GPR_ASSERT(state == GRPC_CHANNEL_IDLE); + grpc_channel_watch_connectivity_state(chan, state, later_time, cq, NULL); + gpr_timespec poll_time = + grpc_timeout_milliseconds_to_deadline(POLL_MILLIS_SHORT_TIMEOUTS); + grpc_event ev = grpc_completion_queue_next(cq, poll_time, NULL); + GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); + GPR_ASSERT(ev.success == false); + /* check that the watcher from "watch state" was free'd */ + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(chan) == 0); + } + grpc_channel_destroy(chan); + grpc_completion_queue_destroy(cq); + } +} + +// This test tries to catch deadlock situations. +// With short timeouts on "watches" and long timeouts on cq next calls, +// so that a QUEUE_TIMEOUT likely means that something is stuck. 
+int run_concurrent_watches_with_short_timeouts_test() { + grpc_init(); + + gpr_thd_id threads[NUM_THREADS]; + + char *localhost = gpr_strdup("localhost:54321"); + gpr_thd_options options = gpr_thd_options_default(); + gpr_thd_options_set_joinable(&options); + + for (size_t i = 0; i < NUM_THREADS; ++i) { + gpr_thd_new(&threads[i], watches_with_short_timeouts, localhost, &options); + } + for (size_t i = 0; i < NUM_THREADS; ++i) { + gpr_thd_join(threads[i]); + } + gpr_free(localhost); + + grpc_shutdown(); + return 0; +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + + run_concurrent_connectivity_test(); + run_concurrent_watches_with_short_timeouts_test(); +} diff --git a/test/core/surface/num_external_connectivity_watchers_test.c b/test/core/surface/num_external_connectivity_watchers_test.c new file mode 100644 index 0000000000..93944c9ad5 --- /dev/null +++ b/test/core/surface/num_external_connectivity_watchers_test.c @@ -0,0 +1,214 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include <grpc/grpc.h> +#include <grpc/grpc_security.h> +#include <grpc/support/alloc.h> +#include <grpc/support/host_port.h> +#include <grpc/support/log.h> +#include <grpc/support/thd.h> + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "test/core/end2end/data/ssl_test_data.h" +#include "test/core/util/port.h" +#include "test/core/util/test_config.h" + +typedef struct test_fixture { + const char *name; + grpc_channel *(*create_channel)(const char *addr); +} test_fixture; + +static size_t next_tag = 1; + +static void channel_idle_start_watch(grpc_channel *channel, + grpc_completion_queue *cq) { + gpr_timespec connect_deadline = grpc_timeout_milliseconds_to_deadline(1); + GPR_ASSERT(grpc_channel_check_connectivity_state(channel, 0) == + GRPC_CHANNEL_IDLE); + + grpc_channel_watch_connectivity_state( + channel, GRPC_CHANNEL_IDLE, connect_deadline, cq, (void *)(next_tag++)); + gpr_log(GPR_DEBUG, "number of active connect watchers: %d", + grpc_channel_num_external_connectivity_watchers(channel)); +} + +static void channel_idle_poll_for_timeout(grpc_channel *channel, + grpc_completion_queue *cq) { + grpc_event ev = + grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + + /* expect watch_connectivity_state to end with a timeout */ + GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); + GPR_ASSERT(ev.success == false); + GPR_ASSERT(grpc_channel_check_connectivity_state(channel, 0) == + GRPC_CHANNEL_IDLE); +} + +/* Test and use the "num_external_watchers" call to make sure + * that "connectivity watcher" structs are free'd just after, if + * their corresponding timeouts occur. */ +static void run_timeouts_test(const test_fixture *fixture) { + gpr_log(GPR_INFO, "TEST: %s", fixture->name); + + char *addr; + + grpc_init(); + + gpr_join_host_port(&addr, "localhost", grpc_pick_unused_port_or_die()); + + grpc_channel *channel = fixture->create_channel(addr); + grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL); + + /* start 1 watcher and then let it time out */ + channel_idle_start_watch(channel, cq); + channel_idle_poll_for_timeout(channel, cq); + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(channel) == 0); + + /* start 3 watchers and then let them all time out */ + for (size_t i = 1; i <= 3; i++) { + channel_idle_start_watch(channel, cq); + } + for (size_t i = 1; i <= 3; i++) { + channel_idle_poll_for_timeout(channel, cq); + } + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(channel) == 0); + + /* start 3 watchers, see one time out, start another 3, and then see them all + * time out */ + for (size_t i = 1; i <= 3; i++) { + channel_idle_start_watch(channel, cq); + } + channel_idle_poll_for_timeout(channel, cq); + for (size_t i = 3; i <= 5; i++) { + channel_idle_start_watch(channel, cq); + } + for (size_t i = 1; i <= 5; i++) { + channel_idle_poll_for_timeout(channel, cq); + } + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(channel) == 0); + + grpc_channel_destroy(channel); + grpc_completion_queue_shutdown(cq); + GPR_ASSERT( + grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL) + .type == GRPC_QUEUE_SHUTDOWN); + grpc_completion_queue_destroy(cq); + + grpc_shutdown(); + gpr_free(addr); +} + +/* An edge scenario; sets channel state to explicitly, and outside + * of a polling call. 
*/ +static void run_channel_shutdown_before_timeout_test( + const test_fixture *fixture) { + gpr_log(GPR_INFO, "TEST: %s", fixture->name); + + char *addr; + + grpc_init(); + + gpr_join_host_port(&addr, "localhost", grpc_pick_unused_port_or_die()); + + grpc_channel *channel = fixture->create_channel(addr); + grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL); + + /* start 1 watcher and then shut down the channel before the timer goes off */ + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(channel) == 0); + + /* expecting a 30 second timeout to go off much later than the shutdown. */ + gpr_timespec connect_deadline = grpc_timeout_seconds_to_deadline(30); + GPR_ASSERT(grpc_channel_check_connectivity_state(channel, 0) == + GRPC_CHANNEL_IDLE); + + grpc_channel_watch_connectivity_state(channel, GRPC_CHANNEL_IDLE, + connect_deadline, cq, (void *)1); + grpc_channel_destroy(channel); + + grpc_event ev = + grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); + /* expect success with a state transition to CHANNEL_SHUTDOWN */ + GPR_ASSERT(ev.success == true); + + grpc_completion_queue_shutdown(cq); + GPR_ASSERT( + grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL) + .type == GRPC_QUEUE_SHUTDOWN); + grpc_completion_queue_destroy(cq); + + grpc_shutdown(); + gpr_free(addr); +} + +static grpc_channel *insecure_test_create_channel(const char *addr) { + return grpc_insecure_channel_create(addr, NULL, NULL); +} + +static const test_fixture insecure_test = { + "insecure", insecure_test_create_channel, +}; + +static grpc_channel *secure_test_create_channel(const char *addr) { + grpc_channel_credentials *ssl_creds = + grpc_ssl_credentials_create(test_root_cert, NULL, NULL); + grpc_arg ssl_name_override = {GRPC_ARG_STRING, + GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, + {"foo.test.google.fr"}}; + grpc_channel_args *new_client_args = + grpc_channel_args_copy_and_add(NULL, &ssl_name_override, 1); + grpc_channel *channel = + grpc_secure_channel_create(ssl_creds, addr, new_client_args, NULL); + { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_channel_args_destroy(&exec_ctx, new_client_args); + grpc_exec_ctx_finish(&exec_ctx); + } + grpc_channel_credentials_release(ssl_creds); + return channel; +} + +static const test_fixture secure_test = { + "secure", secure_test_create_channel, +}; + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + + run_timeouts_test(&insecure_test); + run_timeouts_test(&secure_test); + + run_channel_shutdown_before_timeout_test(&insecure_test); + run_channel_shutdown_before_timeout_test(&secure_test); +} diff --git a/test/core/surface/sequential_connectivity_test.c b/test/core/surface/sequential_connectivity_test.c index fbecdd7e38..a0f18e9bbf 100644 --- a/test/core/surface/sequential_connectivity_test.c +++ b/test/core/surface/sequential_connectivity_test.c @@ -100,6 +100,9 @@ static void run_test(const test_fixture *fixture) { connect_deadline, cq, NULL); grpc_event ev = grpc_completion_queue_next( cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + /* check that the watcher from "watch state" was free'd */ + GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(channels[i]) == + 0); GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); GPR_ASSERT(ev.tag == NULL); GPR_ASSERT(ev.success == true); diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc index 4e1bcc7a60..b0d4e2dadf 100644 --- a/test/cpp/end2end/grpclb_end2end_test.cc 
+++ b/test/cpp/end2end/grpclb_end2end_test.cc @@ -568,9 +568,11 @@ TEST_F(SingleBalancerTest, RepeatedServerlist) { // only the first half of the backends will receive them. for (size_t i = 0; i < backends_.size(); ++i) { if (i < backends_.size() / 2) - EXPECT_EQ(1U, backend_servers_[i].service_->request_count()); + EXPECT_EQ(1U, backend_servers_[i].service_->request_count()) + << "for backend #" << i; else - EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()) + << "for backend #" << i; } EXPECT_EQ(statuses_and_responses.size(), num_backends_ / 2); for (const auto& status_and_response : statuses_and_responses) { diff --git a/test/cpp/end2end/round_robin_end2end_test.cc b/test/cpp/end2end/round_robin_end2end_test.cc index f8e3cc06c0..ea7639bc8f 100644 --- a/test/cpp/end2end/round_robin_end2end_test.cc +++ b/test/cpp/end2end/round_robin_end2end_test.cc @@ -42,7 +42,6 @@ #include <grpc++/server_builder.h> #include <grpc/grpc.h> #include <grpc/support/log.h> -#include <grpc/support/thd.h> #include <grpc/support/time.h> #include "src/proto/grpc/testing/echo.grpc.pb.h" @@ -131,22 +130,10 @@ class RoundRobinEnd2endTest : public ::testing::Test { int port_; std::unique_ptr<Server> server_; MyTestServiceImpl service_; - std::unique_ptr<std::thread> thread_; explicit ServerData(const grpc::string& server_host) { port_ = grpc_pick_unused_port_or_die(); gpr_log(GPR_INFO, "starting server on port %d", port_); - std::mutex mu; - std::condition_variable cond; - thread_.reset(new std::thread( - std::bind(&ServerData::Start, this, server_host, &mu, &cond))); - std::unique_lock<std::mutex> lock(mu); - cond.wait(lock); - gpr_log(GPR_INFO, "server startup complete"); - } - - void Start(const grpc::string& server_host, std::mutex* mu, - std::condition_variable* cond) { std::ostringstream server_address; server_address << server_host << ":" << port_; ServerBuilder builder; @@ -154,18 +141,13 @@ class RoundRobinEnd2endTest : public ::testing::Test { InsecureServerCredentials()); builder.RegisterService(&service_); server_ = builder.BuildAndStart(); - std::lock_guard<std::mutex> lock(*mu); - cond->notify_one(); + gpr_log(GPR_INFO, "server startup complete"); } - void Shutdown() { - server_->Shutdown(); - thread_->join(); - } + void Shutdown() { server_->Shutdown(); } }; const grpc::string server_host_; - CompletionQueue cli_cq_; std::shared_ptr<Channel> channel_; std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_; std::vector<std::unique_ptr<ServerData>> servers_; @@ -197,10 +179,13 @@ TEST_F(RoundRobinEnd2endTest, RoundRobin) { const int kNumServers = 3; StartServers(kNumServers); ResetStub(true /* round_robin */); - SendRpc(kNumServers); - // One request should have gone to each server. + // Send one RPC per backend and make sure they are used in order. + // Note: This relies on the fact that the subchannels are reported in + // state READY in the order in which the addresses are specified, + // which is only true because the backends are all local. for (size_t i = 0; i < servers_.size(); ++i) { - EXPECT_EQ(1, servers_[i]->service_.request_count()); + SendRpc(1); + EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i; } // Check LB policy name for the channel. 
EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName()); diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc index d7e3a9cf47..9f616fe152 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc @@ -419,18 +419,18 @@ static void BM_PumpUnbalancedUnary_Trickle(benchmark::State& state) { } static void UnaryTrickleArgs(benchmark::internal::Benchmark* b) { + // A selection of interesting numbers const int cli_1024k = 1024 * 1024; const int cli_32M = 32 * 1024 * 1024; const int svr_256k = 256 * 1024; const int svr_4M = 4 * 1024 * 1024; const int svr_64M = 64 * 1024 * 1024; for (int bw = 64; bw <= 128 * 1024 * 1024; bw *= 16) { - b->Args({bw, cli_1024k, svr_256k}); - b->Args({bw, cli_1024k, svr_4M}); - b->Args({bw, cli_1024k, svr_64M}); - b->Args({bw, cli_32M, svr_256k}); - b->Args({bw, cli_32M, svr_4M}); - b->Args({bw, cli_32M, svr_64M}); + for (auto svr : {svr_256k, svr_4M, svr_64M}) { + for (auto cli : {cli_1024k, cli_32M}) { + b->Args({cli, svr, bw}); + } + } } } BENCHMARK(BM_PumpUnbalancedUnary_Trickle)->Apply(UnaryTrickleArgs); |