| author | Nicolas Noble <nnoble@google.com> | 2014-11-26 16:33:03 -0800 |
| --- | --- | --- |
| committer | Nicolas Noble <nnoble@google.com> | 2014-11-26 16:33:03 -0800 |
| commit | b7ebd3b8c6fe39f99c40b10c1b563e4adb607b6c (patch) | |
| tree | c1decf819492d455ec81cd471942c5516138f825 /test/core/surface | |
| parent | 0e905e63db21bcdd85d3d1af051fcdc5bb5caa38 (diff) | |
Initial import.
Diffstat (limited to 'test/core/surface')
-rw-r--r-- | test/core/surface/byte_buffer_reader_test.c | 111
-rw-r--r-- | test/core/surface/completion_queue_benchmark.c | 168
-rw-r--r-- | test/core/surface/completion_queue_test.c | 435
-rw-r--r-- | test/core/surface/lame_client_test.c | 82
4 files changed, 796 insertions, 0 deletions
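All four files listed above follow the same minimal harness from test/core/util/test_config.h: call grpc_test_init(), log each case with a LOG_TEST() macro, and assert with GPR_ASSERT. A rough sketch of that shared shape, distilled from the code in this diff (my_test_case is a placeholder name, not a function from the commit):

```c
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"

/* Same logging convention the tests in this diff use:
   print the current test name at INFO level. */
#define LOG_TEST() gpr_log(GPR_INFO, "%s", __FUNCTION__)

static void my_test_case(void) {
  LOG_TEST();
  GPR_ASSERT(1 + 1 == 2); /* placeholder; the real cases exercise a surface API */
}

int main(int argc, char **argv) {
  grpc_test_init(argc, argv); /* shared test setup (flags, logging) */
  my_test_case();
  return 0;
}
```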
diff --git a/test/core/surface/byte_buffer_reader_test.c b/test/core/surface/byte_buffer_reader_test.c new file mode 100644 index 0000000000..bc5a512a0b --- /dev/null +++ b/test/core/surface/byte_buffer_reader_test.c @@ -0,0 +1,111 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include <grpc/byte_buffer_reader.h> +#include <grpc/byte_buffer.h> +#include <grpc/support/slice.h> +#include <grpc/grpc.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/thd.h> +#include <grpc/support/time.h> +#include "test/core/util/test_config.h" + +#include <string.h> + +#define LOG_TEST() gpr_log(GPR_INFO, "%s", __FUNCTION__) + +static void test_create() { + grpc_byte_buffer *buffer; + grpc_byte_buffer_reader *reader; + gpr_slice empty = gpr_empty_slice(); + LOG_TEST(); + buffer = grpc_byte_buffer_create(&empty, 1); + reader = grpc_byte_buffer_reader_create(buffer); + grpc_byte_buffer_reader_destroy(reader); + grpc_byte_buffer_destroy(buffer); +} + +static void test_read_one_slice() { + gpr_slice slice; + grpc_byte_buffer *buffer; + grpc_byte_buffer_reader *reader; + gpr_slice first_slice, second_slice; + int first_code, second_code; + + LOG_TEST(); + slice = gpr_slice_from_copied_string("test"); + buffer = grpc_byte_buffer_create(&slice, 1); + gpr_slice_unref(slice); + reader = grpc_byte_buffer_reader_create(buffer); + first_code = grpc_byte_buffer_reader_next(reader, &first_slice); + GPR_ASSERT(first_code != 0); + GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0); + gpr_slice_unref(first_slice); + second_code = grpc_byte_buffer_reader_next(reader, &second_slice); + GPR_ASSERT(second_code == 0); + grpc_byte_buffer_reader_destroy(reader); + grpc_byte_buffer_destroy(buffer); +} + +static void test_read_one_slice_malloc() { + gpr_slice slice; + grpc_byte_buffer *buffer; + grpc_byte_buffer_reader *reader; + gpr_slice first_slice, second_slice; + int first_code, second_code; + + LOG_TEST(); + slice = gpr_slice_malloc(4); + memcpy(GPR_SLICE_START_PTR(slice), "test", 4); + buffer = grpc_byte_buffer_create(&slice, 1); + gpr_slice_unref(slice); + reader = grpc_byte_buffer_reader_create(buffer); + first_code = grpc_byte_buffer_reader_next(reader, &first_slice); + GPR_ASSERT(first_code != 0); + GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0); + gpr_slice_unref(first_slice); + second_code = grpc_byte_buffer_reader_next(reader, &second_slice); + GPR_ASSERT(second_code == 0); + grpc_byte_buffer_reader_destroy(reader); + grpc_byte_buffer_destroy(buffer); +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + test_create(); + test_read_one_slice(); + test_read_one_slice_malloc(); + return 0; +} diff --git a/test/core/surface/completion_queue_benchmark.c b/test/core/surface/completion_queue_benchmark.c new file mode 100644 index 0000000000..5360d7c6c3 --- /dev/null +++ b/test/core/surface/completion_queue_benchmark.c @@ -0,0 +1,168 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/surface/completion_queue.h" + +#include <math.h> +#include <stdio.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/thd.h> +#include <grpc/support/time.h> + +typedef struct test_thread_options { + gpr_event on_started; + gpr_event *start; + gpr_event on_finished; + grpc_completion_queue *cc; + int iterations; +} test_thread_options; + +static void producer_thread(void *arg) { + test_thread_options *opt = arg; + int i; + + gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); + GPR_ASSERT(gpr_event_wait(opt->start, gpr_inf_future)); + + for (i = 0; i < opt->iterations; i++) { + grpc_cq_begin_op(opt->cc, NULL, GRPC_WRITE_ACCEPTED); + grpc_cq_end_write_accepted(opt->cc, (void *)(gpr_intptr) 1, NULL, NULL, + NULL, GRPC_OP_OK); + } + + gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); +} + +static void consumer_thread(void *arg) { + test_thread_options *opt = arg; + grpc_event *ev; + + gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); + GPR_ASSERT(gpr_event_wait(opt->start, gpr_inf_future)); + + for (;;) { + ev = grpc_completion_queue_next(opt->cc, gpr_inf_future); + switch (ev->type) { + case GRPC_WRITE_ACCEPTED: + break; + case GRPC_QUEUE_SHUTDOWN: + gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); + return; + default: + gpr_log(GPR_ERROR, "Invalid event received: %d", ev->type); + abort(); + } + grpc_event_finish(ev); + } +} + +double ops_per_second(int consumers, int producers, int iterations) { + test_thread_options *options = + gpr_malloc((producers + consumers) * sizeof(test_thread_options)); + gpr_event start = GPR_EVENT_INIT; + grpc_completion_queue *cc = grpc_completion_queue_create(); + int i; + gpr_timespec t_start, t_end, t_delta; + + /* start all threads: they will wait for phase1 */ + for (i = 0; i < producers + consumers; i++) { + gpr_thd_id id; + gpr_event_init(&options[i].on_started); + gpr_event_init(&options[i].on_finished); + options[i].start = &start; + options[i].cc = cc; + options[i].iterations = iterations; + GPR_ASSERT(gpr_thd_new(&id, + i < producers ? 
producer_thread : consumer_thread, + options + i, NULL)); + gpr_event_wait(&options[i].on_started, gpr_inf_future); + } + + /* start the benchmark */ + t_start = gpr_now(); + gpr_event_set(&start, (void *)(gpr_intptr) 1); + + /* wait for producers to finish */ + for (i = 0; i < producers; i++) { + GPR_ASSERT(gpr_event_wait(&options[i].on_finished, gpr_inf_future)); + } + + /* in parallel, we shutdown the completion channel - all events should still + be consumed */ + grpc_completion_queue_shutdown(cc); + + /* join all threads */ + for (i = producers; i < producers + consumers; i++) { + GPR_ASSERT(gpr_event_wait(&options[i].on_finished, gpr_inf_future)); + } + t_end = gpr_now(); + + /* destroy the completion channel */ + grpc_completion_queue_destroy(cc); + + gpr_free(options); + + t_delta = gpr_time_sub(t_end, t_start); + return (t_delta.tv_sec + 1e-9 * t_delta.tv_nsec) / (producers * iterations); +} + +double ops_per_second_top(int consumers, int producers) { + return ops_per_second(consumers, producers, 1000000 / producers); +} + +int main(void) { + const int counts[] = {1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 40, 64}; + const int ncounts = sizeof(counts) / sizeof(*counts); + int i, j; + + printf("\"\","); + for (i = 0; i < ncounts; i++) { + int producers = counts[i]; + printf("%d%s", producers, i == ncounts - 1 ? "\n" : ","); + } + + for (j = 0; j < ncounts; j++) { + int consumers = counts[j]; + printf("%d,", consumers); + for (i = 0; i < ncounts; i++) { + int producers = counts[i]; + printf("%f%s", ops_per_second_top(consumers, producers), + i == ncounts - 1 ? "\n" : ","); + fflush(stdout); + } + } + + return 0; +} diff --git a/test/core/surface/completion_queue_test.c b/test/core/surface/completion_queue_test.c new file mode 100644 index 0000000000..6df159f697 --- /dev/null +++ b/test/core/surface/completion_queue_test.c @@ -0,0 +1,435 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "src/core/surface/completion_queue.h" + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/thd.h> +#include <grpc/support/time.h> +#include <grpc/support/useful.h> +#include "src/core/surface/surface_em.h" +#include "test/core/util/test_config.h" + +#define LOG_TEST() gpr_log(GPR_INFO, "%s", __FUNCTION__) + +static void increment_int_on_finish(void *user_data, grpc_op_error error) { + ++*(int *)user_data; +} + +static void *create_test_tag() { + static gpr_intptr i = 0; + return (void *)(++i); +} + +/* helper for tests to shutdown correctly and tersely */ +static void shutdown_and_destroy(grpc_completion_queue *cc) { + grpc_event *ev; + grpc_completion_queue_shutdown(cc); + ev = grpc_completion_queue_next(cc, gpr_inf_past); + GPR_ASSERT(ev != NULL); + GPR_ASSERT(ev->type == GRPC_QUEUE_SHUTDOWN); + grpc_event_finish(ev); + grpc_completion_queue_destroy(cc); +} + +/* ensure we can create and destroy a completion channel */ +static void test_no_op() { + LOG_TEST(); + shutdown_and_destroy(grpc_completion_queue_create()); +} + +static void test_wait_empty() { + grpc_completion_queue *cc; + + LOG_TEST(); + + cc = grpc_completion_queue_create(); + GPR_ASSERT(grpc_completion_queue_next(cc, gpr_now()) == NULL); + shutdown_and_destroy(cc); +} + +static void test_cq_end_read() { + grpc_event *ev; + grpc_completion_queue *cc; + int on_finish_called = 0; + void *tag = create_test_tag(); + + LOG_TEST(); + + cc = grpc_completion_queue_create(); + + grpc_cq_begin_op(cc, NULL, GRPC_READ); + grpc_cq_end_read(cc, tag, NULL, increment_int_on_finish, &on_finish_called, + NULL); + + ev = grpc_completion_queue_next(cc, gpr_inf_past); + GPR_ASSERT(ev != NULL); + GPR_ASSERT(ev->type == GRPC_READ); + GPR_ASSERT(ev->tag == tag); + GPR_ASSERT(ev->data.read == NULL); + GPR_ASSERT(on_finish_called == 0); + grpc_event_finish(ev); + GPR_ASSERT(on_finish_called == 1); + + shutdown_and_destroy(cc); +} + +static void test_cq_end_invoke_accepted() { + grpc_event *ev; + grpc_completion_queue *cc; + int on_finish_called = 0; + void *tag = create_test_tag(); + + LOG_TEST(); + + cc = grpc_completion_queue_create(); + + grpc_cq_begin_op(cc, NULL, GRPC_INVOKE_ACCEPTED); + grpc_cq_end_invoke_accepted(cc, tag, NULL, increment_int_on_finish, + &on_finish_called, GRPC_OP_OK); + + ev = grpc_completion_queue_next(cc, gpr_inf_past); + GPR_ASSERT(ev != NULL); + GPR_ASSERT(ev->type == GRPC_INVOKE_ACCEPTED); + GPR_ASSERT(ev->tag == tag); + GPR_ASSERT(ev->data.invoke_accepted == GRPC_OP_OK); + GPR_ASSERT(on_finish_called == 0); + grpc_event_finish(ev); + GPR_ASSERT(on_finish_called == 1); + + shutdown_and_destroy(cc); +} + +static void test_cq_end_write_accepted() { + grpc_event *ev; + grpc_completion_queue *cc; + int on_finish_called = 0; + void *tag = create_test_tag(); + + LOG_TEST(); + + cc = grpc_completion_queue_create(); + + grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED); + grpc_cq_end_write_accepted(cc, tag, NULL, increment_int_on_finish, + &on_finish_called, GRPC_OP_OK); + + ev = grpc_completion_queue_next(cc, gpr_inf_past); + GPR_ASSERT(ev != NULL); + GPR_ASSERT(ev->type == GRPC_WRITE_ACCEPTED); + GPR_ASSERT(ev->tag == tag); + GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK); + GPR_ASSERT(on_finish_called == 0); + grpc_event_finish(ev); + GPR_ASSERT(on_finish_called == 1); + + shutdown_and_destroy(cc); +} + +static void test_cq_end_finish_accepted() { + grpc_event *ev; + grpc_completion_queue *cc; + int on_finish_called = 0; + void *tag = create_test_tag(); + + LOG_TEST(); 
+ + cc = grpc_completion_queue_create(); + + grpc_cq_begin_op(cc, NULL, GRPC_FINISH_ACCEPTED); + grpc_cq_end_finish_accepted(cc, tag, NULL, increment_int_on_finish, + &on_finish_called, GRPC_OP_OK); + + ev = grpc_completion_queue_next(cc, gpr_inf_past); + GPR_ASSERT(ev != NULL); + GPR_ASSERT(ev->type == GRPC_FINISH_ACCEPTED); + GPR_ASSERT(ev->tag == tag); + GPR_ASSERT(ev->data.finish_accepted == GRPC_OP_OK); + GPR_ASSERT(on_finish_called == 0); + grpc_event_finish(ev); + GPR_ASSERT(on_finish_called == 1); + + shutdown_and_destroy(cc); +} + +static void test_cq_end_client_metadata_read() { + grpc_event *ev; + grpc_completion_queue *cc; + int on_finish_called = 0; + void *tag = create_test_tag(); + + LOG_TEST(); + + cc = grpc_completion_queue_create(); + + grpc_cq_begin_op(cc, NULL, GRPC_CLIENT_METADATA_READ); + grpc_cq_end_client_metadata_read(cc, tag, NULL, increment_int_on_finish, + &on_finish_called, 0, NULL); + + ev = grpc_completion_queue_next(cc, gpr_inf_past); + GPR_ASSERT(ev != NULL); + GPR_ASSERT(ev->type == GRPC_CLIENT_METADATA_READ); + GPR_ASSERT(ev->tag == tag); + GPR_ASSERT(ev->data.client_metadata_read.count == 0); + GPR_ASSERT(ev->data.client_metadata_read.elements == NULL); + GPR_ASSERT(on_finish_called == 0); + grpc_event_finish(ev); + GPR_ASSERT(on_finish_called == 1); + + shutdown_and_destroy(cc); +} + +static void test_pluck() { + grpc_event *ev; + grpc_completion_queue *cc; + void *tags[128]; + int i, j; + int on_finish_called = 0; + + LOG_TEST(); + + for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) { + tags[i] = create_test_tag(); + for (j = 0; j < i; j++) { + GPR_ASSERT(tags[i] != tags[j]); + } + } + + cc = grpc_completion_queue_create(); + + for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) { + grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED); + grpc_cq_end_write_accepted(cc, tags[i], NULL, increment_int_on_finish, + &on_finish_called, GRPC_OP_OK); + } + + for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) { + ev = grpc_completion_queue_pluck(cc, tags[i], gpr_inf_past); + GPR_ASSERT(ev->tag == tags[i]); + grpc_event_finish(ev); + } + + GPR_ASSERT(on_finish_called == GPR_ARRAY_SIZE(tags)); + + for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) { + grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED); + grpc_cq_end_write_accepted(cc, tags[i], NULL, increment_int_on_finish, + &on_finish_called, GRPC_OP_OK); + } + + for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) { + ev = grpc_completion_queue_pluck(cc, tags[GPR_ARRAY_SIZE(tags) - i - 1], + gpr_inf_past); + GPR_ASSERT(ev->tag == tags[GPR_ARRAY_SIZE(tags) - i - 1]); + grpc_event_finish(ev); + } + + GPR_ASSERT(on_finish_called == 2 * GPR_ARRAY_SIZE(tags)); + + shutdown_and_destroy(cc); +} + +#define TEST_THREAD_EVENTS 10000 + +typedef struct test_thread_options { + gpr_event on_started; + gpr_event *phase1; + gpr_event on_phase1_done; + gpr_event *phase2; + gpr_event on_finished; + int events_triggered; + int id; + grpc_completion_queue *cc; +} test_thread_options; + +gpr_timespec ten_seconds_time() { + return gpr_time_add(gpr_now(), gpr_time_from_micros(10 * 1000000)); +} + +static void producer_thread(void *arg) { + test_thread_options *opt = arg; + int i; + + gpr_log(GPR_INFO, "producer %d started", opt->id); + gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); + GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); + + gpr_log(GPR_INFO, "producer %d phase 1", opt->id); + for (i = 0; i < TEST_THREAD_EVENTS; i++) { + grpc_cq_begin_op(opt->cc, NULL, GRPC_WRITE_ACCEPTED); + } + + gpr_log(GPR_INFO, "producer %d phase 1 done", opt->id); + 
gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1); + GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); + + gpr_log(GPR_INFO, "producer %d phase 2", opt->id); + for (i = 0; i < TEST_THREAD_EVENTS; i++) { + grpc_cq_end_write_accepted(opt->cc, (void *)(gpr_intptr) 1, NULL, NULL, + NULL, GRPC_OP_OK); + opt->events_triggered++; + } + + gpr_log(GPR_INFO, "producer %d phase 2 done", opt->id); + gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); +} + +static void consumer_thread(void *arg) { + test_thread_options *opt = arg; + grpc_event *ev; + + gpr_log(GPR_INFO, "consumer %d started", opt->id); + gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); + GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); + + gpr_log(GPR_INFO, "consumer %d phase 1", opt->id); + + gpr_log(GPR_INFO, "consumer %d phase 1 done", opt->id); + gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1); + GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); + + gpr_log(GPR_INFO, "consumer %d phase 2", opt->id); + for (;;) { + ev = grpc_completion_queue_next(opt->cc, ten_seconds_time()); + GPR_ASSERT(ev); + switch (ev->type) { + case GRPC_WRITE_ACCEPTED: + GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK); + opt->events_triggered++; + grpc_event_finish(ev); + break; + case GRPC_QUEUE_SHUTDOWN: + gpr_log(GPR_INFO, "consumer %d phase 2 done", opt->id); + gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); + grpc_event_finish(ev); + return; + default: + gpr_log(GPR_ERROR, "Invalid event received: %d", ev->type); + abort(); + } + } +} + +static void test_threading(int producers, int consumers) { + test_thread_options *options = + gpr_malloc((producers + consumers) * sizeof(test_thread_options)); + gpr_event phase1 = GPR_EVENT_INIT; + gpr_event phase2 = GPR_EVENT_INIT; + grpc_completion_queue *cc = grpc_completion_queue_create(); + int i; + int total_consumed = 0; + static int optid = 101; + + gpr_log(GPR_INFO, "%s: %d producers, %d consumers", __FUNCTION__, producers, + consumers); + + grpc_completion_queue_dont_poll_test_only(cc); + + /* start all threads: they will wait for phase1 */ + for (i = 0; i < producers + consumers; i++) { + gpr_thd_id id; + gpr_event_init(&options[i].on_started); + gpr_event_init(&options[i].on_phase1_done); + gpr_event_init(&options[i].on_finished); + options[i].phase1 = &phase1; + options[i].phase2 = &phase2; + options[i].events_triggered = 0; + options[i].cc = cc; + options[i].id = optid++; + GPR_ASSERT(gpr_thd_new(&id, + i < producers ? 
producer_thread : consumer_thread, + options + i, NULL)); + gpr_event_wait(&options[i].on_started, ten_seconds_time()); + } + + /* start phase1: producers will pre-declare all operations they will + complete */ + gpr_log(GPR_INFO, "start phase 1"); + gpr_event_set(&phase1, (void *)(gpr_intptr) 1); + + gpr_log(GPR_INFO, "wait phase 1"); + for (i = 0; i < producers + consumers; i++) { + GPR_ASSERT(gpr_event_wait(&options[i].on_phase1_done, ten_seconds_time())); + } + gpr_log(GPR_INFO, "done phase 1"); + + /* start phase2: operations will complete, and consumers will consume them */ + gpr_log(GPR_INFO, "start phase 2"); + gpr_event_set(&phase2, (void *)(gpr_intptr) 1); + + /* in parallel, we shutdown the completion channel - all events should still + be consumed */ + grpc_completion_queue_shutdown(cc); + + /* join all threads */ + gpr_log(GPR_INFO, "wait phase 2"); + for (i = 0; i < producers + consumers; i++) { + GPR_ASSERT(gpr_event_wait(&options[i].on_finished, ten_seconds_time())); + } + gpr_log(GPR_INFO, "done phase 2"); + + /* destroy the completion channel */ + grpc_completion_queue_destroy(cc); + + /* verify that everything was produced and consumed */ + for (i = 0; i < producers + consumers; i++) { + if (i < producers) { + GPR_ASSERT(options[i].events_triggered == TEST_THREAD_EVENTS); + } else { + total_consumed += options[i].events_triggered; + } + } + GPR_ASSERT(total_consumed == producers * TEST_THREAD_EVENTS); + + gpr_free(options); +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + grpc_surface_em_init(); + test_no_op(); + test_wait_empty(); + test_cq_end_read(); + test_cq_end_invoke_accepted(); + test_cq_end_write_accepted(); + test_cq_end_finish_accepted(); + test_cq_end_client_metadata_read(); + test_pluck(); + test_threading(1, 1); + test_threading(1, 10); + test_threading(10, 1); + test_threading(10, 10); + grpc_surface_em_shutdown(); + return 0; +} diff --git a/test/core/surface/lame_client_test.c b/test/core/surface/lame_client_test.c new file mode 100644 index 0000000000..0520a39ea2 --- /dev/null +++ b/test/core/surface/lame_client_test.c @@ -0,0 +1,82 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/surface/lame_client.h" + +#include "test/core/end2end/cq_verifier.h" +#include "test/core/util/test_config.h" +#include <grpc/support/log.h> + +static void *tag(gpr_intptr x) { return (void *)x; } + +int main(int argc, char **argv) { + grpc_channel *chan; + grpc_call *call; + grpc_metadata md = {"a", "b", 1}; + grpc_completion_queue *cq; + cq_verifier *cqv; + + grpc_test_init(argc, argv); + grpc_init(); + + chan = grpc_lame_client_channel_create(); + GPR_ASSERT(chan); + call = grpc_channel_create_call( + chan, "/Foo", "anywhere", + gpr_time_add(gpr_now(), gpr_time_from_seconds(100))); + GPR_ASSERT(call); + cq = grpc_completion_queue_create(); + cqv = cq_verifier_create(cq); + + /* we should be able to add metadata */ + GPR_ASSERT(GRPC_CALL_OK == grpc_call_add_metadata(call, &md, 0)); + + /* and invoke the call */ + GPR_ASSERT(GRPC_CALL_OK == + grpc_call_start_invoke(call, cq, tag(1), tag(2), tag(3), 0)); + + /* the call should immediately fail */ + cq_expect_invoke_accepted(cqv, tag(1), GRPC_OP_ERROR); + cq_expect_client_metadata_read(cqv, tag(2), NULL); + cq_expect_finished(cqv, tag(3), NULL); + cq_verify(cqv); + + grpc_call_destroy(call); + grpc_channel_destroy(chan); + cq_verifier_destroy(cqv); + grpc_completion_queue_destroy(cq); + + grpc_shutdown(); + + return 0; +} |
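Taken together, the completion-queue test and benchmark above all drive the same begin/end/next lifecycle. The condensed sketch below shows that flow end to end; it assumes the same 2014-era internal surface API used in the diff (grpc_cq_begin_op, grpc_cq_end_write_accepted, grpc_completion_queue_next, and friends) and is an illustration distilled from the tests, not code from the commit:

```c
/* Condensed illustration of the lifecycle exercised above (not part of the
   commit): pre-declare an op, complete it, consume the event, then shut the
   queue down and drain it. Uses the 2014-era API as it appears in the diff. */
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/surface_em.h"

#include <grpc/support/log.h>
#include <grpc/support/time.h>

static void drive_one_write(grpc_completion_queue *cc) {
  void *tag = (void *)(gpr_intptr)42;
  grpc_event *ev;

  /* 1. Pre-declare the operation; shutdown will wait for it to complete. */
  grpc_cq_begin_op(cc, NULL, GRPC_WRITE_ACCEPTED);
  /* 2. Complete it, queueing a GRPC_WRITE_ACCEPTED event for the tag. */
  grpc_cq_end_write_accepted(cc, tag, NULL, NULL, NULL, GRPC_OP_OK);
  /* 3. Consume it; the event is already queued, so a past deadline suffices. */
  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev != NULL);
  GPR_ASSERT(ev->type == GRPC_WRITE_ACCEPTED && ev->tag == tag);
  grpc_event_finish(ev);
}

int main(void) {
  grpc_completion_queue *cc;
  grpc_event *ev;

  grpc_surface_em_init();
  cc = grpc_completion_queue_create();

  drive_one_write(cc);

  /* 4. Shut down and drain: GRPC_QUEUE_SHUTDOWN is delivered last. */
  grpc_completion_queue_shutdown(cc);
  ev = grpc_completion_queue_next(cc, gpr_inf_past);
  GPR_ASSERT(ev->type == GRPC_QUEUE_SHUTDOWN);
  grpc_event_finish(ev);
  grpc_completion_queue_destroy(cc);
  grpc_surface_em_shutdown();
  return 0;
}
```

The pre-declaration via grpc_cq_begin_op is what the threading test above relies on when it shuts the queue down while producers are still completing events: every declared operation is still delivered before the shutdown event.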