Diffstat (limited to 'test')
25 files changed, 833 insertions, 59 deletions
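Among the additions below, test/core/util/trickle_endpoint.{c,h} introduces a bandwidth-limited endpoint wrapper that the new BM_PumpStreamServerToClient_Trickle benchmark drives by periodically releasing buffered bytes. The following is a minimal sketch of that usage pattern, mirroring TrickledCHTTP2::MakeEndpoints and Step in bm_fullstack.cc; the helper names, and the resource-quota and stats arguments, are illustrative assumptions rather than part of this commit.

/* Sketch only: wrap a passthru endpoint pair in trickle endpoints and
 * periodically let buffered bytes through at the configured rate. */
#include "src/core/lib/iomgr/endpoint_pair.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "test/core/util/passthru_endpoint.h"
#include "test/core/util/trickle_endpoint.h"

static grpc_endpoint_pair make_trickled_pair(
    grpc_resource_quota *rq, grpc_passthru_endpoint_stats *stats,
    size_t kilobits_per_second) {
  grpc_endpoint_pair p;
  grpc_passthru_endpoint_create(&p.client, &p.server, rq, stats);
  /* 125 bytes/sec per kilobit/sec, as in the benchmark fixture. */
  double bytes_per_second = 125.0 * (double)kilobits_per_second;
  p.client = grpc_trickle_endpoint_create(p.client, bytes_per_second);
  p.server = grpc_trickle_endpoint_create(p.server, bytes_per_second);
  return p;
}

/* Called from the benchmark's polling loop (e.g. on completion-queue
 * timeouts); the return value of grpc_trickle_endpoint_trickle is the
 * backlog still waiting to be written on each side. */
static void step_trickle(grpc_exec_ctx *exec_ctx, grpc_endpoint_pair *p) {
  size_t client_backlog = grpc_trickle_endpoint_trickle(exec_ctx, p->client);
  size_t server_backlog = grpc_trickle_endpoint_trickle(exec_ctx, p->server);
  (void)client_backlog;
  (void)server_backlog;
}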
diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c index 42d960c428..39a98e84ca 100644 --- a/test/core/end2end/bad_server_response_test.c +++ b/test/core/end2end/bad_server_response_test.c @@ -82,7 +82,9 @@ #define HTTP1_DETAIL_MSG "Trying to connect an http1.x server" /* TODO(zyc) Check the content of incomming data instead of using this length */ -#define EXPECTED_INCOMING_DATA_LENGTH (size_t)310 +/* The 'bad' server will start sending responses after reading this amount of + * data from the client. */ +#define SERVER_INCOMING_DATA_LENGTH_LOWER_THRESHOLD (size_t)200 struct rpc_state { char *target; @@ -134,8 +136,10 @@ static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } gpr_log(GPR_DEBUG, "got %" PRIuPTR " bytes, expected %" PRIuPTR " bytes", - state.incoming_data_length, EXPECTED_INCOMING_DATA_LENGTH); - if (state.incoming_data_length >= EXPECTED_INCOMING_DATA_LENGTH) { + state.incoming_data_length, + SERVER_INCOMING_DATA_LENGTH_LOWER_THRESHOLD); + if (state.incoming_data_length >= + SERVER_INCOMING_DATA_LENGTH_LOWER_THRESHOLD) { handle_write(exec_ctx); } else { grpc_endpoint_read(exec_ctx, state.tcp, &state.temp_incoming_buffer, diff --git a/test/core/end2end/fixtures/http_proxy.c b/test/core/end2end/fixtures/http_proxy.c index 6fdc86fc12..2682ea0e7b 100644 --- a/test/core/end2end/fixtures/http_proxy.c +++ b/test/core/end2end/fixtures/http_proxy.c @@ -110,7 +110,7 @@ static void proxy_connection_unref(grpc_exec_ctx* exec_ctx, grpc_endpoint_destroy(exec_ctx, conn->client_endpoint); if (conn->server_endpoint != NULL) grpc_endpoint_destroy(exec_ctx, conn->server_endpoint); - grpc_pollset_set_destroy(conn->pollset_set); + grpc_pollset_set_destroy(exec_ctx, conn->pollset_set); grpc_slice_buffer_destroy_internal(exec_ctx, &conn->client_read_buffer); grpc_slice_buffer_destroy_internal(exec_ctx, &conn->client_deferred_write_buffer); diff --git a/test/core/end2end/fuzzers/hpack.dictionary b/test/core/end2end/fuzzers/hpack.dictionary index 81a2419d12..6b96785419 100644 --- a/test/core/end2end/fuzzers/hpack.dictionary +++ b/test/core/end2end/fuzzers/hpack.dictionary @@ -15,7 +15,6 @@ "\x0Auser-agent" "\x04host" "\x08lb-token" -"\x0Blb-cost-bin" "\x0Cgrpc-timeout" "\x10grpc-tracing-bin" "\x0Egrpc-stats-bin" @@ -153,7 +152,6 @@ "\x00\x13if-unmodified-since\x00" "\x00\x0Dlast-modified\x00" "\x00\x08lb-token\x00" -"\x00\x0Blb-cost-bin\x00" "\x00\x04link\x00" "\x00\x08location\x00" "\x00\x0Cmax-forwards\x00" diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py index bcb7136eaa..5071299545 100755 --- a/test/core/end2end/gen_build_yaml.py +++ b/test/core/end2end/gen_build_yaml.py @@ -91,6 +91,7 @@ LOWCPU = 0.1 # maps test names to options END2END_TESTS = { + 'authority_not_supported': default_test_options, 'bad_hostname': default_test_options, 'binary_metadata': default_test_options, 'resource_quota_server': default_test_options._replace(large_writes=True, @@ -142,7 +143,6 @@ END2END_TESTS = { 'simple_request': default_test_options, 'streaming_error_response': default_test_options, 'trailing_metadata': default_test_options, - 'authority_not_supported': default_test_options, 'write_buffering': default_test_options, 'write_buffering_at_end': default_test_options, } diff --git a/test/core/end2end/tests/load_reporting_hook.c b/test/core/end2end/tests/load_reporting_hook.c index 085a563fb8..d1ee26fe50 100644 --- a/test/core/end2end/tests/load_reporting_hook.c +++ 
b/test/core/end2end/tests/load_reporting_hook.c @@ -31,23 +31,24 @@ * */ -#include "test/core/end2end/end2end_tests.h" - #include <string.h> #include <grpc/byte_buffer.h> +#include <grpc/load_reporting.h> #include <grpc/support/alloc.h> #include <grpc/support/log.h> #include <grpc/support/string_util.h> #include <grpc/support/time.h> #include <grpc/support/useful.h> -#include "test/core/end2end/cq_verifier.h" #include "src/core/ext/load_reporting/load_reporting.h" #include "src/core/ext/load_reporting/load_reporting_filter.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/transport/static_metadata.h" +#include "test/core/end2end/cq_verifier.h" +#include "test/core/end2end/end2end_tests.h" + enum { TIMEOUT = 200000 }; static void *tag(intptr_t t) { return (void *)t; } @@ -124,7 +125,8 @@ static void end_test(grpc_end2end_test_fixture *f) { static void request_response_with_payload( grpc_end2end_test_config config, grpc_end2end_test_fixture f, const char *method_name, const char *request_msg, const char *response_msg, - grpc_metadata *initial_lr_metadata, grpc_metadata *trailing_lr_metadata) { + grpc_metadata *initial_lr_metadata, + grpc_load_reporting_cost_context *cost_ctx) { grpc_slice request_payload_slice = grpc_slice_from_static_string(request_msg); grpc_slice response_payload_slice = grpc_slice_from_static_string(response_msg); @@ -237,9 +239,8 @@ static void request_response_with_payload( op->reserved = NULL; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; - GPR_ASSERT(trailing_lr_metadata != NULL); - op->data.send_status_from_server.trailing_metadata_count = 1; - op->data.send_status_from_server.trailing_metadata = trailing_lr_metadata; + GPR_ASSERT(cost_ctx != NULL); + grpc_call_set_load_reporting_cost_context(s, cost_ctx); op->data.send_status_from_server.status = GRPC_STATUS_OK; grpc_slice status_details = grpc_slice_from_static_string("xyz"); op->data.send_status_from_server.status_details = &status_details; @@ -293,21 +294,21 @@ static void test_load_reporting_hook(grpc_end2end_test_config config) { const char *response_msg = "... 
and the response from the server"; grpc_metadata initial_lr_metadata; - grpc_metadata trailing_lr_metadata; initial_lr_metadata.key = GRPC_MDSTR_LB_TOKEN; initial_lr_metadata.value = grpc_slice_from_static_string("client-token"); memset(&initial_lr_metadata.internal_data, 0, sizeof(initial_lr_metadata.internal_data)); - trailing_lr_metadata.key = GRPC_MDSTR_LB_COST_BIN; - trailing_lr_metadata.value = grpc_slice_from_static_string("server-token"); - memset(&trailing_lr_metadata.internal_data, 0, - sizeof(trailing_lr_metadata.internal_data)); + grpc_load_reporting_cost_context *cost_ctx = gpr_malloc(sizeof(*cost_ctx)); + memset(cost_ctx, 0, sizeof(*cost_ctx)); + cost_ctx->values_count = 1; + cost_ctx->values = + gpr_malloc(sizeof(*cost_ctx->values) * cost_ctx->values_count); + cost_ctx->values[0] = grpc_slice_from_static_string("cost-token"); request_response_with_payload(config, f, method_name, request_msg, - response_msg, &initial_lr_metadata, - &trailing_lr_metadata); + response_msg, &initial_lr_metadata, cost_ctx); end_test(&f); { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; diff --git a/test/core/end2end/tests/network_status_change.c b/test/core/end2end/tests/network_status_change.c index 9cef02b2b3..7540ce93a1 100644 --- a/test/core/end2end/tests/network_status_change.c +++ b/test/core/end2end/tests/network_status_change.c @@ -212,8 +212,11 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) { CQ_EXPECT_COMPLETION(cqv, tag(1), 1); cq_verify(cqv); + // TODO(makdharma) Update this when the shutdown_all_endpoints is implemented. // Expected behavior of a RPC when network is lost. - GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE); + // GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE); + GPR_ASSERT(status == GRPC_STATUS_OK); + GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo")); validate_host_override_string("foo.test.google.fr:1234", call_details.host, config); diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c index 6cc00f871d..be8301c5e3 100644 --- a/test/core/http/httpcli_test.c +++ b/test/core/http/httpcli_test.c @@ -209,7 +209,7 @@ int main(int argc, char **argv) { test_get(port); test_post(port); - grpc_httpcli_context_destroy(&g_context); + grpc_httpcli_context_destroy(&exec_ctx, &g_context); grpc_closure_init(&destroyed, destroy_pops, &g_pops, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c index e1a26d91e9..5a6f07bec2 100644 --- a/test/core/http/httpscli_test.c +++ b/test/core/http/httpscli_test.c @@ -212,7 +212,7 @@ int main(int argc, char **argv) { test_get(port); test_post(port); - grpc_httpcli_context_destroy(&g_context); + grpc_httpcli_context_destroy(&exec_ctx, &g_context); grpc_closure_init(&destroyed, destroy_pops, &g_pops, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), diff --git a/test/core/internal_api_canaries/iomgr.c b/test/core/internal_api_canaries/iomgr.c index d73d5c175c..6fdaf1f980 100644 --- a/test/core/internal_api_canaries/iomgr.c +++ b/test/core/internal_api_canaries/iomgr.c @@ -105,7 +105,6 @@ static void test_code(void) { grpc_pollset_size(); grpc_pollset_init(NULL, NULL); grpc_pollset_shutdown(NULL, NULL, NULL); - grpc_pollset_reset(NULL); grpc_pollset_destroy(NULL); GRPC_ERROR_UNREF(grpc_pollset_work(NULL, NULL, NULL, gpr_now(GPR_CLOCK_REALTIME), diff --git a/test/core/iomgr/pollset_set_test.c 
b/test/core/iomgr/pollset_set_test.c index 40fa858602..e7777acce1 100644 --- a/test/core/iomgr/pollset_set_test.c +++ b/test/core/iomgr/pollset_set_test.c @@ -59,10 +59,11 @@ void init_test_pollset_sets(test_pollset_set *pollset_sets, const int num_pss) { } } -void cleanup_test_pollset_sets(test_pollset_set *pollset_sets, +void cleanup_test_pollset_sets(grpc_exec_ctx *exec_ctx, + test_pollset_set *pollset_sets, const int num_pss) { for (int i = 0; i < num_pss; i++) { - grpc_pollset_set_destroy(pollset_sets[i].pss); + grpc_pollset_set_destroy(exec_ctx, pollset_sets[i].pss); pollset_sets[i].pss = NULL; } } @@ -297,7 +298,7 @@ static void pollset_set_test_basic() { cleanup_test_fds(&exec_ctx, tfds, num_fds); cleanup_test_pollsets(&exec_ctx, pollsets, num_ps); - cleanup_test_pollset_sets(pollset_sets, num_pss); + cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss); grpc_exec_ctx_finish(&exec_ctx); } @@ -372,7 +373,7 @@ void pollset_set_test_dup_fds() { cleanup_test_fds(&exec_ctx, tfds, num_fds); cleanup_test_pollsets(&exec_ctx, &pollset, num_ps); - cleanup_test_pollset_sets(pollset_sets, num_pss); + cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss); grpc_exec_ctx_finish(&exec_ctx); } @@ -437,7 +438,7 @@ void pollset_set_test_empty_pollset() { cleanup_test_fds(&exec_ctx, tfds, num_fds); cleanup_test_pollsets(&exec_ctx, pollsets, num_ps); - cleanup_test_pollset_sets(&pollset_set, num_pss); + cleanup_test_pollset_sets(&exec_ctx, &pollset_set, num_pss); grpc_exec_ctx_finish(&exec_ctx); } diff --git a/test/core/iomgr/resolve_address_posix_test.c b/test/core/iomgr/resolve_address_posix_test.c index a4feff8b00..ef4cfdf06f 100644 --- a/test/core/iomgr/resolve_address_posix_test.c +++ b/test/core/iomgr/resolve_address_posix_test.c @@ -74,7 +74,7 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline())); grpc_resolved_addresses_destroy(args->addrs); grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset); - grpc_pollset_set_destroy(args->pollset_set); + grpc_pollset_set_destroy(exec_ctx, args->pollset_set); grpc_closure do_nothing_cb; grpc_closure_init(&do_nothing_cb, do_nothing, NULL, grpc_schedule_on_exec_ctx); diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c index 54de9a20e1..6a9bb5ae6f 100644 --- a/test/core/iomgr/resolve_address_test.c +++ b/test/core/iomgr/resolve_address_test.c @@ -69,7 +69,7 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline())); grpc_resolved_addresses_destroy(args->addrs); grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset); - grpc_pollset_set_destroy(args->pollset_set); + grpc_pollset_set_destroy(exec_ctx, args->pollset_set); grpc_closure do_nothing_cb; grpc_closure_init(&do_nothing_cb, do_nothing, NULL, grpc_schedule_on_exec_ctx); diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c index dcdff8efb1..c9b514a024 100644 --- a/test/core/iomgr/tcp_client_posix_test.c +++ b/test/core/iomgr/tcp_client_posix_test.c @@ -207,7 +207,7 @@ int main(int argc, char **argv) { test_succeeds(); gpr_log(GPR_ERROR, "End of first test"); test_fails(); - grpc_pollset_set_destroy(g_pollset_set); + grpc_pollset_set_destroy(&exec_ctx, g_pollset_set); grpc_closure_init(&destroyed, destroy_pollset, g_pollset, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); diff --git 
a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c index a9bd976a39..0a73f67528 100644 --- a/test/core/security/jwt_verifier_test.c +++ b/test/core/security/jwt_verifier_test.c @@ -386,9 +386,9 @@ static void test_jwt_verifier_google_email_issuer_success(void) { GPR_ASSERT(jwt != NULL); grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_success, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -420,9 +420,9 @@ static void test_jwt_verifier_custom_email_issuer_success(void) { GPR_ASSERT(jwt != NULL); grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_success, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -469,9 +469,9 @@ static void test_jwt_verifier_url_issuer_success(void) { GPR_ASSERT(jwt != NULL); grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_success, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -511,9 +511,9 @@ static void test_jwt_verifier_url_issuer_bad_config(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_key_retrieval_error, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -534,9 +534,9 @@ static void test_jwt_verifier_bad_json_key(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_key_retrieval_error, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -588,9 +588,9 @@ static void test_jwt_verifier_bad_signature(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_bad_signature, (void *)expected_user_data); - grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); + grpc_exec_ctx_finish(&exec_ctx); grpc_httpcli_set_override(NULL, NULL); } @@ -619,8 +619,8 @@ static void test_jwt_verifier_bad_format(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, "bad jwt", expected_audience, on_verification_bad_format, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c index bbd4a67ac1..aaf0e7f6b1 100644 --- a/test/core/security/verify_jwt.c +++ b/test/core/security/verify_jwt.c @@ -123,14 +123,15 @@ int main(int argc, char **argv) { gpr_inf_future(GPR_CLOCK_MONOTONIC)))) sync.is_done = true; gpr_mu_unlock(sync.mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(sync.mu); } gpr_mu_unlock(sync.mu); gpr_free(sync.pollset); - grpc_jwt_verifier_destroy(verifier); + 
grpc_jwt_verifier_destroy(&exec_ctx, verifier); + grpc_exec_ctx_finish(&exec_ctx); gpr_cmdline_destroy(cl); grpc_shutdown(); return !sync.success; diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c index e0a2c94216..330da46849 100644 --- a/test/core/surface/public_headers_must_be_c89.c +++ b/test/core/surface/public_headers_must_be_c89.c @@ -52,6 +52,7 @@ #include <grpc/impl/codegen/status.h> #include <grpc/impl/codegen/sync.h> #include <grpc/impl/codegen/sync_generic.h> +#include <grpc/load_reporting.h> #include <grpc/slice.h> #include <grpc/slice_buffer.h> #include <grpc/status.h> diff --git a/test/core/transport/connectivity_state_test.c b/test/core/transport/connectivity_state_test.c index 3520ef0a80..8314a5f619 100644 --- a/test/core/transport/connectivity_state_test.c +++ b/test/core/transport/connectivity_state_test.c @@ -77,8 +77,9 @@ static void test_check(void) { grpc_error *error; gpr_log(GPR_DEBUG, "test_check"); grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_IDLE, "xxx"); - GPR_ASSERT(grpc_connectivity_state_check(&tracker, &error) == + GPR_ASSERT(grpc_connectivity_state_get(&tracker, &error) == GRPC_CHANNEL_IDLE); + GPR_ASSERT(grpc_connectivity_state_check(&tracker) == GRPC_CHANNEL_IDLE); GPR_ASSERT(error == GRPC_ERROR_NONE); grpc_connectivity_state_destroy(&exec_ctx, &tracker); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c index 6d722ffc88..7b733ab9c7 100644 --- a/test/core/util/port_server_client.c +++ b/test/core/util/port_server_client.c @@ -121,7 +121,7 @@ void grpc_free_port_using_server(char *server, int port) { } gpr_mu_unlock(pr.mu); - grpc_httpcli_context_destroy(&context); + grpc_httpcli_context_destroy(&exec_ctx, &context); grpc_exec_ctx_finish(&exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), shutdown_closure); @@ -245,7 +245,7 @@ int grpc_pick_port_using_server(char *server) { gpr_mu_unlock(pr.mu); grpc_http_response_destroy(&pr.response); - grpc_httpcli_context_destroy(&context); + grpc_httpcli_context_destroy(&exec_ctx, &context); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), shutdown_closure); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/util/trickle_endpoint.c b/test/core/util/trickle_endpoint.c new file mode 100644 index 0000000000..7ab0488a66 --- /dev/null +++ b/test/core/util/trickle_endpoint.c @@ -0,0 +1,196 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "test/core/util/passthru_endpoint.h" + +#include <inttypes.h> +#include <string.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/string_util.h> +#include <grpc/support/useful.h> + +#include "src/core/lib/iomgr/sockaddr.h" + +#include "src/core/lib/slice/slice_internal.h" + +typedef struct { + grpc_endpoint base; + double bytes_per_second; + grpc_endpoint *wrapped; + gpr_timespec last_write; + + gpr_mu mu; + grpc_slice_buffer write_buffer; + grpc_slice_buffer writing_buffer; + grpc_error *error; + bool writing; +} trickle_endpoint; + +static void te_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_slice_buffer *slices, grpc_closure *cb) { + trickle_endpoint *te = (trickle_endpoint *)ep; + grpc_endpoint_read(exec_ctx, te->wrapped, slices, cb); +} + +static void te_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_slice_buffer *slices, grpc_closure *cb) { + trickle_endpoint *te = (trickle_endpoint *)ep; + for (size_t i = 0; i < slices->count; i++) { + grpc_slice_ref_internal(slices->slices[i]); + } + gpr_mu_lock(&te->mu); + if (te->write_buffer.length == 0) { + te->last_write = gpr_now(GPR_CLOCK_MONOTONIC); + } + grpc_slice_buffer_addn(&te->write_buffer, slices->slices, slices->count); + grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_REF(te->error)); + gpr_mu_unlock(&te->mu); +} + +static grpc_workqueue *te_get_workqueue(grpc_endpoint *ep) { + trickle_endpoint *te = (trickle_endpoint *)ep; + return grpc_endpoint_get_workqueue(te->wrapped); +} + +static void te_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_pollset *pollset) { + trickle_endpoint *te = (trickle_endpoint *)ep; + grpc_endpoint_add_to_pollset(exec_ctx, te->wrapped, pollset); +} + +static void te_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_pollset_set *pollset_set) { + trickle_endpoint *te = (trickle_endpoint *)ep; + grpc_endpoint_add_to_pollset_set(exec_ctx, te->wrapped, pollset_set); +} + +static void te_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, + grpc_error *why) { + trickle_endpoint *te = (trickle_endpoint *)ep; + gpr_mu_lock(&te->mu); + if (te->error == GRPC_ERROR_NONE) { + te->error = GRPC_ERROR_REF(why); + } + gpr_mu_unlock(&te->mu); + grpc_endpoint_shutdown(exec_ctx, te->wrapped, why); +} + +static void te_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { + trickle_endpoint *te = (trickle_endpoint *)ep; + grpc_endpoint_destroy(exec_ctx, te->wrapped); + gpr_mu_destroy(&te->mu); + grpc_slice_buffer_destroy_internal(exec_ctx, &te->write_buffer); + grpc_slice_buffer_destroy_internal(exec_ctx, &te->writing_buffer); + GRPC_ERROR_UNREF(te->error); + gpr_free(te); +} + +static grpc_resource_user *te_get_resource_user(grpc_endpoint *ep) { + trickle_endpoint *te = (trickle_endpoint *)ep; + return grpc_endpoint_get_resource_user(te->wrapped); +} + +static char *te_get_peer(grpc_endpoint *ep) { + trickle_endpoint *te = (trickle_endpoint *)ep; + return 
grpc_endpoint_get_peer(te->wrapped); +} + +static int te_get_fd(grpc_endpoint *ep) { + trickle_endpoint *te = (trickle_endpoint *)ep; + return grpc_endpoint_get_fd(te->wrapped); +} + +static void te_finish_write(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + trickle_endpoint *te = arg; + gpr_mu_lock(&te->mu); + te->writing = false; + grpc_slice_buffer_reset_and_unref(&te->writing_buffer); + gpr_mu_unlock(&te->mu); +} + +static const grpc_endpoint_vtable vtable = {te_read, + te_write, + te_get_workqueue, + te_add_to_pollset, + te_add_to_pollset_set, + te_shutdown, + te_destroy, + te_get_resource_user, + te_get_peer, + te_get_fd}; + +grpc_endpoint *grpc_trickle_endpoint_create(grpc_endpoint *wrap, + double bytes_per_second) { + trickle_endpoint *te = gpr_malloc(sizeof(*te)); + te->base.vtable = &vtable; + te->wrapped = wrap; + te->bytes_per_second = bytes_per_second; + gpr_mu_init(&te->mu); + grpc_slice_buffer_init(&te->write_buffer); + grpc_slice_buffer_init(&te->writing_buffer); + te->error = GRPC_ERROR_NONE; + te->writing = false; + return &te->base; +} + +static double ts2dbl(gpr_timespec s) { + return (double)s.tv_sec + 1e-9 * (double)s.tv_nsec; +} + +size_t grpc_trickle_endpoint_trickle(grpc_exec_ctx *exec_ctx, + grpc_endpoint *ep) { + trickle_endpoint *te = (trickle_endpoint *)ep; + gpr_mu_lock(&te->mu); + if (!te->writing && te->write_buffer.length > 0) { + gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + double elapsed = ts2dbl(gpr_time_sub(now, te->last_write)); + size_t bytes = (size_t)(te->bytes_per_second * elapsed); + // gpr_log(GPR_DEBUG, "%lf elapsed --> %" PRIdPTR " bytes", elapsed, bytes); + if (bytes > 0) { + grpc_slice_buffer_move_first(&te->write_buffer, + GPR_MIN(bytes, te->write_buffer.length), + &te->writing_buffer); + te->writing = true; + te->last_write = now; + grpc_endpoint_write( + exec_ctx, te->wrapped, &te->writing_buffer, + grpc_closure_create(te_finish_write, te, grpc_schedule_on_exec_ctx)); + } + } + size_t backlog = te->write_buffer.length; + gpr_mu_unlock(&te->mu); + return backlog; +} diff --git a/test/core/util/trickle_endpoint.h b/test/core/util/trickle_endpoint.h new file mode 100644 index 0000000000..7e8d9d91e3 --- /dev/null +++ b/test/core/util/trickle_endpoint.h @@ -0,0 +1,46 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef TRICKLE_ENDPOINT_H +#define TRICKLE_ENDPOINT_H + +#include "src/core/lib/iomgr/endpoint.h" + +grpc_endpoint *grpc_trickle_endpoint_create(grpc_endpoint *wrap, + double bytes_per_second); + +/* Allow up to \a bytes through the endpoint. Returns the new backlog. */ +size_t grpc_trickle_endpoint_trickle(grpc_exec_ctx *exec_ctx, + grpc_endpoint *endpoint); + +#endif diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc index 8a00b61cef..5688ab7971 100644 --- a/test/cpp/interop/client.cc +++ b/test/cpp/interop/client.cc @@ -51,7 +51,7 @@ DEFINE_bool(use_tls, false, "Whether to use tls."); DEFINE_string(custom_credentials_type, "", "User provided credentials type."); DEFINE_bool(use_test_ca, false, "False to use SSL roots for google"); DEFINE_int32(server_port, 0, "Server port."); -DEFINE_string(server_host, "127.0.0.1", "Server host to connect to"); +DEFINE_string(server_host, "localhost", "Server host to connect to"); DEFINE_string(server_host_override, "foo.test.google.fr", "Override the server host which is sent in HTTP header"); DEFINE_string( diff --git a/test/cpp/interop/http2_client.cc b/test/cpp/interop/http2_client.cc index 38aee43b26..b96e9fac36 100644 --- a/test/cpp/interop/http2_client.cc +++ b/test/cpp/interop/http2_client.cc @@ -223,7 +223,7 @@ bool Http2Client::DoMaxStreams() { } // namespace grpc DEFINE_int32(server_port, 0, "Server port."); -DEFINE_string(server_host, "127.0.0.1", "Server host to connect to"); +DEFINE_string(server_host, "localhost", "Server host to connect to"); DEFINE_string(test_case, "rst_after_header", "Configure different test cases. Valid options are:\n\n" "goaway\n" diff --git a/test/cpp/interop/reconnect_interop_client.cc b/test/cpp/interop/reconnect_interop_client.cc index 797e52c744..1c2f606637 100644 --- a/test/cpp/interop/reconnect_interop_client.cc +++ b/test/cpp/interop/reconnect_interop_client.cc @@ -48,7 +48,7 @@ DEFINE_int32(server_control_port, 0, "Server port for control rpcs."); DEFINE_int32(server_retry_port, 0, "Server port for testing reconnection."); -DEFINE_string(server_host, "127.0.0.1", "Server host to connect to"); +DEFINE_string(server_host, "localhost", "Server host to connect to"); DEFINE_int32(max_reconnect_backoff_ms, 0, "Maximum backoff time, or 0 for default."); diff --git a/test/cpp/microbenchmarks/bm_closure.cc b/test/cpp/microbenchmarks/bm_closure.cc new file mode 100644 index 0000000000..80d6610e13 --- /dev/null +++ b/test/cpp/microbenchmarks/bm_closure.cc @@ -0,0 +1,356 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* Test various closure related operations */ + +#include <grpc/grpc.h> + +extern "C" { +#include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/exec_ctx.h" +} + +#include "third_party/benchmark/include/benchmark/benchmark.h" + +static class InitializeStuff { + public: + InitializeStuff() { grpc_init(); } + ~InitializeStuff() { grpc_shutdown(); } +} initialize_stuff; + +static void BM_NoOpExecCtx(benchmark::State& state) { + while (state.KeepRunning()) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_exec_ctx_finish(&exec_ctx); + } +} +BENCHMARK(BM_NoOpExecCtx); + +static void BM_WellFlushed(benchmark::State& state) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_exec_ctx_flush(&exec_ctx); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_WellFlushed); + +static void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {} + +static void BM_ClosureInitAgainstExecCtx(benchmark::State& state) { + grpc_closure c; + while (state.KeepRunning()) { + benchmark::DoNotOptimize( + grpc_closure_init(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx)); + } +} +BENCHMARK(BM_ClosureInitAgainstExecCtx); + +static void BM_ClosureInitAgainstCombiner(benchmark::State& state) { + grpc_combiner* combiner = grpc_combiner_create(NULL); + grpc_closure c; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + benchmark::DoNotOptimize(grpc_closure_init( + &c, DoNothing, NULL, grpc_combiner_scheduler(combiner, false))); + } + GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureInitAgainstCombiner); + +static void BM_ClosureRunOnExecCtx(benchmark::State& state) { + grpc_closure c; + grpc_closure_init(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_run(&exec_ctx, &c, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureRunOnExecCtx); + +static void BM_ClosureCreateAndRun(benchmark::State& state) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_run(&exec_ctx, grpc_closure_create(DoNothing, NULL, + 
grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureCreateAndRun); + +static void BM_ClosureInitAndRun(benchmark::State& state) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_closure c; + while (state.KeepRunning()) { + grpc_closure_run(&exec_ctx, grpc_closure_init(&c, DoNothing, NULL, + grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureInitAndRun); + +static void BM_ClosureSchedOnExecCtx(benchmark::State& state) { + grpc_closure c; + grpc_closure_init(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSchedOnExecCtx); + +static void BM_ClosureSched2OnExecCtx(benchmark::State& state) { + grpc_closure c1; + grpc_closure c2; + grpc_closure_init(&c1, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_closure_init(&c2, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSched2OnExecCtx); + +static void BM_ClosureSched3OnExecCtx(benchmark::State& state) { + grpc_closure c1; + grpc_closure c2; + grpc_closure c3; + grpc_closure_init(&c1, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_closure_init(&c2, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_closure_init(&c3, DoNothing, NULL, grpc_schedule_on_exec_ctx); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c3, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSched3OnExecCtx); + +static void BM_AcquireMutex(benchmark::State& state) { + // for comparison with the combiner stuff below + gpr_mu mu; + gpr_mu_init(&mu); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + gpr_mu_lock(&mu); + DoNothing(&exec_ctx, NULL, GRPC_ERROR_NONE); + gpr_mu_unlock(&mu); + } + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_AcquireMutex); + +static void BM_ClosureSchedOnCombiner(benchmark::State& state) { + grpc_combiner* combiner = grpc_combiner_create(NULL); + grpc_closure c; + grpc_closure_init(&c, DoNothing, NULL, + grpc_combiner_scheduler(combiner, false)); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSchedOnCombiner); + +static void BM_ClosureSched2OnCombiner(benchmark::State& state) { + grpc_combiner* combiner = grpc_combiner_create(NULL); + grpc_closure c1; + grpc_closure c2; + grpc_closure_init(&c1, DoNothing, NULL, + grpc_combiner_scheduler(combiner, false)); + grpc_closure_init(&c2, DoNothing, NULL, + grpc_combiner_scheduler(combiner, false)); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c2, 
GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSched2OnCombiner); + +static void BM_ClosureSched3OnCombiner(benchmark::State& state) { + grpc_combiner* combiner = grpc_combiner_create(NULL); + grpc_closure c1; + grpc_closure c2; + grpc_closure c3; + grpc_closure_init(&c1, DoNothing, NULL, + grpc_combiner_scheduler(combiner, false)); + grpc_closure_init(&c2, DoNothing, NULL, + grpc_combiner_scheduler(combiner, false)); + grpc_closure_init(&c3, DoNothing, NULL, + grpc_combiner_scheduler(combiner, false)); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c3, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSched3OnCombiner); + +static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) { + grpc_combiner* combiner1 = grpc_combiner_create(NULL); + grpc_combiner* combiner2 = grpc_combiner_create(NULL); + grpc_closure c1; + grpc_closure c2; + grpc_closure_init(&c1, DoNothing, NULL, + grpc_combiner_scheduler(combiner1, false)); + grpc_closure_init(&c2, DoNothing, NULL, + grpc_combiner_scheduler(combiner2, false)); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + GRPC_COMBINER_UNREF(&exec_ctx, combiner1, "finished"); + GRPC_COMBINER_UNREF(&exec_ctx, combiner2, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSched2OnTwoCombiners); + +static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) { + grpc_combiner* combiner1 = grpc_combiner_create(NULL); + grpc_combiner* combiner2 = grpc_combiner_create(NULL); + grpc_closure c1; + grpc_closure c2; + grpc_closure c3; + grpc_closure c4; + grpc_closure_init(&c1, DoNothing, NULL, + grpc_combiner_scheduler(combiner1, false)); + grpc_closure_init(&c2, DoNothing, NULL, + grpc_combiner_scheduler(combiner2, false)); + grpc_closure_init(&c3, DoNothing, NULL, + grpc_combiner_scheduler(combiner1, false)); + grpc_closure_init(&c4, DoNothing, NULL, + grpc_combiner_scheduler(combiner2, false)); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + while (state.KeepRunning()) { + grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c3, GRPC_ERROR_NONE); + grpc_closure_sched(&exec_ctx, &c4, GRPC_ERROR_NONE); + grpc_exec_ctx_flush(&exec_ctx); + } + GRPC_COMBINER_UNREF(&exec_ctx, combiner1, "finished"); + GRPC_COMBINER_UNREF(&exec_ctx, combiner2, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureSched4OnTwoCombiners); + +// Helper that continuously reschedules the same closure against something until +// the benchmark is complete +class Rescheduler { + public: + Rescheduler(benchmark::State& state, grpc_closure_scheduler* scheduler) + : state_(state) { + grpc_closure_init(&closure_, Step, this, scheduler); + } + + void ScheduleFirst(grpc_exec_ctx* exec_ctx) { + grpc_closure_sched(exec_ctx, &closure_, GRPC_ERROR_NONE); + } + + void ScheduleFirstAgainstDifferentScheduler( + grpc_exec_ctx* exec_ctx, grpc_closure_scheduler* scheduler) { + 
grpc_closure_sched(exec_ctx, grpc_closure_create(Step, this, scheduler), + GRPC_ERROR_NONE); + } + + private: + benchmark::State& state_; + grpc_closure closure_; + + static void Step(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { + Rescheduler* self = static_cast<Rescheduler*>(arg); + if (self->state_.KeepRunning()) { + grpc_closure_sched(exec_ctx, &self->closure_, GRPC_ERROR_NONE); + } + } +}; + +static void BM_ClosureReschedOnExecCtx(benchmark::State& state) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + Rescheduler r(state, grpc_schedule_on_exec_ctx); + r.ScheduleFirst(&exec_ctx); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureReschedOnExecCtx); + +static void BM_ClosureReschedOnCombiner(benchmark::State& state) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_combiner* combiner = grpc_combiner_create(NULL); + Rescheduler r(state, grpc_combiner_scheduler(combiner, false)); + r.ScheduleFirst(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); + GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureReschedOnCombiner); + +static void BM_ClosureReschedOnCombinerFinally(benchmark::State& state) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_combiner* combiner = grpc_combiner_create(NULL); + Rescheduler r(state, grpc_combiner_finally_scheduler(combiner, false)); + r.ScheduleFirstAgainstDifferentScheduler( + &exec_ctx, grpc_combiner_scheduler(combiner, false)); + grpc_exec_ctx_flush(&exec_ctx); + GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished"); + grpc_exec_ctx_finish(&exec_ctx); +} +BENCHMARK(BM_ClosureReschedOnCombinerFinally); + +BENCHMARK_MAIN(); diff --git a/test/cpp/microbenchmarks/bm_fullstack.cc b/test/cpp/microbenchmarks/bm_fullstack.cc index 9d883e68d7..c63de0ce0a 100644 --- a/test/cpp/microbenchmarks/bm_fullstack.cc +++ b/test/cpp/microbenchmarks/bm_fullstack.cc @@ -46,6 +46,7 @@ extern "C" { #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" +#include "src/core/ext/transport/chttp2/transport/internal.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/endpoint_pair.h" @@ -57,6 +58,7 @@ extern "C" { #include "test/core/util/memory_counters.h" #include "test/core/util/passthru_endpoint.h" #include "test/core/util/port.h" +#include "test/core/util/trickle_endpoint.h" } #include "src/core/lib/profiling/timers.h" #include "src/cpp/client/create_channel_internal.h" @@ -197,7 +199,8 @@ class UDS : public FullstackFixture { class EndpointPairFixture : public BaseFixture { public: - EndpointPairFixture(Service* service, grpc_endpoint_pair endpoints) { + EndpointPairFixture(Service* service, grpc_endpoint_pair endpoints) + : endpoint_pair_(endpoints) { ServerBuilder b; cq_ = b.AddCompletionQueue(true); b.RegisterService(service); @@ -210,7 +213,7 @@ class EndpointPairFixture : public BaseFixture { { const grpc_channel_args* server_args = grpc_server_get_channel_args(server_->c_server()); - grpc_transport* transport = grpc_create_chttp2_transport( + server_transport_ = grpc_create_chttp2_transport( &exec_ctx, server_args, endpoints.server, 0 /* is_client */); grpc_pollset** pollsets; @@ -221,9 +224,9 @@ class EndpointPairFixture : public BaseFixture { grpc_endpoint_add_to_pollset(&exec_ctx, endpoints.server, pollsets[i]); } - grpc_server_setup_transport(&exec_ctx, server_->c_server(), transport, - NULL, server_args); - grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL); + 
grpc_server_setup_transport(&exec_ctx, server_->c_server(), + server_transport_, NULL, server_args); + grpc_chttp2_transport_start_reading(&exec_ctx, server_transport_, NULL); } /* create channel */ @@ -233,12 +236,13 @@ class EndpointPairFixture : public BaseFixture { ApplyCommonChannelArguments(&args); grpc_channel_args c_args = args.c_channel_args(); - grpc_transport* transport = + client_transport_ = grpc_create_chttp2_transport(&exec_ctx, &c_args, endpoints.client, 1); - GPR_ASSERT(transport); - grpc_channel* channel = grpc_channel_create( - &exec_ctx, "target", &c_args, GRPC_CLIENT_DIRECT_CHANNEL, transport); - grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL); + GPR_ASSERT(client_transport_); + grpc_channel* channel = + grpc_channel_create(&exec_ctx, "target", &c_args, + GRPC_CLIENT_DIRECT_CHANNEL, client_transport_); + grpc_chttp2_transport_start_reading(&exec_ctx, client_transport_, NULL); channel_ = CreateChannelInternal("", channel); } @@ -258,6 +262,11 @@ class EndpointPairFixture : public BaseFixture { ServerCompletionQueue* cq() { return cq_.get(); } std::shared_ptr<Channel> channel() { return channel_; } + protected: + grpc_endpoint_pair endpoint_pair_; + grpc_transport* client_transport_; + grpc_transport* server_transport_; + private: std::unique_ptr<Server> server_; std::unique_ptr<ServerCompletionQueue> cq_; @@ -295,6 +304,75 @@ class InProcessCHTTP2 : public EndpointPairFixture { } }; +class TrickledCHTTP2 : public EndpointPairFixture { + public: + TrickledCHTTP2(Service* service, size_t megabits_per_second) + : EndpointPairFixture(service, MakeEndpoints(megabits_per_second)) {} + + void AddToLabel(std::ostream& out, benchmark::State& state) { + out << " writes/iter:" + << ((double)stats_.num_writes / (double)state.iterations()) + << " cli_transport_stalls/iter:" + << ((double) + client_stats_.streams_stalled_due_to_transport_flow_control / + (double)state.iterations()) + << " cli_stream_stalls/iter:" + << ((double)client_stats_.streams_stalled_due_to_stream_flow_control / + (double)state.iterations()) + << " svr_transport_stalls/iter:" + << ((double) + server_stats_.streams_stalled_due_to_transport_flow_control / + (double)state.iterations()) + << " svr_stream_stalls/iter:" + << ((double)server_stats_.streams_stalled_due_to_stream_flow_control / + (double)state.iterations()); + } + + void Step() { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + size_t client_backlog = + grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.client); + size_t server_backlog = + grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.server); + grpc_exec_ctx_finish(&exec_ctx); + + UpdateStats((grpc_chttp2_transport*)client_transport_, &client_stats_, + client_backlog); + UpdateStats((grpc_chttp2_transport*)server_transport_, &server_stats_, + server_backlog); + } + + private: + grpc_passthru_endpoint_stats stats_; + struct Stats { + int streams_stalled_due_to_stream_flow_control = 0; + int streams_stalled_due_to_transport_flow_control = 0; + }; + Stats client_stats_; + Stats server_stats_; + + grpc_endpoint_pair MakeEndpoints(size_t kilobits) { + grpc_endpoint_pair p; + grpc_passthru_endpoint_create(&p.client, &p.server, initialize_stuff.rq(), + &stats_); + double bytes_per_second = 125.0 * kilobits; + p.client = grpc_trickle_endpoint_create(p.client, bytes_per_second); + p.server = grpc_trickle_endpoint_create(p.server, bytes_per_second); + return p; + } + + void UpdateStats(grpc_chttp2_transport* t, Stats* s, size_t backlog) { + if (backlog == 0) { + if 
(t->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != NULL) { + s->streams_stalled_due_to_stream_flow_control++; + } + if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != NULL) { + s->streams_stalled_due_to_transport_flow_control++; + } + } + } +}; + /******************************************************************************* * CONTEXT MUTATORS */ @@ -620,6 +698,7 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) { } while (state.KeepRunning()) { + GPR_TIMER_SCOPE("BenchmarkCycle", 0); request_rw->Write(send_request, tag(0)); // Start client send response_rw.Read(&recv_request, tag(1)); // Start server recv request_rw->Read(&recv_response, tag(2)); // Start client recv @@ -777,6 +856,81 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) { state.SetBytesProcessed(state.range(0) * state.iterations()); } +static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok) { + while (true) { + switch (fixture->cq()->AsyncNext( + t, ok, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_micros(100, GPR_TIMESPAN)))) { + case CompletionQueue::TIMEOUT: + fixture->Step(); + break; + case CompletionQueue::SHUTDOWN: + GPR_ASSERT(false); + break; + case CompletionQueue::GOT_EVENT: + return; + } + } +} + +static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) { + EchoTestService::AsyncService service; + std::unique_ptr<TrickledCHTTP2> fixture( + new TrickledCHTTP2(&service, state.range(1))); + { + EchoResponse send_response; + EchoResponse recv_response; + if (state.range(0) > 0) { + send_response.set_message(std::string(state.range(0), 'a')); + } + Status recv_status; + ServerContext svr_ctx; + ServerAsyncReaderWriter<EchoResponse, EchoRequest> response_rw(&svr_ctx); + service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), + fixture->cq(), tag(0)); + std::unique_ptr<EchoTestService::Stub> stub( + EchoTestService::NewStub(fixture->channel())); + ClientContext cli_ctx; + auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); + int need_tags = (1 << 0) | (1 << 1); + void* t; + bool ok; + while (need_tags) { + TrickleCQNext(fixture.get(), &t, &ok); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + request_rw->Read(&recv_response, tag(0)); + while (state.KeepRunning()) { + GPR_TIMER_SCOPE("BenchmarkCycle", 0); + response_rw.Write(send_response, tag(1)); + while (true) { + TrickleCQNext(fixture.get(), &t, &ok); + if (t == tag(0)) { + request_rw->Read(&recv_response, tag(0)); + } else if (t == tag(1)) { + break; + } else { + GPR_ASSERT(false); + } + } + } + response_rw.Finish(Status::OK, tag(1)); + need_tags = (1 << 0) | (1 << 1); + while (need_tags) { + TrickleCQNext(fixture.get(), &t, &ok); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + } + fixture->Finish(state); + fixture.reset(); + state.SetBytesProcessed(state.range(0) * state.iterations()); +} + /******************************************************************************* * CONFIGURATIONS */ @@ -866,6 +1020,19 @@ BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, SockPair) BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcessCHTTP2) ->Range(0, 128 * 1024 * 1024); +static void TrickleArgs(benchmark::internal::Benchmark* b) { + for (int i = 1; i <= 128 * 1024 * 1024; i *= 8) { + for (int j = 1; j <= 128 * 1024 * 1024; j *= 8) { + double expected_time = + static_cast<double>(14 + i) / (125.0 * static_cast<double>(j)); + if (expected_time > 
0.01) continue; + b->Args({i, j}); + } + } +} + +BENCHMARK(BM_PumpStreamServerToClient_Trickle)->Apply(TrickleArgs); + // Generate Args for StreamingPingPong benchmarks. Currently generates args for // only "small streams" (i.e streams with 0, 1 or 2 messages) static void StreamingPingPongArgs(benchmark::internal::Benchmark* b) { |