path: root/test
author    Muxi Yan <mxyan@google.com>  2017-04-11 09:22:52 -0700
committer Muxi Yan <mxyan@google.com>  2017-04-11 09:22:52 -0700
commit    81addac3ceba688c4ef00cc54da7c45ae443f1ce (patch)
tree      f4f19edbe1f0e71678bdbc1e309d122e8f2c4d3f /test
parent    a002b2413dac2b8c4304349d549c7f30a9f4aff0 (diff)
parent    498e2b350aec3d358b3ec505fa800194778cd8e4 (diff)
Merge remote-tracking branch 'upstream/master' into lazy-deframe
Diffstat (limited to 'test')
-rw-r--r--  test/core/end2end/invalid_call_argument_test.c      |   2
-rw-r--r--  test/core/end2end/tests/max_connection_idle.c       | 137
-rw-r--r--  test/core/iomgr/sockaddr_utils_test.c               |   2
-rw-r--r--  test/cpp/end2end/end2end_test.cc                    |  33
-rw-r--r--  test/cpp/end2end/test_service_impl.cc               |   5
-rw-r--r--  test/cpp/microbenchmarks/bm_cq_multiple_threads.cc  |  46
-rw-r--r--  test/cpp/qps/benchmark_config.cc                    |  15
-rw-r--r--  test/cpp/qps/report.cc                              |  35
-rw-r--r--  test/cpp/qps/report.h                               |  18
-rw-r--r--  test/cpp/util/grpc_tool.cc                          |   1
10 files changed, 278 insertions(+), 16 deletions(-)
diff --git a/test/core/end2end/invalid_call_argument_test.c b/test/core/end2end/invalid_call_argument_test.c
index 2a9072570d..bfd8e6fefa 100644
--- a/test/core/end2end/invalid_call_argument_test.c
+++ b/test/core/end2end/invalid_call_argument_test.c
@@ -31,6 +31,8 @@
*
*/
+#include <grpc/impl/codegen/port_platform.h>
+
#include <limits.h>
#include <string.h>
diff --git a/test/core/end2end/tests/max_connection_idle.c b/test/core/end2end/tests/max_connection_idle.c
index c0984e4d14..98bc08c6d5 100644
--- a/test/core/end2end/tests/max_connection_idle.c
+++ b/test/core/end2end/tests/max_connection_idle.c
@@ -36,6 +36,7 @@
#include <limits.h>
#include <string.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -48,6 +49,138 @@
static void *tag(intptr_t t) { return (void *)t; }
+static void drain_cq(grpc_completion_queue *cq) {
+ grpc_event ev;
+ do {
+ ev = grpc_completion_queue_next(cq, grpc_timeout_seconds_to_deadline(5),
+ NULL);
+ } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+}
+
+static void simple_request_body(grpc_end2end_test_config config,
+ grpc_end2end_test_fixture *f) {
+ grpc_call *c;
+ grpc_call *s;
+ cq_verifier *cqv = cq_verifier_create(f->cq);
+ grpc_op ops[6];
+ grpc_op *op;
+ grpc_metadata_array initial_metadata_recv;
+ grpc_metadata_array trailing_metadata_recv;
+ grpc_metadata_array request_metadata_recv;
+ grpc_call_details call_details;
+ grpc_status_code status;
+ grpc_call_error error;
+ grpc_slice details;
+ int was_cancelled = 2;
+ char *peer;
+
+ gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+ c = grpc_channel_create_call(
+ f->client, NULL, GRPC_PROPAGATE_DEFAULTS, f->cq,
+ grpc_slice_from_static_string("/foo"),
+ get_host_override_slice("foo.test.google.fr:1234", config), deadline,
+ NULL);
+ GPR_ASSERT(c);
+
+ peer = grpc_call_get_peer(c);
+ GPR_ASSERT(peer != NULL);
+ gpr_log(GPR_DEBUG, "client_peer_before_call=%s", peer);
+ gpr_free(peer);
+
+ grpc_metadata_array_init(&initial_metadata_recv);
+ grpc_metadata_array_init(&trailing_metadata_recv);
+ grpc_metadata_array_init(&request_metadata_recv);
+ grpc_call_details_init(&call_details);
+
+ memset(ops, 0, sizeof(ops));
+ op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+ op->data.recv_status_on_client.status = &status;
+ op->data.recv_status_on_client.status_details = &details;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ error =
+ grpc_server_request_call(f->server, &s, &call_details,
+ &request_metadata_recv, f->cq, f->cq, tag(101));
+ GPR_ASSERT(GRPC_CALL_OK == error);
+ CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+ cq_verify(cqv);
+
+ peer = grpc_call_get_peer(s);
+ GPR_ASSERT(peer != NULL);
+ gpr_log(GPR_DEBUG, "server_peer=%s", peer);
+ gpr_free(peer);
+ peer = grpc_call_get_peer(c);
+ GPR_ASSERT(peer != NULL);
+ gpr_log(GPR_DEBUG, "client_peer=%s", peer);
+ gpr_free(peer);
+
+ memset(ops, 0, sizeof(ops));
+ op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+ op->data.send_status_from_server.trailing_metadata_count = 0;
+ op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
+ grpc_slice status_details = grpc_slice_from_static_string("xyz");
+ op->data.send_status_from_server.status_details = &status_details;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+ op->data.recv_close_on_server.cancelled = &was_cancelled;
+ op->flags = 0;
+ op->reserved = NULL;
+ op++;
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
+ CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+ cq_verify(cqv);
+
+ GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
+ GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
+ GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo"));
+ validate_host_override_string("foo.test.google.fr:1234", call_details.host,
+ config);
+ GPR_ASSERT(0 == call_details.flags);
+ GPR_ASSERT(was_cancelled == 1);
+
+ grpc_slice_unref(details);
+ grpc_metadata_array_destroy(&initial_metadata_recv);
+ grpc_metadata_array_destroy(&trailing_metadata_recv);
+ grpc_metadata_array_destroy(&request_metadata_recv);
+ grpc_call_details_destroy(&call_details);
+
+ grpc_call_destroy(c);
+ grpc_call_destroy(s);
+
+ cq_verifier_destroy(cqv);
+}
+
static void test_max_connection_idle(grpc_end2end_test_config config) {
grpc_end2end_test_fixture f = config.create_fixture(NULL, NULL);
grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
@@ -86,6 +219,9 @@ static void test_max_connection_idle(grpc_end2end_test_config config) {
state == GRPC_CHANNEL_TRANSIENT_FAILURE);
}
+ /* Use a simple request to cancel and reset the max idle timer */
+ simple_request_body(config, &f);
+
/* wait for the channel to reach its maximum idle time */
grpc_channel_watch_connectivity_state(
f.client, GRPC_CHANNEL_READY,
@@ -104,6 +240,7 @@ static void test_max_connection_idle(grpc_end2end_test_config config) {
grpc_server_destroy(f.server);
grpc_channel_destroy(f.client);
grpc_completion_queue_shutdown(f.cq);
+ drain_cq(f.cq);
grpc_completion_queue_destroy(f.cq);
config.tear_down_data(&f);
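
Note: the fixture used by test_max_connection_idle gets its idle limit from the server channel args, which are outside this hunk. Below is a minimal sketch, not part of this commit, of how such a limit is typically configured with the core GRPC_ARG_MAX_CONNECTION_IDLE_MS channel arg; the make_idle_args() helper and the 2000 ms value are purely illustrative.

#include <grpc/grpc.h>

/* Illustrative helper: builds channel args asking the server to drop
   connections that stay idle for more than 2 seconds. */
static grpc_channel_args* make_idle_args() {
  static grpc_arg idle_arg;
  idle_arg.type = GRPC_ARG_INTEGER;
  idle_arg.key = const_cast<char*>(GRPC_ARG_MAX_CONNECTION_IDLE_MS);
  idle_arg.value.integer = 2000;  /* milliseconds */
  static grpc_channel_args idle_args = {1, &idle_arg};
  return &idle_args;
}

With args like these passed to the server side of the fixture, the simple request added above keeps the connection alive past the point where the idle timer would otherwise have fired.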
diff --git a/test/core/iomgr/sockaddr_utils_test.c b/test/core/iomgr/sockaddr_utils_test.c
index 70a6c323e5..09c514c8e6 100644
--- a/test/core/iomgr/sockaddr_utils_test.c
+++ b/test/core/iomgr/sockaddr_utils_test.c
@@ -254,8 +254,6 @@ static void test_sockaddr_to_string(void) {
expect_sockaddr_str("(sockaddr family=123)", &dummy, 0);
expect_sockaddr_str("(sockaddr family=123)", &dummy, 1);
GPR_ASSERT(grpc_sockaddr_to_uri(&dummy) == NULL);
-
- GPR_ASSERT(errno == 0x7EADBEEF);
}
static void test_sockaddr_set_get_port(void) {
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index d3a83b188f..df71777e4b 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -1130,6 +1130,39 @@ TEST_P(End2endTest, BinaryTrailerTest) {
EXPECT_TRUE(returned_info.ParseFromString(ToString(iter->second)));
}
+TEST_P(End2endTest, ExpectErrorTest) {
+ ResetStub();
+
+ std::vector<ErrorStatus> expected_status;
+ expected_status.emplace_back();
+ expected_status.back().set_code(13); // INTERNAL
+ expected_status.back().set_error_message("text error message");
+ expected_status.back().set_binary_error_details("text error details");
+ expected_status.emplace_back();
+ expected_status.back().set_code(13); // INTERNAL
+ expected_status.back().set_error_message("text error message");
+ expected_status.back().set_binary_error_details(
+ "\x0\x1\x2\x3\x4\x5\x6\x8\x9\xA\xB");
+
+ for (auto iter = expected_status.begin(); iter != expected_status.end();
+ ++iter) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("Hello");
+ auto* error = request.mutable_param()->mutable_expected_error();
+ error->set_code(iter->code());
+ error->set_error_message(iter->error_message());
+ error->set_binary_error_details(iter->binary_error_details());
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(iter->code(), s.error_code());
+ EXPECT_EQ(iter->error_message(), s.error_message());
+ EXPECT_EQ(iter->binary_error_details(), s.error_details());
+ }
+}
+
//////////////////////////////////////////////////////////////////////////
// Test with and without a proxy.
class ProxyEnd2endTest : public End2endTest {
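
The three fields checked by ExpectErrorTest map one-to-one onto the three-argument grpc::Status constructor that the server handler uses (see test_service_impl.cc below). A minimal illustrative sketch of that mapping, not taken from this commit:

// Illustrative: what the handler returns and which accessor sees each field.
grpc::Status server_status(
    grpc::StatusCode::INTERNAL,        // -> Status::error_code() on the client
    "text error message",              // -> Status::error_message()
    "binary details, may be non-text"  // -> Status::error_details()
);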
diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc
index 11729c425c..b473dd1f52 100644
--- a/test/cpp/end2end/test_service_impl.cc
+++ b/test/cpp/end2end/test_service_impl.cc
@@ -92,6 +92,11 @@ Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request,
gpr_log(GPR_ERROR, "The request should not reach application handler.");
GPR_ASSERT(0);
}
+ if (request->has_param() && request->param().has_expected_error()) {
+ const auto& error = request->param().expected_error();
+ return Status(static_cast<StatusCode>(error.code()), error.error_message(),
+ error.binary_error_details());
+ }
int server_try_cancel = GetIntValueFromMetadata(
kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
if (server_try_cancel > DO_NOT_CANCEL) {
diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index 967c226ac7..8627463204 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -36,6 +36,8 @@
#include <atomic>
#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include "test/cpp/microbenchmarks/helpers.h"
extern "C" {
@@ -51,13 +53,10 @@ struct grpc_pollset {
namespace grpc {
namespace testing {
-static void* make_tag(int i) { return (void*)(intptr_t)i; }
+static void* g_tag = (void*)(intptr_t)10; // Some random number
static grpc_completion_queue* g_cq;
static grpc_event_engine_vtable g_vtable;
-static __thread int g_thread_idx;
-static __thread grpc_cq_completion g_cq_completion;
-
static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
grpc_closure* closure) {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
@@ -76,15 +75,18 @@ static grpc_error* pollset_kick(grpc_pollset* p, grpc_pollset_worker* worker) {
/* Callback when the tag is dequeued from the completion queue. Does nothing */
static void cq_done_cb(grpc_exec_ctx* exec_ctx, void* done_arg,
- grpc_cq_completion* cq_completion) {}
+ grpc_cq_completion* cq_completion) {
+ gpr_free(cq_completion);
+}
/* Queues a completion tag. ZERO polling overhead */
static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
grpc_pollset_worker** worker, gpr_timespec now,
gpr_timespec deadline) {
gpr_mu_unlock(&ps->mu);
- grpc_cq_end_op(exec_ctx, g_cq, make_tag(g_thread_idx), GRPC_ERROR_NONE,
- cq_done_cb, NULL, &g_cq_completion);
+ grpc_cq_begin_op(g_cq, g_tag);
+ grpc_cq_end_op(exec_ctx, g_cq, g_tag, GRPC_ERROR_NONE, cq_done_cb, NULL,
+ (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&ps->mu);
return GRPC_ERROR_NONE;
@@ -109,26 +111,42 @@ static void setup() {
g_cq = grpc_completion_queue_create(NULL);
}
+static void teardown() {
+ grpc_completion_queue_shutdown(g_cq);
+ grpc_completion_queue_destroy(g_cq);
+}
+
+/* A few notes about multi-threaded benchmarks:
+
+ Setup:
+  The benchmark framework ensures that none of the threads proceed beyond the
+  state.KeepRunning() call unless all the threads have called
+  state.KeepRunning() at least once. So it is safe to do the initialization in
+  one of the threads before state.KeepRunning() is called.
+
+ Teardown:
+  The benchmark framework also ensures that no thread is running the benchmark
+  code (i.e. the code between two successive calls of state.KeepRunning()) if
+  state.KeepRunning() returns false. So it is safe to do the teardown in one
+  of the threads after state.KeepRunning() returns false.
+*/
static void BM_Cq_Throughput(benchmark::State& state) {
TrackCounters track_counters;
- gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
if (state.thread_index == 0) {
setup();
}
while (state.KeepRunning()) {
- g_thread_idx = state.thread_index;
- void* dummy_tag = make_tag(g_thread_idx);
- grpc_cq_begin_op(g_cq, dummy_tag);
- grpc_completion_queue_next(g_cq, deadline, NULL);
+ GPR_ASSERT(grpc_completion_queue_next(g_cq, deadline, NULL).type ==
+ GRPC_OP_COMPLETE);
}
state.SetItemsProcessed(state.iterations());
if (state.thread_index == 0) {
- grpc_completion_queue_shutdown(g_cq);
- grpc_completion_queue_destroy(g_cq);
+ teardown();
}
track_counters.Finish(state);
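
The setup/teardown notes above rely on Google Benchmark's threaded execution model. A minimal sketch of how such a benchmark is registered to run with multiple threads; the thread range is illustrative and the actual registration line in this file is not shown in the hunk.

// Illustrative registration: run BM_Cq_Throughput with 1 to 16 threads,
// so state.thread_index 0 performs setup() and teardown() exactly once.
BENCHMARK(BM_Cq_Throughput)->ThreadRange(1, 16);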
diff --git a/test/cpp/qps/benchmark_config.cc b/test/cpp/qps/benchmark_config.cc
index 98b8d0ba37..d33f3e9ae1 100644
--- a/test/cpp/qps/benchmark_config.cc
+++ b/test/cpp/qps/benchmark_config.cc
@@ -33,6 +33,9 @@
#include "test/cpp/qps/benchmark_config.h"
#include <gflags/gflags.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/security/credentials.h>
+#include <grpc/support/log.h>
DEFINE_bool(enable_log_reporter, true,
"Enable reporting of benchmark results through GprLog");
@@ -51,6 +54,11 @@ DEFINE_string(server_address, "localhost:50052",
DEFINE_string(tag, "", "Optional tag for the test");
+DEFINE_string(rpc_reporter_server_address, "",
+ "Server address for rpc reporter to send results to");
+
+DEFINE_bool(enable_rpc_reporter, false, "Enable use of RPC reporter");
+
// In some distros, gflags is in the namespace google, and in some others,
// in gflags. This hack is enabling us to find both.
namespace google {}
@@ -75,6 +83,13 @@ static std::shared_ptr<Reporter> InitBenchmarkReporters() {
composite_reporter->add(std::unique_ptr<Reporter>(
new JsonReporter("JsonReporter", FLAGS_scenario_result_file)));
}
+ if (FLAGS_enable_rpc_reporter) {
+ GPR_ASSERT(!FLAGS_rpc_reporter_server_address.empty());
+ composite_reporter->add(std::unique_ptr<Reporter>(new RpcReporter(
+ "RpcReporter",
+ grpc::CreateChannel(FLAGS_rpc_reporter_server_address,
+ grpc::InsecureChannelCredentials()))));
+ }
return std::shared_ptr<Reporter>(composite_reporter);
}
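
Usage note: when --enable_rpc_reporter is set, the composite reporter built above gains an RpcReporter alongside the log and JSON reporters. A minimal sketch of wiring one up by hand, assuming the Reporter base class exposes ReportQPS publicly (as the composite fan-out implies); the address and the pre-filled scenario_result are illustrative.

#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>
#include "test/cpp/qps/report.h"

// Illustrative only: benchmark_config.cc does this automatically when the
// --enable_rpc_reporter / --rpc_reporter_server_address flags are set.
std::unique_ptr<grpc::testing::Reporter> reporter(new grpc::testing::RpcReporter(
    "RpcReporter",
    grpc::CreateChannel("localhost:50052", grpc::InsecureChannelCredentials())));
reporter->ReportQPS(scenario_result);  // scenario_result: a filled ScenarioResult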
diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc
index 7f84816421..a9130bf5d4 100644
--- a/test/cpp/qps/report.cc
+++ b/test/cpp/qps/report.cc
@@ -40,6 +40,9 @@
#include "test/cpp/qps/parse_json.h"
#include "test/cpp/qps/stats.h"
+#include <grpc++/client_context.h>
+#include "src/proto/grpc/testing/services.grpc.pb.h"
+
namespace grpc {
namespace testing {
@@ -142,5 +145,37 @@ void JsonReporter::ReportCpuUsage(const ScenarioResult& result) {
// NOP - all reporting is handled by ReportQPS.
}
+void RpcReporter::ReportQPS(const ScenarioResult& result) {
+ grpc::ClientContext context;
+ grpc::Status status;
+ Void dummy;
+
+ gpr_log(GPR_INFO, "RPC reporter sending scenario result to server");
+ status = stub_->ReportScenario(&context, result, &dummy);
+
+ if (status.ok()) {
+ gpr_log(GPR_INFO, "RpcReporter report RPC success!");
+ } else {
+ gpr_log(GPR_ERROR, "RpcReporter report RPC: code: %d. message: %s",
+ status.error_code(), status.error_message().c_str());
+ }
+}
+
+void RpcReporter::ReportQPSPerCore(const ScenarioResult& result) {
+ // NOP - all reporting is handled by ReportQPS.
+}
+
+void RpcReporter::ReportLatency(const ScenarioResult& result) {
+ // NOP - all reporting is handled by ReportQPS.
+}
+
+void RpcReporter::ReportTimes(const ScenarioResult& result) {
+ // NOP - all reporting is handled by ReportQPS.
+}
+
+void RpcReporter::ReportCpuUsage(const ScenarioResult& result) {
+ // NOP - all reporting is handled by ReportQPS.
+}
+
} // namespace testing
} // namespace grpc
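
The stub_->ReportScenario() call above needs a collector listening on the other end. Below is a minimal sketch, not part of this commit, of a server implementing ReportQpsScenarioService (service, method, and message names taken from the call above) that would receive these reports.

#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include "src/proto/grpc/testing/services.grpc.pb.h"

// Illustrative receiving end for RpcReporter.
class ScenarioCollector final
    : public grpc::testing::ReportQpsScenarioService::Service {
  grpc::Status ReportScenario(grpc::ServerContext* context,
                              const grpc::testing::ScenarioResult* result,
                              grpc::testing::Void* response) override {
    gpr_log(GPR_INFO, "received a scenario result from a benchmark run");
    return grpc::Status::OK;
  }
};

Registering ScenarioCollector with a ServerBuilder on the address passed via --rpc_reporter_server_address would complete the loop.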
diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h
index faf87ff060..1749be98c6 100644
--- a/test/cpp/qps/report.h
+++ b/test/cpp/qps/report.h
@@ -42,6 +42,9 @@
#include "test/cpp/qps/driver.h"
+#include <grpc++/channel.h>
+#include "src/proto/grpc/testing/services.grpc.pb.h"
+
namespace grpc {
namespace testing {
@@ -124,6 +127,21 @@ class JsonReporter : public Reporter {
const string report_file_;
};
+class RpcReporter : public Reporter {
+ public:
+ RpcReporter(const string& name, std::shared_ptr<grpc::Channel> channel)
+ : Reporter(name), stub_(ReportQpsScenarioService::NewStub(channel)) {}
+
+ private:
+ void ReportQPS(const ScenarioResult& result) override;
+ void ReportQPSPerCore(const ScenarioResult& result) override;
+ void ReportLatency(const ScenarioResult& result) override;
+ void ReportTimes(const ScenarioResult& result) override;
+ void ReportCpuUsage(const ScenarioResult& result) override;
+
+ std::unique_ptr<ReportQpsScenarioService::Stub> stub_;
+};
+
} // namespace testing
} // namespace grpc
diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc
index 856cd32c3c..c7acb7c631 100644
--- a/test/cpp/util/grpc_tool.cc
+++ b/test/cpp/util/grpc_tool.cc
@@ -321,6 +321,7 @@ bool GrpcTool::ListServices(int argc, const char** argv,
std::vector<grpc::string> service_list;
if (!desc_db.GetServices(&service_list)) {
+ fprintf(stderr, "Received an error when querying services endpoint.\n");
return false;
}