author David Garcia Quintas <dgq@google.com> 2016-02-08 20:07:19 -0800
committer David Garcia Quintas <dgq@google.com> 2016-02-08 20:07:19 -0800
commit 7e46618897790b0808d676e18cee7c47e40ca0e5 (patch)
tree c363ed99cce18a6443bcf18ec2b4b98f4878772c /test/cpp
parent 9508a42096762eef3665f046cc4f370874fd9f7c (diff)
parent 2dbaca8adce55fb8e54a1e4d5c19f75ffcc3fd79 (diff)
Merge branch 'master' of github.com:grpc/grpc into grpclb_api
Diffstat (limited to 'test/cpp')
-rw-r--r-- test/cpp/end2end/async_end2end_test.cc 410
-rw-r--r-- test/cpp/end2end/end2end_test.cc 500
-rw-r--r-- test/cpp/end2end/hybrid_end2end_test.cc 2
-rw-r--r-- test/cpp/end2end/test_service_impl.cc 168
-rw-r--r-- test/cpp/end2end/test_service_impl.h 17
-rw-r--r-- test/cpp/end2end/thread_stress_test.cc 143
-rw-r--r-- test/cpp/qps/client.h 11
-rw-r--r-- test/cpp/qps/client_async.cc 7
-rw-r--r-- test/cpp/qps/driver.cc 144
-rw-r--r-- test/cpp/qps/driver.h 3
-rw-r--r-- test/cpp/qps/limit_cores.cc 79
-rw-r--r-- test/cpp/qps/limit_cores.h 49
-rwxr-xr-x test/cpp/qps/qps-sweep.sh 14
-rw-r--r-- test/cpp/qps/qps_driver.cc 22
-rw-r--r-- test/cpp/qps/qps_worker.cc 36
-rw-r--r-- test/cpp/qps/qps_worker.h 7
-rw-r--r-- test/cpp/qps/server.h 13
-rw-r--r-- test/cpp/qps/worker.cc 2
-rw-r--r-- test/cpp/util/time_test.cc 4
19 files changed, 1376 insertions, 255 deletions
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index 0616cc07ee..252bda3798 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -32,6 +32,7 @@
*/
#include <memory>
+#include <thread>
#include <grpc++/channel.h>
#include <grpc++/client_context.h>
@@ -104,7 +105,10 @@ class Verifier : public PollingCheckRegion {
expectations_[tag(i)] = expect_ok;
return *this;
}
- void Verify(CompletionQueue* cq) {
+
+ void Verify(CompletionQueue* cq) { Verify(cq, false); }
+
+ void Verify(CompletionQueue* cq, bool ignore_ok) {
GPR_ASSERT(!expectations_.empty());
while (!expectations_.empty()) {
bool ok;
@@ -122,7 +126,9 @@ class Verifier : public PollingCheckRegion {
}
auto it = expectations_.find(got_tag);
EXPECT_TRUE(it != expectations_.end());
- EXPECT_EQ(it->second, ok);
+ if (!ignore_ok) {
+ EXPECT_EQ(it->second, ok);
+ }
expectations_.erase(it);
}
}
@@ -217,7 +223,7 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<bool> {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
@@ -270,7 +276,7 @@ TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
std::chrono::system_clock::time_point time_now(
@@ -315,7 +321,7 @@ TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncWriter<EchoRequest> > cli_stream(
+ std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
@@ -368,7 +374,7 @@ TEST_P(AsyncEnd2endTest, SimpleServerStreaming) {
ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncReader<EchoResponse> > cli_stream(
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
@@ -418,7 +424,7 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse> >
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
@@ -476,7 +482,7 @@ TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
cli_ctx.AddMetadata(meta1.first, meta1.second);
cli_ctx.AddMetadata(meta2.first, meta2.second);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
@@ -519,7 +525,7 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
std::pair<grpc::string, grpc::string> meta1("key1", "val1");
std::pair<grpc::string, grpc::string> meta2("key2", "val2");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
@@ -568,7 +574,7 @@ TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
std::pair<grpc::string, grpc::string> meta1("key1", "val1");
std::pair<grpc::string, grpc::string> meta2("key2", "val2");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
@@ -629,7 +635,7 @@ TEST_P(AsyncEnd2endTest, MetadataRpc) {
cli_ctx.AddMetadata(meta1.first, meta1.second);
cli_ctx.AddMetadata(meta2.first, meta2.second);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
@@ -690,7 +696,7 @@ TEST_P(AsyncEnd2endTest, ServerCheckCancellation) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
srv_ctx.AsyncNotifyWhenDone(tag(5));
@@ -725,7 +731,7 @@ TEST_P(AsyncEnd2endTest, ServerCheckDone) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
srv_ctx.AsyncNotifyWhenDone(tag(5));
@@ -759,7 +765,7 @@ TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
ClientContext cli_ctx;
send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub->AsyncUnimplemented(&cli_ctx, send_request, cq_.get()));
response_reader->Finish(&recv_response, &recv_status, tag(4));
@@ -769,8 +775,384 @@ TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
EXPECT_EQ("", recv_status.error_message());
}
+// This class is for testing scenarios where RPCs are cancelled on the server
+// by calling ServerContext::TryCancel()
+class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
+ protected:
+ typedef enum {
+ DO_NOT_CANCEL = 0,
+ CANCEL_BEFORE_PROCESSING,
+ CANCEL_DURING_PROCESSING,
+ CANCEL_AFTER_PROCESSING
+ } ServerTryCancelRequestPhase;
+
+ void ServerTryCancel(ServerContext* context) {
+ EXPECT_FALSE(context->IsCancelled());
+ context->TryCancel();
+ gpr_log(GPR_INFO, "Server called TryCancel()");
+ EXPECT_TRUE(context->IsCancelled());
+ }
+
+ // Helper for testing client-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
+ // any messages from the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
+ // messages from the client
+ //
+ // CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading all
+ // messages from the client (but before sending any status back to the
+ // client)
+ void TestClientStreamingServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ // Initiate the 'RequestStream' call on client
+ std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
+ stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
+ Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
+
+ // On the server, request to be notified of 'RequestStream' calls
+ // and receive the 'RequestStream' call just made by the client
+ service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+ Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+
+ // Client sends 3 messages (tags 3, 4 and 5)
+ for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
+ send_request.set_message("Ping " + std::to_string(tag_idx));
+ cli_stream->Write(send_request, tag(tag_idx));
+ Verifier(GetParam()).Expect(tag_idx, true).Verify(cq_.get());
+ }
+ cli_stream->WritesDone(tag(6));
+ Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+
+ bool expected_server_cq_result = true;
+ bool ignore_cq_result = false;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ ServerTryCancel(&srv_ctx);
+
+ // Since cancellation is done before server reads any results, we know
+ // for sure that all cq results will return false from this point forward
+ expected_server_cq_result = false;
+ }
+
+ std::thread* server_try_cancel_thd = NULL;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd = new std::thread(
+ &AsyncEnd2endServerTryCancelTest::ServerTryCancel, this, &srv_ctx);
+ // Server will cancel the RPC in a parallel thread while reading the
+ // requests from the client. Since the cancellation can happen at any time,
+ // some of the cq results (i.e. those until cancellation) might be true but
+ // that is non-deterministic. So it is better to ignore the cq results
+ ignore_cq_result = true;
+ }
+
+ // Server reads 3 messages (tags 6, 7 and 8)
+ for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
+ srv_stream.Read(&recv_request, tag(tag_idx));
+ Verifier(GetParam())
+ .Expect(tag_idx, expected_server_cq_result)
+ .Verify(cq_.get(), ignore_cq_result);
+ }
+
+ if (server_try_cancel_thd != NULL) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ ServerTryCancel(&srv_ctx);
+ }
+
+ // The RPC has been cancelled at this point for sure (i.e. irrespective of
+ // the value of `server_try_cancel`). So, from this point forward, we
+ // know that cq results are supposed to return false on the server.
+
+ // Server sends the final message and cancelled status (but the RPC is
+ // already cancelled at this point. So we expect the operation to fail)
+ srv_stream.Finish(send_response, Status::CANCELLED, tag(9));
+ Verifier(GetParam()).Expect(9, false).Verify(cq_.get());
+
+ // Client will see the cancellation
+ cli_stream->Finish(&recv_status, tag(10));
+ // TODO(sreek): The expectation here should be true. This is a bug (github
+ // issue #4972)
+ Verifier(GetParam()).Expect(10, false).Verify(cq_.get());
+ EXPECT_FALSE(recv_status.ok());
+ EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
+ }
+
+ // Helper for testing server-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before sending
+ // any messages to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while sending
+ // messages to the client
+ //
+ // CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after sending all
+ // messages to the client (but before sending any status back to the
+ // client)
+ void TestServerStreamingServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
+ send_request.set_message("Ping");
+ // Initiate the 'ResponseStream' call on the client
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
+ Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
+ // On the server, request to be notified of 'ResponseStream' calls and
+ // receive the call just made by the client
+ service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+ Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ bool expected_cq_result = true;
+ bool ignore_cq_result = false;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ ServerTryCancel(&srv_ctx);
+
+ // We know for sure that all cq results will be false from this point
+ // since the server cancelled the RPC
+ expected_cq_result = false;
+ }
+
+ std::thread* server_try_cancel_thd = NULL;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd = new std::thread(
+ &AsyncEnd2endServerTryCancelTest::ServerTryCancel, this, &srv_ctx);
+
+ // Server will cancel the RPC in a parallel thread while writing responses
+ // to the client. Since the cancellation can happen at any time, some of
+ // the cq results (i.e. those until cancellation) might be true but it is
+ // non-deterministic. So it is better to ignore the cq results
+ ignore_cq_result = true;
+ }
+
+ // Server sends three messages (tags 3, 4 and 5)
+ for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
+ send_response.set_message("Pong " + std::to_string(tag_idx));
+ srv_stream.Write(send_response, tag(tag_idx));
+ Verifier(GetParam())
+ .Expect(tag_idx, expected_cq_result)
+ .Verify(cq_.get(), ignore_cq_result);
+ }
+
+ if (server_try_cancel_thd != NULL) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ ServerTryCancel(&srv_ctx);
+ }
+
+ // Client attempts to read the three messages from the server
+ for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
+ cli_stream->Read(&recv_response, tag(tag_idx));
+ Verifier(GetParam())
+ .Expect(tag_idx, expected_cq_result)
+ .Verify(cq_.get(), ignore_cq_result);
+ }
+
+ // The RPC has been cancelled at this point for sure (i.e. irrespective of
+ // the value of `server_try_cancel`). So, from this point forward, we
+ // know that cq results are supposed to return false on the server.
+
+ // Server finishes the stream (but the RPC is already cancelled)
+ srv_stream.Finish(Status::CANCELLED, tag(9));
+ Verifier(GetParam()).Expect(9, false).Verify(cq_.get());
+
+ // Client will see the cancellation
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
+ EXPECT_FALSE(recv_status.ok());
+ EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
+ }
+
+ // Helper for testing bidirectional-streaming RPCs which are cancelled on the
+ // server.
+ //
+ // Depending on the value of server_try_cancel parameter, this will
+ // test one of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
+ // writing any messages from/to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
+ // messages from the client
+ //
+ // CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading all
+ // messages from the client (but before sending any status back to the
+ // client)
+ void TestBidiStreamingServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ // Initiate the call from the client side
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
+ Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
+
+ // On the server, request to be notified of the 'BidiStream' call and
+ // receive the call just made by the client
+ service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+ Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+
+ // Client sends the first and the only message
+ send_request.set_message("Ping");
+ cli_stream->Write(send_request, tag(3));
+ Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
+
+ bool expected_cq_result = true;
+ bool ignore_cq_result = false;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ ServerTryCancel(&srv_ctx);
+
+ // We know for sure that all cq results will be false from this point
+ // since the server cancelled the RPC
+ expected_cq_result = false;
+ }
+
+ std::thread* server_try_cancel_thd = NULL;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd = new std::thread(
+ &AsyncEnd2endServerTryCancelTest::ServerTryCancel, this, &srv_ctx);
+
+ // Since the server is going to cancel the RPC in a parallel thread, some of
+ // the cq results (i.e. those until the cancellation) might be true. Since
+ // that number is non-deterministic, it is better to ignore the cq results
+ ignore_cq_result = true;
+ }
+
+ srv_stream.Read(&recv_request, tag(4));
+ Verifier(GetParam())
+ .Expect(4, expected_cq_result)
+ .Verify(cq_.get(), ignore_cq_result);
+
+ send_response.set_message("Pong");
+ srv_stream.Write(send_response, tag(5));
+ Verifier(GetParam())
+ .Expect(5, expected_cq_result)
+ .Verify(cq_.get(), ignore_cq_result);
+
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier(GetParam())
+ .Expect(6, expected_cq_result)
+ .Verify(cq_.get(), ignore_cq_result);
+
+ // This is expected to succeed in all cases
+ cli_stream->WritesDone(tag(7));
+ Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
+
+ // This is expected to fail in all cases, i.e. for all values of
+ // server_try_cancel. This is because at this point, either there are no
+ // more msgs from the client (because the client called WritesDone) or the
+ // RPC is cancelled on the server
+ srv_stream.Read(&recv_request, tag(8));
+ Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
+
+ if (server_try_cancel_thd != NULL) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ ServerTryCancel(&srv_ctx);
+ }
+
+ // The RPC has been cancelled at this point for sure (i.e. irrespective of
+ // the value of `server_try_cancel`). So, from this point forward, we
+ // know that cq results are supposed to return false on the server.
+
+ srv_stream.Finish(Status::CANCELLED, tag(9));
+ Verifier(GetParam()).Expect(9, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
+ EXPECT_FALSE(recv_status.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, recv_status.error_code());
+ }
+};
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelBefore) {
+ TestClientStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelDuring) {
+ TestClientStreamingServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelAfter) {
+ TestClientStreamingServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelBefore) {
+ TestServerStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelDuring) {
+ TestServerStreamingServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelAfter) {
+ TestServerStreamingServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelBefore) {
+ TestBidiStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelDuring) {
+ TestBidiStreamingServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelAfter) {
+ TestBidiStreamingServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest,
::testing::Values(false, true));
+INSTANTIATE_TEST_CASE_P(AsyncEnd2endServerTryCancel,
+ AsyncEnd2endServerTryCancelTest,
+ ::testing::Values(false));
} // namespace
} // namespace testing
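
The new AsyncEnd2endServerTryCancelTest cases above lean on the tag/Verifier pattern: each async operation is labelled with a small integer tag, and the Verifier drains the completion queue, checking every returned tag against an expected ok bit (or skipping the ok check when a racing TryCancel() makes the result non-deterministic). Below is a minimal, fixture-free sketch of that matching loop; the tag() helper and the expectations map are modelled on the diff, not taken from any public gRPC API.

#include <cstdint>
#include <map>
#include <grpc++/completion_queue.h>

// Encode a small integer as the void* tag handed to async operations.
static void* tag(int i) { return reinterpret_cast<void*>(static_cast<intptr_t>(i)); }

// Drain the queue until every expected tag has been seen. When ignore_ok is
// true only tag arrival is checked, mirroring Verify(cq, ignore_ok) above.
static bool DrainExpectations(grpc::CompletionQueue* cq,
                              std::map<void*, bool> expectations,
                              bool ignore_ok) {
  while (!expectations.empty()) {
    void* got_tag;
    bool ok;
    if (!cq->Next(&got_tag, &ok)) return false;       // queue was shut down early
    auto it = expectations.find(got_tag);
    if (it == expectations.end()) return false;        // unexpected tag
    if (!ignore_ok && ok != it->second) return false;  // ok bit mismatch
    expectations.erase(it);
  }
  return true;
}

A test would populate the map with entries such as {tag(1), true} before issuing the corresponding operations, exactly as the Expect()/Verify() chain in the diff does.
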
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index 5a414ebc86..65da71b391 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -54,6 +54,7 @@
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
#include "test/cpp/util/string_ref_helper.h"
using grpc::testing::EchoRequest;
@@ -64,40 +65,6 @@ namespace grpc {
namespace testing {
namespace {
-const char* kServerCancelAfterReads = "cancel_after_reads";
-
-// When echo_deadline is requested, deadline seen in the ServerContext is set in
-// the response in seconds.
-void MaybeEchoDeadline(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) {
- if (request->has_param() && request->param().echo_deadline()) {
- gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- if (context->deadline() != system_clock::time_point::max()) {
- Timepoint2Timespec(context->deadline(), &deadline);
- }
- response->mutable_param()->set_request_deadline(deadline.tv_sec);
- }
-}
-
-void CheckServerAuthContext(const ServerContext* context,
- const grpc::string& expected_client_identity) {
- std::shared_ptr<const AuthContext> auth_ctx = context->auth_context();
- std::vector<grpc::string_ref> ssl =
- auth_ctx->FindPropertyValues("transport_security_type");
- EXPECT_EQ(1u, ssl.size());
- EXPECT_EQ("ssl", ToString(ssl[0]));
- if (expected_client_identity.length() == 0) {
- EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty());
- EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty());
- EXPECT_FALSE(auth_ctx->IsPeerAuthenticated());
- } else {
- auto identity = auth_ctx->GetPeerIdentity();
- EXPECT_TRUE(auth_ctx->IsPeerAuthenticated());
- EXPECT_EQ(1u, identity.size());
- EXPECT_EQ(expected_client_identity, identity[0]);
- }
-}
-
bool CheckIsLocalhost(const grpc::string& addr) {
const grpc::string kIpv6("ipv6:[::1]:");
const grpc::string kIpv4MappedIpv6("ipv6:[::ffff:127.0.0.1]:");
@@ -212,138 +179,6 @@ class Proxy : public ::grpc::testing::EchoTestService::Service {
std::unique_ptr< ::grpc::testing::EchoTestService::Stub> stub_;
};
-class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
- public:
- TestServiceImpl() : signal_client_(false), host_() {}
- explicit TestServiceImpl(const grpc::string& host)
- : signal_client_(false), host_(new grpc::string(host)) {}
-
- Status Echo(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) GRPC_OVERRIDE {
- response->set_message(request->message());
- MaybeEchoDeadline(context, request, response);
- if (host_) {
- response->mutable_param()->set_host(*host_);
- }
- if (request->has_param() && request->param().client_cancel_after_us()) {
- {
- std::unique_lock<std::mutex> lock(mu_);
- signal_client_ = true;
- }
- while (!context->IsCancelled()) {
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(request->param().client_cancel_after_us(),
- GPR_TIMESPAN)));
- }
- return Status::CANCELLED;
- } else if (request->has_param() &&
- request->param().server_cancel_after_us()) {
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(request->param().server_cancel_after_us(),
- GPR_TIMESPAN)));
- return Status::CANCELLED;
- } else if (!request->has_param() ||
- !request->param().skip_cancelled_check()) {
- EXPECT_FALSE(context->IsCancelled());
- }
-
- if (request->has_param() && request->param().echo_metadata()) {
- const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata =
- context->client_metadata();
- for (std::multimap<grpc::string_ref, grpc::string_ref>::const_iterator
- iter = client_metadata.begin();
- iter != client_metadata.end(); ++iter) {
- context->AddTrailingMetadata(ToString(iter->first),
- ToString(iter->second));
- }
- }
- if (request->has_param() &&
- (request->param().expected_client_identity().length() > 0 ||
- request->param().check_auth_context())) {
- CheckServerAuthContext(context,
- request->param().expected_client_identity());
- }
- if (request->has_param() &&
- request->param().response_message_length() > 0) {
- response->set_message(
- grpc::string(request->param().response_message_length(), '\0'));
- }
- if (request->has_param() && request->param().echo_peer()) {
- response->mutable_param()->set_peer(context->peer());
- }
- return Status::OK;
- }
-
- // Unimplemented is left unimplemented to test the returned error.
-
- Status RequestStream(ServerContext* context,
- ServerReader<EchoRequest>* reader,
- EchoResponse* response) GRPC_OVERRIDE {
- EchoRequest request;
- response->set_message("");
- int cancel_after_reads = 0;
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- client_initial_metadata = context->client_metadata();
- if (client_initial_metadata.find(kServerCancelAfterReads) !=
- client_initial_metadata.end()) {
- std::istringstream iss(ToString(
- client_initial_metadata.find(kServerCancelAfterReads)->second));
- iss >> cancel_after_reads;
- gpr_log(GPR_INFO, "cancel_after_reads %d", cancel_after_reads);
- }
- while (reader->Read(&request)) {
- if (cancel_after_reads == 1) {
- gpr_log(GPR_INFO, "return cancel status");
- return Status::CANCELLED;
- } else if (cancel_after_reads > 0) {
- cancel_after_reads--;
- }
- response->mutable_message()->append(request.message());
- }
- return Status::OK;
- }
-
- // Return 3 messages.
- // TODO(yangg) make it generic by adding a parameter into EchoRequest
- Status ResponseStream(ServerContext* context, const EchoRequest* request,
- ServerWriter<EchoResponse>* writer) GRPC_OVERRIDE {
- EchoResponse response;
- response.set_message(request->message() + "0");
- writer->Write(response);
- response.set_message(request->message() + "1");
- writer->Write(response);
- response.set_message(request->message() + "2");
- writer->Write(response);
-
- return Status::OK;
- }
-
- Status BidiStream(ServerContext* context,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream)
- GRPC_OVERRIDE {
- EchoRequest request;
- EchoResponse response;
- while (stream->Read(&request)) {
- gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
- response.set_message(request.message());
- stream->Write(response);
- }
- return Status::OK;
- }
-
- bool signal_client() {
- std::unique_lock<std::mutex> lock(mu_);
- return signal_client_;
- }
-
- private:
- bool signal_client_;
- std::mutex mu_;
- std::unique_ptr<grpc::string> host_;
-};
-
class TestServiceImplDupPkg
: public ::grpc::testing::duplicate::EchoTestService::Service {
public:
@@ -452,13 +287,18 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
TestServiceImplDupPkg dup_pkg_service_;
};
-static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
+static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
+ bool with_binary_metadata) {
EchoRequest request;
EchoResponse response;
request.set_message("Hello hello hello hello");
for (int i = 0; i < num_rpcs; ++i) {
ClientContext context;
+ if (with_binary_metadata) {
+ char bytes[8] = {'\0', '\1', '\2', '\3', '\4', '\5', '\6', (char)i};
+ context.AddMetadata("custom-bin", grpc::string(bytes, 8));
+ }
context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
Status s = stub->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
@@ -466,6 +306,325 @@ static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
}
}
+// This class is for testing scenarios where RPCs are cancelled on the server
+// by calling ServerContext::TryCancel()
+class End2endServerTryCancelTest : public End2endTest {
+ protected:
+ // Helper for testing client-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
+ // any messages from the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
+ // messages from the client
+ //
+ // CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading all
+ // the messages from the client
+ //
+ // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
+ void TestRequestStreamServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel, int num_msgs_to_send) {
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ // Send server_try_cancel value in the client metadata
+ context.AddMetadata(kServerTryCancelRequest,
+ std::to_string(server_try_cancel));
+
+ auto stream = stub_->RequestStream(&context, &response);
+
+ int num_msgs_sent = 0;
+ while (num_msgs_sent < num_msgs_to_send) {
+ request.set_message("hello");
+ if (!stream->Write(request)) {
+ break;
+ }
+ num_msgs_sent++;
+ }
+ gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent);
+
+ stream->WritesDone();
+ Status s = stream->Finish();
+
+ // At this point, we know for sure that the RPC was cancelled by the server
+ // since we passed the server_try_cancel value in the metadata. Depending on
+ // the value of server_try_cancel, the RPC might have been cancelled by the
+ // server at different stages. The following validates our expectations of
+ // the number of messages sent in various cancellation scenarios:
+
+ switch (server_try_cancel) {
+ case CANCEL_BEFORE_PROCESSING:
+ case CANCEL_DURING_PROCESSING:
+ // If the RPC is cancelled by the server before or while reading messages
+ // from the client, the client most likely did not get a chance to send
+ // all the messages it wanted to send, i.e. num_msgs_sent <=
+ // num_msgs_to_send
+ EXPECT_LE(num_msgs_sent, num_msgs_to_send);
+ break;
+
+ case CANCEL_AFTER_PROCESSING:
+ // If the RPC was cancelled after all messages were read by the server,
+ // the client did get a chance to send all its messages
+ EXPECT_EQ(num_msgs_sent, num_msgs_to_send);
+ break;
+
+ default:
+ gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
+ server_try_cancel);
+ EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
+ server_try_cancel <= CANCEL_AFTER_PROCESSING);
+ break;
+ }
+
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ }
+
+ // Helper for testing server-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before writing
+ // any messages to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while writing
+ // messages to the client
+ //
+ // CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after writing all
+ // the messages to the client
+ //
+ // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
+ void TestResponseStreamServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ // Send server_try_cancel in the client metadata
+ context.AddMetadata(kServerTryCancelRequest,
+ std::to_string(server_try_cancel));
+
+ request.set_message("hello");
+ auto stream = stub_->ResponseStream(&context, request);
+
+ int num_msgs_read = 0;
+ while (num_msgs_read < kNumResponseStreamsMsgs) {
+ if (!stream->Read(&response)) {
+ break;
+ }
+ EXPECT_EQ(response.message(),
+ request.message() + std::to_string(num_msgs_read));
+ num_msgs_read++;
+ }
+ gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
+
+ Status s = stream->Finish();
+
+ // Depending on the value of server_try_cancel, the RPC might have been
+ // cancelled by the server at different stages. The following validates our
+ // expectations of number of messages read in various cancellation
+ // scenarios:
+ switch (server_try_cancel) {
+ case CANCEL_BEFORE_PROCESSING:
+ // Server cancelled before sending any messages, which means the client
+ // would not have read any
+ EXPECT_EQ(num_msgs_read, 0);
+ break;
+
+ case CANCEL_DURING_PROCESSING:
+ // Server cancelled while writing messages. Client must have read less
+ // than or equal to the expected number of messages
+ EXPECT_LE(num_msgs_read, kNumResponseStreamsMsgs);
+ break;
+
+ case CANCEL_AFTER_PROCESSING:
+ // Server cancelled after writing all messages. Client must have read
+ // all messages
+ EXPECT_EQ(num_msgs_read, kNumResponseStreamsMsgs);
+ break;
+
+ default: {
+ gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
+ server_try_cancel);
+ EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
+ server_try_cancel <= CANCEL_AFTER_PROCESSING);
+ break;
+ }
+ }
+
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ }
+
+ // Helper for testing bidirectional-streaming RPCs which are cancelled on the
+ // server. Depending on the value of server_try_cancel parameter, this will
+ // test one of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
+ // writing any messages from/to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading/
+ // writing messages from/to the client
+ //
+ // CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading/writing
+ // all the messages from/to the client
+ //
+ // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
+ void TestBidiStreamServerCancel(ServerTryCancelRequestPhase server_try_cancel,
+ int num_messages) {
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ // Send server_try_cancel in the client metadata
+ context.AddMetadata(kServerTryCancelRequest,
+ std::to_string(server_try_cancel));
+
+ auto stream = stub_->BidiStream(&context);
+
+ int num_msgs_read = 0;
+ int num_msgs_sent = 0;
+ while (num_msgs_sent < num_messages) {
+ request.set_message("hello " + std::to_string(num_msgs_sent));
+ if (!stream->Write(request)) {
+ break;
+ }
+ num_msgs_sent++;
+
+ if (!stream->Read(&response)) {
+ break;
+ }
+ num_msgs_read++;
+
+ EXPECT_EQ(response.message(), request.message());
+ }
+ gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent);
+ gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
+
+ stream->WritesDone();
+ Status s = stream->Finish();
+
+ // Depending on the value of server_try_cancel, the RPC might have been
+ // cancelled by the server at different stages. The following validates our
+ // expectations of number of messages read in various cancellation
+ // scenarios:
+ switch (server_try_cancel) {
+ case CANCEL_BEFORE_PROCESSING:
+ EXPECT_EQ(num_msgs_read, 0);
+ break;
+
+ case CANCEL_DURING_PROCESSING:
+ EXPECT_LE(num_msgs_sent, num_messages);
+ EXPECT_LE(num_msgs_read, num_msgs_sent);
+ break;
+
+ case CANCEL_AFTER_PROCESSING:
+ EXPECT_EQ(num_msgs_sent, num_messages);
+ EXPECT_EQ(num_msgs_read, num_msgs_sent);
+ break;
+
+ default:
+ gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
+ server_try_cancel);
+ EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
+ server_try_cancel <= CANCEL_AFTER_PROCESSING);
+ break;
+ }
+
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ }
+};
+
+TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) {
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ context.AddMetadata(kServerTryCancelRequest,
+ std::to_string(CANCEL_BEFORE_PROCESSING));
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+}
+
+// Server to cancel before reading the request
+TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelBeforeReads) {
+ TestRequestStreamServerCancel(CANCEL_BEFORE_PROCESSING, 1);
+}
+
+// Server to cancel while reading a request from the stream in parallel
+TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelDuringRead) {
+ TestRequestStreamServerCancel(CANCEL_DURING_PROCESSING, 10);
+}
+
+// Server to cancel after reading all the requests but before returning to the
+// client
+TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelAfterReads) {
+ TestRequestStreamServerCancel(CANCEL_AFTER_PROCESSING, 4);
+}
+
+// Server to cancel before sending any response messages
+TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelBefore) {
+ TestResponseStreamServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+// Server to cancel while writing a response to the stream in parallel
+TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelDuring) {
+ TestResponseStreamServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+// Server to cancel after writing all the responses to the stream but before
+// returning to the client
+TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelAfter) {
+ TestResponseStreamServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+// Server to cancel before reading/writing any requests/responses on the stream
+TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelBefore) {
+ TestBidiStreamServerCancel(CANCEL_BEFORE_PROCESSING, 2);
+}
+
+// Server to cancel while reading/writing requests/responses on the stream in
+// parallel
+TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelDuring) {
+ TestBidiStreamServerCancel(CANCEL_DURING_PROCESSING, 10);
+}
+
+// Server to cancel after reading/writing all requests/responses on the stream
+// but before returning to the client
+TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelAfter) {
+ TestBidiStreamServerCancel(CANCEL_AFTER_PROCESSING, 5);
+}
+
+TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
+ ResetStub();
+ std::vector<std::thread*> threads;
+ for (int i = 0; i < 10; ++i) {
+ threads.push_back(new std::thread(SendRpc, stub_.get(), 10, true));
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i]->join();
+ delete threads[i];
+ }
+}
+
+TEST_P(End2endTest, MultipleRpcs) {
+ ResetStub();
+ std::vector<std::thread*> threads;
+ for (int i = 0; i < 10; ++i) {
+ threads.push_back(new std::thread(SendRpc, stub_.get(), 10, false));
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i]->join();
+ delete threads[i];
+ }
+}
+
TEST_P(End2endTest, RequestStreamOneRequest) {
ResetStub();
EchoRequest request;
@@ -803,14 +962,14 @@ class ProxyEnd2endTest : public End2endTest {
TEST_P(ProxyEnd2endTest, SimpleRpc) {
ResetStub();
- SendRpc(stub_.get(), 1);
+ SendRpc(stub_.get(), 1, false);
}
TEST_P(ProxyEnd2endTest, MultipleRpcs) {
ResetStub();
std::vector<std::thread*> threads;
for (int i = 0; i < 10; ++i) {
- threads.push_back(new std::thread(SendRpc, stub_.get(), 10));
+ threads.push_back(new std::thread(SendRpc, stub_.get(), 10, false));
}
for (int i = 0; i < 10; ++i) {
threads[i]->join();
@@ -1195,6 +1354,9 @@ INSTANTIATE_TEST_CASE_P(End2end, End2endTest,
::testing::Values(TestScenario(false, false),
TestScenario(false, true)));
+INSTANTIATE_TEST_CASE_P(End2endServerTryCancel, End2endServerTryCancelTest,
+ ::testing::Values(TestScenario(false, false)));
+
INSTANTIATE_TEST_CASE_P(ProxyEnd2end, ProxyEnd2endTest,
::testing::Values(TestScenario(false, false),
TestScenario(false, true),
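
The reworked SendRpc helper above optionally attaches binary metadata under the key "custom-bin". In gRPC, metadata keys ending in "-bin" carry arbitrary bytes (including NULs) and are base64-encoded on the wire. The following is a hedged sketch of the same client-side pattern, assuming the EchoTestService stub generated from the repo's echo.proto is available; it is an illustration, not the test code itself.

#include <string>
#include <grpc++/client_context.h>
#include "src/proto/grpc/testing/echo.grpc.pb.h"  // assumed: generated Echo stubs

// Issue one Echo RPC with a binary-valued metadata entry, as the new
// MultipleRpcsWithVariedBinaryMetadataValue test does per iteration.
grpc::Status EchoWithBinaryMetadata(grpc::testing::EchoTestService::Stub* stub) {
  grpc::testing::EchoRequest request;
  grpc::testing::EchoResponse response;
  request.set_message("Hello hello hello hello");

  grpc::ClientContext context;
  // Keys with a "-bin" suffix may contain embedded NUL bytes.
  char bytes[8] = {'\0', '\1', '\2', '\3', '\4', '\5', '\6', '\7'};
  context.AddMetadata("custom-bin", grpc::string(bytes, 8));
  context.set_compression_algorithm(GRPC_COMPRESS_GZIP);

  return stub->Echo(&context, request, &response);
}
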
diff --git a/test/cpp/end2end/hybrid_end2end_test.cc b/test/cpp/end2end/hybrid_end2end_test.cc
index f8405627f9..c72e20628f 100644
--- a/test/cpp/end2end/hybrid_end2end_test.cc
+++ b/test/cpp/end2end/hybrid_end2end_test.cc
@@ -216,7 +216,7 @@ class HybridEnd2endTest : public ::testing::Test {
}
// Create a separate cq for each potential handler.
for (int i = 0; i < 5; i++) {
- cqs_.push_back(std::move(builder.AddCompletionQueue()));
+ cqs_.push_back(builder.AddCompletionQueue());
}
server_ = builder.BuildAndStart();
}
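
The hybrid test change simply drops a redundant std::move around builder.AddCompletionQueue(): the call already returns a temporary std::unique_ptr, so moving it again adds nothing and can inhibit copy elision. A tiny illustration of the same rule, using a hypothetical factory function in place of ServerBuilder::AddCompletionQueue():

#include <memory>
#include <vector>

// Hypothetical factory standing in for ServerBuilder::AddCompletionQueue().
std::unique_ptr<int> MakeValue() { return std::unique_ptr<int>(new int(42)); }

void Collect(std::vector<std::unique_ptr<int>>* out) {
  // The returned temporary is already an rvalue; push it back directly
  // instead of wrapping it in std::move.
  out->push_back(MakeValue());
}
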
diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc
index c9a32ecf5a..66d11d0dfc 100644
--- a/test/cpp/end2end/test_service_impl.cc
+++ b/test/cpp/end2end/test_service_impl.cc
@@ -33,6 +33,8 @@
#include "test/cpp/end2end/test_service_impl.h"
+#include <thread>
+
#include <grpc++/security/credentials.h>
#include <grpc++/server_context.h>
#include <grpc/grpc.h>
@@ -82,6 +84,17 @@ void CheckServerAuthContext(const ServerContext* context,
Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) {
+ int server_try_cancel = GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+ if (server_try_cancel > DO_NOT_CANCEL) {
+ // Since this is a unary RPC, by the time this server handler is called,
+ // the 'request' message has already been read from the client. So the scenarios
+ // in server_try_cancel don't make much sense. Just cancel the RPC as long
+ // as server_try_cancel is not DO_NOT_CANCEL
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
response->set_message(request->message());
MaybeEchoDeadline(context, request, response);
if (host_) {
@@ -106,7 +119,8 @@ Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request,
gpr_time_from_micros(request->param().server_cancel_after_us(),
GPR_TIMESPAN)));
return Status::CANCELLED;
- } else {
+ } else if (!request->has_param() ||
+ !request->param().skip_cancelled_check()) {
EXPECT_FALSE(context->IsCancelled());
}
@@ -142,18 +156,39 @@ Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request,
Status TestServiceImpl::RequestStream(ServerContext* context,
ServerReader<EchoRequest>* reader,
EchoResponse* response) {
+ // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
+ // the server by calling ServerContext::TryCancel() depending on the value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads
+ // any message from the client
+ // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
+ // reading messages from the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
+ // all the messages from the client
+ int server_try_cancel = GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+
+ // If 'cancel_after_reads' is set in the metadata AND non-zero, the server
+ // will cancel the RPC (by just returning Status::CANCELLED - doesn't call
+ // ServerContext::TryCancel()) after reading the number of records specified
+ // by the 'cancel_after_reads' value set in the metadata.
+ int cancel_after_reads = GetIntValueFromMetadata(
+ kServerCancelAfterReads, context->client_metadata(), 0);
+
EchoRequest request;
response->set_message("");
- int cancel_after_reads = 0;
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- client_initial_metadata = context->client_metadata();
- if (client_initial_metadata.find(kServerCancelAfterReads) !=
- client_initial_metadata.end()) {
- std::istringstream iss(ToString(
- client_initial_metadata.find(kServerCancelAfterReads)->second));
- iss >> cancel_after_reads;
- gpr_log(GPR_INFO, "cancel_after_reads %d", cancel_after_reads);
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ std::thread* server_try_cancel_thd = NULL;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread(&TestServiceImpl::ServerTryCancel, this, context);
}
+
+ int num_msgs_read = 0;
while (reader->Read(&request)) {
if (cancel_after_reads == 1) {
gpr_log(GPR_INFO, "return cancel status");
@@ -163,21 +198,65 @@ Status TestServiceImpl::RequestStream(ServerContext* context,
}
response->mutable_message()->append(request.message());
}
+ gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read);
+
+ if (server_try_cancel_thd != NULL) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ return Status::CANCELLED;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
return Status::OK;
}
-// Return 3 messages.
+// Return 'kNumResponseStreamsMsgs' messages.
// TODO(yangg) make it generic by adding a parameter into EchoRequest
Status TestServiceImpl::ResponseStream(ServerContext* context,
const EchoRequest* request,
ServerWriter<EchoResponse>* writer) {
+ // If server_try_cancel is set in the metadata, the RPC is cancelled by the
+ // server by calling ServerContext::TryCancel() depending on the value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes
+ // any messages to the client
+ // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
+ // writing messages to the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes
+ // all the messages to the client
+ int server_try_cancel = GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
EchoResponse response;
- response.set_message(request->message() + "0");
- writer->Write(response);
- response.set_message(request->message() + "1");
- writer->Write(response);
- response.set_message(request->message() + "2");
- writer->Write(response);
+ std::thread* server_try_cancel_thd = NULL;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread(&TestServiceImpl::ServerTryCancel, this, context);
+ }
+
+ for (int i = 0; i < kNumResponseStreamsMsgs; i++) {
+ response.set_message(request->message() + std::to_string(i));
+ writer->Write(response);
+ }
+
+ if (server_try_cancel_thd != NULL) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ return Status::CANCELLED;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
return Status::OK;
}
@@ -185,15 +264,70 @@ Status TestServiceImpl::ResponseStream(ServerContext* context,
Status TestServiceImpl::BidiStream(
ServerContext* context,
ServerReaderWriter<EchoResponse, EchoRequest>* stream) {
+ // If server_try_cancel is set in the metadata, the RPC is cancelled by the
+ // server by calling ServerContext::TryCancel() depending on the value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/
+ // writes any messages from/to the client
+ // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
+ // reading/writing messages from/to the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server
+ // reads/writes all messages from/to the client
+ int server_try_cancel = GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+
EchoRequest request;
EchoResponse response;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ std::thread* server_try_cancel_thd = NULL;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread(&TestServiceImpl::ServerTryCancel, this, context);
+ }
+
while (stream->Read(&request)) {
gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
response.set_message(request.message());
stream->Write(response);
}
+
+ if (server_try_cancel_thd != NULL) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ return Status::CANCELLED;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
return Status::OK;
}
+int TestServiceImpl::GetIntValueFromMetadata(
+ const char* key,
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ int default_value) {
+ if (metadata.find(key) != metadata.end()) {
+ std::istringstream iss(ToString(metadata.find(key)->second));
+ iss >> default_value;
+ gpr_log(GPR_INFO, "%s : %d", key, default_value);
+ }
+
+ return default_value;
+}
+
+void TestServiceImpl::ServerTryCancel(ServerContext* context) {
+ EXPECT_FALSE(context->IsCancelled());
+ context->TryCancel();
+ gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request");
+ EXPECT_TRUE(context->IsCancelled());
+}
+
} // namespace testing
} // namespace grpc
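
With the changes above, the shared TestServiceImpl decides whether and when to call ServerContext::TryCancel() based on a 'server_try_cancel' value sent in the client's initial metadata (read via GetIntValueFromMetadata). Below is a hedged sketch of the client side of that contract; the key name and phase values are copied from test_service_impl.h in this diff, and the helper function itself is illustrative only.

#include <string>
#include <grpc++/client_context.h>

// Mirrors ServerTryCancelRequestPhase in test/cpp/end2end/test_service_impl.h.
enum ServerTryCancelRequestPhase {
  DO_NOT_CANCEL = 0,
  CANCEL_BEFORE_PROCESSING,
  CANCEL_DURING_PROCESSING,
  CANCEL_AFTER_PROCESSING
};

// Ask the test server to call ServerContext::TryCancel() at the given phase.
// "server_try_cancel" is the kServerTryCancelRequest key defined in the diff.
void RequestServerTryCancel(grpc::ClientContext* context,
                            ServerTryCancelRequestPhase phase) {
  context->AddMetadata("server_try_cancel", std::to_string(phase));
}
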
diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h
index 2c35b5614c..1ab6ced9e0 100644
--- a/test/cpp/end2end/test_service_impl.h
+++ b/test/cpp/end2end/test_service_impl.h
@@ -44,7 +44,16 @@
namespace grpc {
namespace testing {
+const int kNumResponseStreamsMsgs = 3;
const char* const kServerCancelAfterReads = "cancel_after_reads";
+const char* const kServerTryCancelRequest = "server_try_cancel";
+
+typedef enum {
+ DO_NOT_CANCEL = 0,
+ CANCEL_BEFORE_PROCESSING,
+ CANCEL_DURING_PROCESSING,
+ CANCEL_AFTER_PROCESSING
+} ServerTryCancelRequestPhase;
class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
public:
@@ -74,6 +83,14 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
}
private:
+ int GetIntValueFromMetadata(
+ const char* key,
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ int default_value);
+
+ void ServerTryCancel(ServerContext* context);
+
+ private:
bool signal_client_;
std::mutex mu_;
std::unique_ptr<grpc::string> host_;
diff --git a/test/cpp/end2end/thread_stress_test.cc b/test/cpp/end2end/thread_stress_test.cc
index 4e8860e843..e246c0b0e2 100644
--- a/test/cpp/end2end/thread_stress_test.cc
+++ b/test/cpp/end2end/thread_stress_test.cc
@@ -45,6 +45,7 @@
#include <grpc/support/time.h>
#include <gtest/gtest.h>
+#include "src/core/surface/api_trace.h"
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -54,6 +55,11 @@ using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using std::chrono::system_clock;
+const int kNumThreads = 100; // Number of threads
+const int kNumAsyncSendThreads = 2;
+const int kNumAsyncReceiveThreads = 50;
+const int kNumRpcs = 1000; // Number of RPCs per thread
+
namespace grpc {
namespace testing {
@@ -84,7 +90,7 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
MaybeEchoDeadline(context, request, response);
if (request->has_param() && request->param().client_cancel_after_us()) {
{
- std::unique_lock<std::mutex> lock(mu_);
+ unique_lock<mutex> lock(mu_);
signal_client_ = true;
}
while (!context->IsCancelled()) {
@@ -149,13 +155,13 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
}
bool signal_client() {
- std::unique_lock<std::mutex> lock(mu_);
+ unique_lock<mutex> lock(mu_);
return signal_client_;
}
private:
bool signal_client_;
- std::mutex mu_;
+ mutex mu_;
};
class TestServiceImplDupPkg
@@ -168,11 +174,10 @@ class TestServiceImplDupPkg
}
};
-class End2endTest : public ::testing::Test {
- protected:
- End2endTest() : kMaxMessageSize_(8192) {}
-
- void SetUp() GRPC_OVERRIDE {
+class CommonStressTest {
+ public:
+ CommonStressTest() : kMaxMessageSize_(8192) {}
+ void SetUp() {
int port = grpc_pick_unused_port_or_die();
server_address_ << "localhost:" << port;
// Setup server
@@ -185,15 +190,15 @@ class End2endTest : public ::testing::Test {
builder.RegisterService(&dup_pkg_service_);
server_ = builder.BuildAndStart();
}
-
- void TearDown() GRPC_OVERRIDE { server_->Shutdown(); }
-
+ void TearDown() { server_->Shutdown(); }
void ResetStub() {
std::shared_ptr<Channel> channel =
CreateChannel(server_address_.str(), InsecureChannelCredentials());
stub_ = grpc::testing::EchoTestService::NewStub(channel);
}
+ grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }
+ private:
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::unique_ptr<Server> server_;
std::ostringstream server_address_;
@@ -202,6 +207,16 @@ class End2endTest : public ::testing::Test {
TestServiceImplDupPkg dup_pkg_service_;
};
+class End2endTest : public ::testing::Test {
+ protected:
+ End2endTest() {}
+ void SetUp() GRPC_OVERRIDE { common_.SetUp(); }
+ void TearDown() GRPC_OVERRIDE { common_.TearDown(); }
+ void ResetStub() { common_.ResetStub(); }
+
+ CommonStressTest common_;
+};
+
static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
EchoRequest request;
EchoResponse response;
@@ -216,17 +231,115 @@ static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
}
TEST_F(End2endTest, ThreadStress) {
- ResetStub();
+ common_.ResetStub();
std::vector<std::thread*> threads;
- for (int i = 0; i < 100; ++i) {
- threads.push_back(new std::thread(SendRpc, stub_.get(), 1000));
+ for (int i = 0; i < kNumThreads; ++i) {
+ threads.push_back(new std::thread(SendRpc, common_.GetStub(), kNumRpcs));
}
- for (int i = 0; i < 100; ++i) {
+ for (int i = 0; i < kNumThreads; ++i) {
threads[i]->join();
delete threads[i];
}
}
+class AsyncClientEnd2endTest : public ::testing::Test {
+ protected:
+ AsyncClientEnd2endTest() : rpcs_outstanding_(0) {}
+
+ void SetUp() GRPC_OVERRIDE { common_.SetUp(); }
+ void TearDown() GRPC_OVERRIDE {
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cq_.Next(&ignored_tag, &ignored_ok))
+ ;
+ common_.TearDown();
+ }
+
+ void Wait() {
+ unique_lock<mutex> l(mu_);
+ while (rpcs_outstanding_ != 0) {
+ cv_.wait(l);
+ }
+
+ cq_.Shutdown();
+ }
+
+ struct AsyncClientCall {
+ EchoResponse response;
+ ClientContext context;
+ Status status;
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader;
+ };
+
+ void AsyncSendRpc(int num_rpcs) {
+ for (int i = 0; i < num_rpcs; ++i) {
+ AsyncClientCall* call = new AsyncClientCall;
+ EchoRequest request;
+ request.set_message("Hello: " + std::to_string(i));
+ call->response_reader =
+ common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
+ call->response_reader->Finish(&call->response, &call->status,
+ (void*)call);
+
+ unique_lock<mutex> l(mu_);
+ rpcs_outstanding_++;
+ }
+ }
+
+ void AsyncCompleteRpc() {
+ while (true) {
+ void* got_tag;
+ bool ok = false;
+ if (!cq_.Next(&got_tag, &ok)) break;
+ AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
+ if (!ok) {
+ gpr_log(GPR_DEBUG, "Error: %d", call->status.error_code());
+ }
+ delete call;
+
+ bool notify;
+ {
+ unique_lock<mutex> l(mu_);
+ rpcs_outstanding_--;
+ notify = (rpcs_outstanding_ == 0);
+ }
+ if (notify) {
+ cv_.notify_all();
+ }
+ }
+ }
+
+ CommonStressTest common_;
+ CompletionQueue cq_;
+ mutex mu_;
+ condition_variable cv_;
+ int rpcs_outstanding_;
+};
+
+TEST_F(AsyncClientEnd2endTest, ThreadStress) {
+ common_.ResetStub();
+ std::vector<std::thread*> send_threads, completion_threads;
+ for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
+ completion_threads.push_back(new std::thread(
+ &AsyncClientEnd2endTest_ThreadStress_Test::AsyncCompleteRpc, this));
+ }
+ for (int i = 0; i < kNumAsyncSendThreads; ++i) {
+ send_threads.push_back(
+ new std::thread(&AsyncClientEnd2endTest_ThreadStress_Test::AsyncSendRpc,
+ this, kNumRpcs));
+ }
+ for (int i = 0; i < kNumAsyncSendThreads; ++i) {
+ send_threads[i]->join();
+ delete send_threads[i];
+ }
+
+ Wait();
+ for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
+ completion_threads[i]->join();
+ delete completion_threads[i];
+ }
+}
+
} // namespace testing
} // namespace grpc
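For context on the new AsyncClientEnd2endTest above, a minimal sketch (not part of this commit) of the tag-ownership pattern it relies on: each outstanding RPC is a heap-allocated call object whose address doubles as the completion-queue tag, and the completion thread deletes it once Next() hands it back.

  // Hypothetical standalone sketch; the Echo* types are the same test protos
  // already used in this file.
  struct AsyncCall {
    grpc::ClientContext context;
    grpc::testing::EchoResponse response;
    grpc::Status status;
    std::unique_ptr<grpc::ClientAsyncResponseReader<grpc::testing::EchoResponse>>
        reader;
  };

  void IssueEcho(grpc::testing::EchoTestService::Stub* stub,
                 grpc::CompletionQueue* cq) {
    AsyncCall* call = new AsyncCall;  // ownership transfers to the drain loop
    grpc::testing::EchoRequest request;
    request.set_message("Hello");
    call->reader = stub->AsyncEcho(&call->context, request, cq);
    call->reader->Finish(&call->response, &call->status, call);  // tag == call
  }

  void DrainCompletions(grpc::CompletionQueue* cq) {
    void* tag;
    bool ok;
    while (cq->Next(&tag, &ok)) {  // returns false once Shutdown() has drained
      delete static_cast<AsyncCall*>(tag);
    }
  }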
diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index 97487fd0b2..50b2bf2514 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -36,16 +36,20 @@
#include <condition_variable>
#include <mutex>
+#include <vector>
#include <grpc++/support/byte_buffer.h>
#include <grpc++/support/slice.h>
+#include <grpc/support/log.h>
+#include "src/proto/grpc/testing/payloads.grpc.pb.h"
+#include "src/proto/grpc/testing/services.grpc.pb.h"
+
+#include "test/cpp/qps/limit_cores.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/interarrival.h"
#include "test/cpp/qps/timer.h"
#include "test/cpp/util/create_test_channel.h"
-#include "src/proto/grpc/testing/payloads.grpc.pb.h"
-#include "src/proto/grpc/testing/services.grpc.pb.h"
namespace grpc {
@@ -320,6 +324,8 @@ class ClientImpl : public Client {
std::function<std::unique_ptr<StubType>(std::shared_ptr<Channel>)>
create_stub)
: channels_(config.client_channels()), create_stub_(create_stub) {
+ cores_ = LimitCores(config.core_list().data(), config.core_list_size());
+
for (int i = 0; i < config.client_channels(); i++) {
channels_[i].init(config.server_targets(i % config.server_targets_size()),
config, create_stub_);
@@ -331,6 +337,7 @@ class ClientImpl : public Client {
virtual ~ClientImpl() {}
protected:
+ int cores_;
RequestType request_;
class ClientChannelInfo {
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 4229e1956e..f3f8f37051 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -159,6 +159,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
using Client::SetupLoadTest;
using Client::NextIssueTime;
using Client::closed_loop_;
+ using ClientImpl<StubType, RequestType>::cores_;
using ClientImpl<StubType, RequestType>::channels_;
using ClientImpl<StubType, RequestType>::request_;
AsyncClient(const ClientConfig& config,
@@ -345,11 +346,11 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
private:
bool val_;
};
- static int NumThreads(const ClientConfig& config) {
+ int NumThreads(const ClientConfig& config) {
int num_threads = config.async_client_threads();
if (num_threads <= 0) { // Use dynamic sizing
- num_threads = gpr_cpu_num_cores();
- gpr_log(GPR_INFO, "Sizing client server to %d threads", num_threads);
+ num_threads = cores_;
+ gpr_log(GPR_INFO, "Sizing async client to %d threads", num_threads);
}
return num_threads;
}
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index 490156aec2..80f6ada409 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -34,6 +34,7 @@
#include <deque>
#include <list>
#include <thread>
+#include <unordered_map>
#include <vector>
#include <grpc++/channel.h>
@@ -59,7 +60,42 @@ using std::vector;
namespace grpc {
namespace testing {
-static deque<string> get_hosts(const string& name) {
+static std::string get_host(const std::string& worker) {
+ char* host;
+ char* port;
+
+ gpr_split_host_port(worker.c_str(), &host, &port);
+ const string s(host);
+
+ gpr_free(host);
+ gpr_free(port);
+ return s;
+}
+
+static std::unordered_map<string, std::deque<int>> get_hosts_and_cores(
+ const deque<string>& workers) {
+ std::unordered_map<string, std::deque<int>> hosts;
+ for (auto it = workers.begin(); it != workers.end(); it++) {
+ const string host = get_host(*it);
+ if (hosts.find(host) == hosts.end()) {
+ auto stub = WorkerService::NewStub(
+ CreateChannel(*it, InsecureChannelCredentials()));
+ grpc::ClientContext ctx;
+ CoreRequest dummy;
+ CoreResponse cores;
+ grpc::Status s = stub->CoreCount(&ctx, dummy, &cores);
+ assert(s.ok());
+ std::deque<int> dq;
+ for (int i = 0; i < cores.cores(); i++) {
+ dq.push_back(i);
+ }
+ hosts[host] = dq;
+ }
+ }
+ return hosts;
+}
+
+static deque<string> get_workers(const string& name) {
char* env = gpr_getenv(name.c_str());
if (!env) return deque<string>();
@@ -105,18 +141,18 @@ struct ClientData {
std::unique_ptr<ScenarioResult> RunScenario(
const ClientConfig& initial_client_config, size_t num_clients,
- const ServerConfig& server_config, size_t num_servers, int warmup_seconds,
- int benchmark_seconds, int spawn_local_worker_count) {
+ const ServerConfig& initial_server_config, size_t num_servers,
+ int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count) {
// ClientContext allocations (all are destroyed at scope exit)
list<ClientContext> contexts;
// To be added to the result, containing the final configuration used for
// client and server (including host, etc.)
ClientConfig result_client_config;
- ServerConfig result_server_config;
+ const ServerConfig result_server_config = initial_server_config;
// Get client, server lists
- auto workers = get_hosts("QPS_WORKERS");
+ auto workers = get_workers("QPS_WORKERS");
ClientConfig client_config = initial_client_config;
// Spawn some local workers if desired
@@ -143,6 +179,9 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
}
+ // Set up the hosts and core counts
+ auto hosts_cores = get_hosts_and_cores(workers);
+
// if num_clients is set to <=0, do dynamic sizing: all workers
// except for servers are clients
if (num_clients <= 0) {
@@ -172,18 +211,49 @@ std::unique_ptr<ScenarioResult> RunScenario(
i);
servers[i].stub = WorkerService::NewStub(
CreateChannel(workers[i], InsecureChannelCredentials()));
+
+ ServerConfig server_config = initial_server_config;
+ char* host;
+ char* driver_port;
+ char* cli_target;
+ gpr_split_host_port(workers[i].c_str(), &host, &driver_port);
+ string host_str(host);
+ int server_core_limit = initial_server_config.core_limit();
+ int client_core_limit = initial_client_config.core_limit();
+
+ if (server_core_limit == 0 && client_core_limit > 0) {
+ // In this case, limit the server cores if it matches the
+ // same host as one or more clients
+ const auto& dq = hosts_cores.at(host_str);
+ bool match = false;
+ int limit = dq.size();
+ for (size_t cli = 0; cli < num_clients; cli++) {
+ if (host_str == get_host(workers[cli + num_servers])) {
+ limit -= client_core_limit;
+ match = true;
+ }
+ }
+ if (match) {
+ GPR_ASSERT(limit > 0);
+ server_core_limit = limit;
+ }
+ }
+ if (server_core_limit > 0) {
+ auto& dq = hosts_cores.at(host_str);
+ GPR_ASSERT(dq.size() >= static_cast<size_t>(server_core_limit));
+ for (int core = 0; core < server_core_limit; core++) {
+ server_config.add_core_list(dq.front());
+ dq.pop_front();
+ }
+ }
+
ServerArgs args;
- result_server_config = server_config;
*args.mutable_setup() = server_config;
servers[i].stream =
servers[i].stub->RunServer(runsc::AllocContext(&contexts, deadline));
GPR_ASSERT(servers[i].stream->Write(args));
ServerStatus init_status;
GPR_ASSERT(servers[i].stream->Read(&init_status));
- char* host;
- char* driver_port;
- char* cli_target;
- gpr_split_host_port(workers[i].c_str(), &host, &driver_port);
gpr_join_host_port(&cli_target, host, init_status.port());
client_config.add_server_targets(cli_target);
gpr_free(host);
@@ -191,19 +261,50 @@ std::unique_ptr<ScenarioResult> RunScenario(
gpr_free(cli_target);
}
+ // Targets are all set by now
+ result_client_config = client_config;
// Start clients
using runsc::ClientData;
// clients is array rather than std::vector to avoid gcc-4.4 issues
// where class contained in std::vector must have a copy constructor
auto* clients = new ClientData[num_clients];
for (size_t i = 0; i < num_clients; i++) {
- gpr_log(GPR_INFO, "Starting client on %s (worker #%d)",
- workers[i + num_servers].c_str(), i + num_servers);
+ const auto& worker = workers[i + num_servers];
+ gpr_log(GPR_INFO, "Starting client on %s (worker #%d)", worker.c_str(),
+ i + num_servers);
clients[i].stub = WorkerService::NewStub(
- CreateChannel(workers[i + num_servers], InsecureChannelCredentials()));
+ CreateChannel(worker, InsecureChannelCredentials()));
+ ClientConfig per_client_config = client_config;
+
+ int server_core_limit = initial_server_config.core_limit();
+ int client_core_limit = initial_client_config.core_limit();
+ if ((server_core_limit > 0) || (client_core_limit > 0)) {
+ auto& dq = hosts_cores.at(get_host(worker));
+ if (client_core_limit == 0) {
+ // limit client cores if it matches a server host
+ bool match = false;
+ int limit = dq.size();
+ for (size_t srv = 0; srv < num_servers; srv++) {
+ if (get_host(worker) == get_host(workers[srv])) {
+ match = true;
+ }
+ }
+ if (match) {
+ GPR_ASSERT(limit > 0);
+ client_core_limit = limit;
+ }
+ }
+ if (client_core_limit > 0) {
+ GPR_ASSERT(dq.size() >= static_cast<size_t>(client_core_limit));
+ for (int core = 0; core < client_core_limit; core++) {
+ per_client_config.add_core_list(dq.front());
+ dq.pop_front();
+ }
+ }
+ }
+
ClientArgs args;
- result_client_config = client_config;
- *args.mutable_setup() = client_config;
+ *args.mutable_setup() = per_client_config;
clients[i].stream =
clients[i].stub->RunClient(runsc::AllocContext(&contexts, deadline));
GPR_ASSERT(clients[i].stream->Write(args));
@@ -283,5 +384,18 @@ std::unique_ptr<ScenarioResult> RunScenario(
delete[] servers;
return result;
}
+
+void RunQuit() {
+ // Get client, server lists
+ auto workers = get_workers("QPS_WORKERS");
+ for (size_t i = 0; i < workers.size(); i++) {
+ auto stub = WorkerService::NewStub(
+ CreateChannel(workers[i], InsecureChannelCredentials()));
+ Void dummy;
+ grpc::ClientContext ctx;
+ GPR_ASSERT(stub->QuitWorker(&ctx, dummy, &dummy).ok());
+ }
+}
+
} // namespace testing
} // namespace grpc
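The per-host core accounting above repeats the same pop-from-the-front pattern for servers and clients; a condensed sketch, using a hypothetical helper name, of what each branch does once a core limit has been resolved:

  // Hypothetical helper; `free_cores` is the deque built by
  // get_hosts_and_cores() for this worker's host, and `limit` is the core
  // limit decided by the driver. Works for both ClientConfig and ServerConfig,
  // since both expose add_core_list().
  template <class Config>
  static void AssignCores(std::deque<int>* free_cores, int limit,
                          Config* config) {
    GPR_ASSERT(free_cores->size() >= static_cast<size_t>(limit));
    for (int core = 0; core < limit; core++) {
      config->add_core_list(free_cores->front());  // give this core to the worker
      free_cores->pop_front();                     // and retire it from the pool
    }
  }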
diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h
index 2a7cf805e5..3af61f7391 100644
--- a/test/cpp/qps/driver.h
+++ b/test/cpp/qps/driver.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
const grpc::testing::ServerConfig& server_config, size_t num_servers,
int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count);
+void RunQuit();
} // namespace testing
} // namespace grpc
diff --git a/test/cpp/qps/limit_cores.cc b/test/cpp/qps/limit_cores.cc
new file mode 100644
index 0000000000..fad9a323af
--- /dev/null
+++ b/test/cpp/qps/limit_cores.cc
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "test/cpp/qps/limit_cores.h"
+
+#include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+
+namespace grpc {
+namespace testing {
+
+#ifdef GPR_CPU_LINUX
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <sched.h>
+int LimitCores(const int* cores, int cores_size) {
+ const int num_cores = gpr_cpu_num_cores();
+ int cores_set = 0;
+
+ cpu_set_t* cpup = CPU_ALLOC(num_cores);
+ GPR_ASSERT(cpup);
+ const size_t size = CPU_ALLOC_SIZE(num_cores);
+ CPU_ZERO_S(size, cpup);
+
+ if (cores_size > 0) {
+ for (int i = 0; i < cores_size; i++) {
+ if (cores[i] < num_cores) {
+ CPU_SET_S(cores[i], size, cpup);
+ cores_set++;
+ }
+ }
+ } else {
+ for (int i = 0; i < num_cores; i++) {
+ CPU_SET_S(i, size, cpup);
+ cores_set++;
+ }
+ }
+ GPR_ASSERT(sched_setaffinity(0, size, cpup) == 0);
+ CPU_FREE(cpup);
+ return cores_set;
+}
+#else
+// LimitCores is not currently supported for non-Linux platforms
+int LimitCores(const int*, int) { return gpr_cpu_num_cores(); }
+#endif
+} // namespace testing
+} // namespace grpc
diff --git a/test/cpp/qps/limit_cores.h b/test/cpp/qps/limit_cores.h
new file mode 100644
index 0000000000..5482904a3c
--- /dev/null
+++ b/test/cpp/qps/limit_cores.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef TEST_QPS_LIMIT_CORES_H
+#define TEST_QPS_LIMIT_CORES_H
+
+namespace grpc {
+namespace testing {
+/// LimitCores: allow this worker to only run on the cores specified in the
+/// array \a cores, which is of length \a cores_size.
+///
+/// LimitCores takes array and size arguments (instead of a vector) so that a
+/// protobuf repeated field can be passed in directly. Use a cores_size of 0 to
+/// remove existing limits (i.e. from an empty repeated field).
+int LimitCores(const int* cores, int cores_size);
+} // namespace testing
+} // namespace grpc
+
+#endif // TEST_QPS_LIMIT_CORES_H
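As used elsewhere in this commit (client.h and server.h), the repeated core_list field is passed straight through; a minimal caller sketch, with a hypothetical function name:

  // Hypothetical caller; `config` carries the core_list filled in by the driver.
  void ApplyCoreLimit(const grpc::testing::ServerConfig& config) {
    // An empty core_list (cores_size == 0) resets affinity to all cores.
    int usable = grpc::testing::LimitCores(config.core_list().data(),
                                           config.core_list_size());
    gpr_log(GPR_INFO, "Worker limited to %d cores", usable);
  }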
diff --git a/test/cpp/qps/qps-sweep.sh b/test/cpp/qps/qps-sweep.sh
index 333f4bd7d0..539da1d893 100755
--- a/test/cpp/qps/qps-sweep.sh
+++ b/test/cpp/qps/qps-sweep.sh
@@ -57,6 +57,20 @@ for secure in true false; do
--async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
--num_servers=1 --num_clients=0
+ # Scenario 2b: QPS with a single server core
+ "$bins"/opt/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
+ --server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=100 \
+ --client_channels=64 --bbuf_req_size=0 --bbuf_resp_size=0 \
+ --async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
+ --num_servers=1 --num_clients=0 --server_core_limit=1
+
+ # Scenario 2c: protobuf-based QPS
+ "$bins"/opt/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
+ --server_type=ASYNC_SERVER --outstanding_rpcs_per_channel=100 \
+ --client_channels=64 --simple_req_size=0 --simple_resp_size=0 \
+ --async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
+ --num_servers=1 --num_clients=0
+
# Scenario 3: Latency at near-peak load (TBD)
# Scenario 4: Single-channel bidirectional throughput test (like TCP_STREAM).
diff --git a/test/cpp/qps/qps_driver.cc b/test/cpp/qps/qps_driver.cc
index aa3cb68821..69fb4d75e8 100644
--- a/test/cpp/qps/qps_driver.cc
+++ b/test/cpp/qps/qps_driver.cc
@@ -48,14 +48,13 @@ DEFINE_int32(warmup_seconds, 5, "Warmup time (in seconds)");
DEFINE_int32(benchmark_seconds, 30, "Benchmark time (in seconds)");
DEFINE_int32(local_workers, 0, "Number of local workers to start");
-// Common config
-DEFINE_string(rpc_type, "UNARY", "Type of RPC: UNARY or STREAMING");
-
// Server config
DEFINE_int32(async_server_threads, 1, "Number of threads for async servers");
DEFINE_string(server_type, "SYNC_SERVER", "Server type");
+DEFINE_int32(server_core_limit, -1, "Limit on server cores to use");
// Client config
+DEFINE_string(rpc_type, "UNARY", "Type of RPC: UNARY or STREAMING");
DEFINE_int32(outstanding_rpcs_per_channel, 1,
"Number of outstanding rpcs per channel");
DEFINE_int32(client_channels, 1, "Number of client channels");
@@ -75,8 +74,12 @@ DEFINE_double(determ_load, -1.0, "Deterministic offered load (qps)");
DEFINE_double(pareto_base, -1.0, "Pareto base interarrival time (us)");
DEFINE_double(pareto_alpha, -1.0, "Pareto alpha value");
+DEFINE_int32(client_core_limit, -1, "Limit on client cores to use");
+
DEFINE_bool(secure_test, false, "Run a secure test");
+DEFINE_bool(quit, false, "Quit the workers");
+
using grpc::testing::ClientConfig;
using grpc::testing::ServerConfig;
using grpc::testing::ClientType;
@@ -89,6 +92,11 @@ namespace grpc {
namespace testing {
static void QpsDriver() {
+ if (FLAGS_quit) {
+ RunQuit();
+ return;
+ }
+
RpcType rpc_type;
GPR_ASSERT(RpcType_Parse(FLAGS_rpc_type, &rpc_type));
@@ -151,10 +159,18 @@ static void QpsDriver() {
client_config.mutable_histogram_params()->set_max_possible(
Histogram::default_max_possible());
+ if (FLAGS_client_core_limit > 0) {
+ client_config.set_core_limit(FLAGS_client_core_limit);
+ }
+
ServerConfig server_config;
server_config.set_server_type(server_type);
server_config.set_async_server_threads(FLAGS_async_server_threads);
+ if (FLAGS_server_core_limit > 0) {
+ server_config.set_core_limit(FLAGS_server_core_limit);
+ }
+
if (FLAGS_secure_test) {
// Set up security params
SecurityParams security;
diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc
index 6316605aaf..9442017ddf 100644
--- a/test/cpp/qps/qps_worker.cc
+++ b/test/cpp/qps/qps_worker.cc
@@ -47,6 +47,7 @@
#include <grpc++/server_builder.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/cpu.h>
#include <grpc/support/histogram.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
@@ -83,15 +84,10 @@ static std::unique_ptr<Client> CreateClient(const ClientConfig& config) {
abort();
}
-static void LimitCores(int cores) {}
-
static std::unique_ptr<Server> CreateServer(const ServerConfig& config) {
gpr_log(GPR_INFO, "Starting server of type %s",
ServerType_Name(config.server_type()).c_str());
- if (config.core_limit() > 0) {
- LimitCores(config.core_limit());
- }
switch (config.server_type()) {
case ServerType::SYNC_SERVER:
return CreateSynchronousServer(config);
@@ -107,8 +103,8 @@ static std::unique_ptr<Server> CreateServer(const ServerConfig& config) {
class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
public:
- explicit WorkerServiceImpl(int server_port)
- : acquired_(false), server_port_(server_port) {}
+ WorkerServiceImpl(int server_port, QpsWorker* worker)
+ : acquired_(false), server_port_(server_port), worker_(worker) {}
Status RunClient(ServerContext* ctx,
ServerReaderWriter<ClientStatus, ClientArgs>* stream)
@@ -138,6 +134,22 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
return ret;
}
+ Status CoreCount(ServerContext* ctx, const CoreRequest*,
+ CoreResponse* resp) GRPC_OVERRIDE {
+ resp->set_cores(gpr_cpu_num_cores());
+ return Status::OK;
+ }
+
+ Status QuitWorker(ServerContext* ctx, const Void*, Void*) GRPC_OVERRIDE {
+ InstanceGuard g(this);
+ if (!g.Acquired()) {
+ return Status(StatusCode::RESOURCE_EXHAUSTED, "");
+ }
+
+ worker_->MarkDone();
+ return Status::OK;
+ }
+
private:
// Protect against multiple clients using this worker at once.
class InstanceGuard {
@@ -248,10 +260,12 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
std::mutex mu_;
bool acquired_;
int server_port_;
+ QpsWorker* worker_;
};
QpsWorker::QpsWorker(int driver_port, int server_port) {
- impl_.reset(new WorkerServiceImpl(server_port));
+ impl_.reset(new WorkerServiceImpl(server_port, this));
+ gpr_atm_rel_store(&done_, static_cast<gpr_atm>(0));
char* server_address = NULL;
gpr_join_host_port(&server_address, "::", driver_port);
@@ -267,5 +281,11 @@ QpsWorker::QpsWorker(int driver_port, int server_port) {
QpsWorker::~QpsWorker() {}
+bool QpsWorker::Done() const {
+ return (gpr_atm_acq_load(&done_) != static_cast<gpr_atm>(0));
+}
+void QpsWorker::MarkDone() {
+ gpr_atm_rel_store(&done_, static_cast<gpr_atm>(1));
+}
} // namespace testing
} // namespace grpc
diff --git a/test/cpp/qps/qps_worker.h b/test/cpp/qps/qps_worker.h
index 27de69fa65..624c182100 100644
--- a/test/cpp/qps/qps_worker.h
+++ b/test/cpp/qps/qps_worker.h
@@ -36,6 +36,8 @@
#include <memory>
+#include <grpc/support/atm.h>
+
namespace grpc {
class Server;
@@ -49,9 +51,14 @@ class QpsWorker {
explicit QpsWorker(int driver_port, int server_port = 0);
~QpsWorker();
+ bool Done() const;
+ void MarkDone();
+
private:
std::unique_ptr<WorkerServiceImpl> impl_;
std::unique_ptr<Server> server_;
+
+ gpr_atm done_;
};
} // namespace testing
diff --git a/test/cpp/qps/server.h b/test/cpp/qps/server.h
index 196fdac8f3..94a6f8acfa 100644
--- a/test/cpp/qps/server.h
+++ b/test/cpp/qps/server.h
@@ -34,14 +34,16 @@
#ifndef TEST_QPS_SERVER_H
#define TEST_QPS_SERVER_H
-#include <grpc/support/cpu.h>
#include <grpc++/security/server_credentials.h>
+#include <grpc/support/cpu.h>
+#include <vector>
+#include "src/proto/grpc/testing/control.grpc.pb.h"
+#include "src/proto/grpc/testing/messages.grpc.pb.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/port.h"
+#include "test/cpp/qps/limit_cores.h"
#include "test/cpp/qps/timer.h"
-#include "src/proto/grpc/testing/messages.grpc.pb.h"
-#include "src/proto/grpc/testing/control.grpc.pb.h"
namespace grpc {
namespace testing {
@@ -49,8 +51,10 @@ namespace testing {
class Server {
public:
explicit Server(const ServerConfig& config) : timer_(new Timer) {
+ cores_ = LimitCores(config.core_list().data(), config.core_list_size());
if (config.port()) {
port_ = config.port();
+
} else {
port_ = grpc_pick_unused_port_or_die();
}
@@ -86,7 +90,7 @@ class Server {
}
int port() const { return port_; }
- int cores() const { return gpr_cpu_num_cores(); }
+ int cores() const { return cores_; }
static std::shared_ptr<ServerCredentials> CreateServerCredentials(
const ServerConfig& config) {
if (config.has_security_params()) {
@@ -103,6 +107,7 @@ class Server {
private:
int port_;
+ int cores_;
std::unique_ptr<Timer> timer_;
};
diff --git a/test/cpp/qps/worker.cc b/test/cpp/qps/worker.cc
index a1e73e9abe..f42cfe3255 100644
--- a/test/cpp/qps/worker.cc
+++ b/test/cpp/qps/worker.cc
@@ -56,7 +56,7 @@ namespace testing {
static void RunServer() {
QpsWorker worker(FLAGS_driver_port, FLAGS_server_port);
- while (!got_sigint) {
+ while (!got_sigint && !worker.Done()) {
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_seconds(5, GPR_TIMESPAN)));
}
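Taken together with the qps_worker.cc and qps_worker.h hunks above, the shutdown path is: the driver's RunQuit() issues QuitWorker to each worker, the worker service calls MarkDone(), and the loop above exits on its next wakeup. A compressed sketch of that handshake, with the worker pieces inlined for illustration only:

  // Illustrative only; the real flag lives on QpsWorker (see qps_worker.h above).
  class DoneFlag {
   public:
    DoneFlag() { gpr_atm_rel_store(&done_, static_cast<gpr_atm>(0)); }
    void MarkDone() { gpr_atm_rel_store(&done_, static_cast<gpr_atm>(1)); }
    bool Done() const {
      return gpr_atm_acq_load(&done_) != static_cast<gpr_atm>(0);
    }

   private:
    gpr_atm done_;
  };

  // The polling loop then reduces to:
  //   while (!got_sigint && !worker.Done()) { /* sleep 5s */ }
  // and QuitWorker() simply calls worker->MarkDone() after acquiring the
  // single-use InstanceGuard.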
diff --git a/test/cpp/util/time_test.cc b/test/cpp/util/time_test.cc
index 1e501dfd28..48c6ce7697 100644
--- a/test/cpp/util/time_test.cc
+++ b/test/cpp/util/time_test.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,7 @@ namespace {
class TimeTest : public ::testing::Test {};
TEST_F(TimeTest, AbsolutePointTest) {
- long us = 10000000L;
+ int64_t us = 10000000L;
gpr_timespec ts = gpr_time_from_micros(us, GPR_TIMESPAN);
ts.clock_type = GPR_CLOCK_REALTIME;
system_clock::time_point tp{microseconds(us)};