path: root/test/cpp
author    Sree Kuchibhotla <sreek@google.com>  2017-04-06 13:09:17 -0700
committer Sree Kuchibhotla <sreek@google.com>  2017-04-06 13:09:17 -0700
commit    a6ff103446ef739b5eb11a76dda2f8b230532cd5 (patch)
tree      f0a6bf05ef83081c634cffabd233893d69510d1e /test/cpp
parent    3f182df7de52374e600264af7d76f1f6f73da6b8 (diff)
parent    a956d99978c46027663da22ff758506e83f1b7f5 (diff)
Merge branch 'master' into cq_create_api_changes
Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/end2end/filter_end2end_test.cc           5
-rw-r--r--  test/cpp/grpclb/grpclb_api_test.cc                2
-rw-r--r--  test/cpp/grpclb/grpclb_test.cc                    2
-rw-r--r--  test/cpp/interop/client.cc                        3
-rw-r--r--  test/cpp/interop/client_helper.cc                 3
-rw-r--r--  test/cpp/interop/client_helper.h                  4
-rw-r--r--  test/cpp/microbenchmarks/bm_call_create.cc       32
-rw-r--r--  test/cpp/microbenchmarks/bm_chttp2_transport.cc  81
-rw-r--r--  test/cpp/qps/client_async.cc                    132
-rw-r--r--  test/cpp/qps/client_sync.cc                      18
10 files changed, 204 insertions, 78 deletions
diff --git a/test/cpp/end2end/filter_end2end_test.cc b/test/cpp/end2end/filter_end2end_test.cc
index bd384f68b4..2f873eeaa8 100644
--- a/test/cpp/end2end/filter_end2end_test.cc
+++ b/test/cpp/end2end/filter_end2end_test.cc
@@ -122,8 +122,9 @@ class ChannelDataImpl : public ChannelData {
class CallDataImpl : public CallData {
public:
- void StartTransportStreamOp(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- TransportStreamOp* op) override {
+ void StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ TransportStreamOpBatch* op) override {
// Incrementing the counter could be done from Init(), but we want
// to test that the individual methods are actually called correctly.
if (op->recv_initial_metadata() != nullptr) IncrementCallCounter();
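The hunk above tracks the core rename of grpc_transport_stream_op to grpc_transport_stream_op_batch: sub-op presence is now signalled by booleans on the batch, with the per-op data moved into op->payload (see the bm_call_create.cc hunks below). A minimal sketch of the equivalent core-level hook, with a hypothetical filter name, the vtable wiring elided, and assuming the usual grpc_call_next_op pass-through:

static void ExampleStartTransportStreamOpBatch(grpc_exec_ctx *exec_ctx,
                                               grpc_call_element *elem,
                                               grpc_transport_stream_op_batch *op) {
  if (op->recv_initial_metadata) {
    // per-op data now lives under op->payload->recv_initial_metadata
  }
  // Forward the whole batch to the next element in the call stack.
  grpc_call_next_op(exec_ctx, elem, op);
}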
diff --git a/test/cpp/grpclb/grpclb_api_test.cc b/test/cpp/grpclb/grpclb_api_test.cc
index 82ccf436f8..d9df2bb673 100644
--- a/test/cpp/grpclb/grpclb_api_test.cc
+++ b/test/cpp/grpclb/grpclb_api_test.cc
@@ -34,7 +34,7 @@
#include <grpc++/impl/codegen/config.h>
#include <gtest/gtest.h>
-#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/proto/grpc/lb/v1/load_balancer.pb.h" // C++ version
diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc
index e0ae86955e..a80431f46a 100644
--- a/test/cpp/grpclb/grpclb_test.cc
+++ b/test/cpp/grpclb/grpclb_test.cc
@@ -51,7 +51,7 @@
#include <grpc++/impl/codegen/config.h>
extern "C" {
-#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/sockaddr.h"
diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc
index 5688ab7971..369413e6a1 100644
--- a/test/cpp/interop/client.cc
+++ b/test/cpp/interop/client.cc
@@ -99,6 +99,7 @@ DEFINE_bool(do_not_abort_on_transient_failures, false,
using grpc::testing::CreateChannelForTestCase;
using grpc::testing::GetServiceAccountJsonKey;
+using grpc::testing::UpdateActions;
int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
@@ -165,6 +166,8 @@ int main(int argc, char** argv) {
// actions["cacheable_unary"] =
// std::bind(&grpc::testing::InteropClient::DoCacheableUnary, &client);
+ UpdateActions(&actions);
+
if (FLAGS_test_case == "all") {
for (const auto& action : actions) {
action.second();
diff --git a/test/cpp/interop/client_helper.cc b/test/cpp/interop/client_helper.cc
index d3192ad0c9..784cd2826d 100644
--- a/test/cpp/interop/client_helper.cc
+++ b/test/cpp/interop/client_helper.cc
@@ -89,6 +89,9 @@ grpc::string GetOauth2AccessToken() {
return access_token;
}
+void UpdateActions(
+ std::unordered_map<grpc::string, std::function<bool()>>* actions) {}
+
std::shared_ptr<Channel> CreateChannelForTestCase(
const grpc::string& test_case) {
GPR_ASSERT(FLAGS_server_port);
diff --git a/test/cpp/interop/client_helper.h b/test/cpp/interop/client_helper.h
index 622b96e4fb..387530a21c 100644
--- a/test/cpp/interop/client_helper.h
+++ b/test/cpp/interop/client_helper.h
@@ -35,6 +35,7 @@
#define GRPC_TEST_CPP_INTEROP_CLIENT_HELPER_H
#include <memory>
+#include <unordered_map>
#include <grpc++/channel.h>
@@ -47,6 +48,9 @@ grpc::string GetServiceAccountJsonKey();
grpc::string GetOauth2AccessToken();
+void UpdateActions(
+ std::unordered_map<grpc::string, std::function<bool()>>* actions);
+
std::shared_ptr<Channel> CreateChannelForTestCase(
const grpc::string& test_case);
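Taken together, the three interop hunks add an UpdateActions() extension point: client.cc now calls it on the test-case map right before dispatch, and the stock definition in client_helper.cc is a no-op. A sketch of how a custom build might use the hook; the test-case name and helper are hypothetical:

#include <functional>
#include <unordered_map>

#include <grpc++/impl/codegen/config.h>

static bool DoMyExtraCase() {
  // drive a build-specific interop scenario here
  return true;
}

void UpdateActions(
    std::unordered_map<grpc::string, std::function<bool()>>* actions) {
  // Entries added here become selectable via --test_case, and are also
  // picked up by the "all" loop in client.cc.
  (*actions)["my_extra_case"] = DoMyExtraCase;
}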
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index ed6668d92a..15e9425f01 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -45,8 +45,8 @@
#include <grpc/support/string_util.h>
extern "C" {
-#include "src/core/ext/client_channel/client_channel.h"
-#include "src/core/ext/load_reporting/load_reporting_filter.h"
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/ext/filters/load_reporting/load_reporting_filter.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/compress_filter.h"
#include "src/core/lib/channel/connected_channel.h"
@@ -54,6 +54,7 @@ extern "C" {
#include "src/core/lib/channel/http_client_filter.h"
#include "src/core/lib/channel/http_server_filter.h"
#include "src/core/lib/channel/message_size_filter.h"
+#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/transport_impl.h"
}
@@ -152,6 +153,7 @@ static void BM_LameChannelCallCreateCpp(benchmark::State &state) {
grpc::testing::EchoResponse recv_response;
grpc::Status recv_status;
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc::ClientContext cli_ctx;
auto reader = stub->AsyncEcho(&cli_ctx, send_request, &cq);
reader->Finish(&recv_response, &recv_status, tag(0));
@@ -221,7 +223,7 @@ namespace dummy_filter {
static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {}
+ grpc_transport_stream_op_batch *op) {}
static void StartTransportOp(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -296,7 +298,7 @@ void SetPollsetSet(grpc_exec_ctx *exec_ctx, grpc_transport *self,
/* implementation of grpc_transport_perform_stream_op */
void PerformStreamOp(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_transport_stream_op *op) {
+ grpc_stream *stream, grpc_transport_stream_op_batch *op) {
grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
@@ -346,13 +348,15 @@ class SendEmptyMetadata {
memset(&op_, 0, sizeof(op_));
op_.on_complete = grpc_closure_init(&closure_, DoNothing, nullptr,
grpc_schedule_on_exec_ctx);
+ op_.send_initial_metadata = true;
+ op_.payload = &op_payload_;
}
class Op {
public:
Op(grpc_exec_ctx *exec_ctx, SendEmptyMetadata *p, grpc_call_stack *s) {
grpc_metadata_batch_init(&batch_);
- p->op_.send_initial_metadata = &batch_;
+ p->op_payload_.send_initial_metadata.send_initial_metadata = &batch_;
}
void Finish(grpc_exec_ctx *exec_ctx) {
grpc_metadata_batch_destroy(exec_ctx, &batch_);
@@ -366,7 +370,8 @@ class SendEmptyMetadata {
const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
const gpr_timespec start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_slice method_ = grpc_slice_from_static_string("/foo/bar");
- grpc_transport_stream_op op_;
+ grpc_transport_stream_op_batch op_;
+ grpc_transport_stream_op_batch_payload op_payload_;
grpc_closure closure_;
};
@@ -426,6 +431,7 @@ static void BM_IsolatedFilter(benchmark::State &state) {
const int kArenaSize = 4096;
call_args.arena = gpr_arena_create(kArenaSize);
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
GRPC_ERROR_UNREF(grpc_call_stack_init(&exec_ctx, channel_stack, 1,
DoNothing, NULL, &call_args));
typename TestOp::Op op(&exec_ctx, &test_op_data, call_stack);
@@ -488,13 +494,16 @@ namespace isolated_call_filter {
static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
if (op->recv_initial_metadata) {
- grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
- GRPC_ERROR_NONE);
+ grpc_closure_sched(
+ exec_ctx,
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
}
if (op->recv_message) {
- grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_NONE);
}
grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
@@ -590,6 +599,7 @@ static void BM_IsolatedCall_NoOp(benchmark::State &state) {
void *method_hdl =
grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL);
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_destroy(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, NULL));
@@ -628,6 +638,7 @@ static void BM_IsolatedCall_Unary(benchmark::State &state) {
ops[5].data.recv_status_on_client.status_details = &status_details;
ops[5].data.recv_status_on_client.trailing_metadata = &recv_trailing_metadata;
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call *call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, NULL);
@@ -670,6 +681,7 @@ static void BM_IsolatedCall_StreamingSend(benchmark::State &state) {
ops[0].op = GRPC_OP_SEND_MESSAGE;
ops[0].data.send_message.send_message = send_message;
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_start_batch(call, ops, 1, tag(2), NULL);
grpc_completion_queue_next(fixture.cq(),
gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
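The SendEmptyMetadata and filter hunks above show the shape of the new batch API: what used to be a single struct of per-op pointers is now a batch of presence flags plus a separate payload struct carrying the data. (The benchmarks also gain GPR_TIMER_SCOPE("BenchmarkCycle", 0) markers so each iteration shows up in latency profiles.) A standalone sketch of filling a send-initial-metadata op under the new layout, assuming op, payload, and batch all outlive the transport's use of them:

#include <string.h>

static void FillSendInitialMetadata(
    grpc_transport_stream_op_batch *op,
    grpc_transport_stream_op_batch_payload *payload,
    grpc_metadata_batch *batch) {
  memset(op, 0, sizeof(*op));
  op->payload = payload;             // flags and data travel in separate structs
  op->send_initial_metadata = true;  // old API: op->send_initial_metadata = batch;
  op->payload->send_initial_metadata.send_initial_metadata = batch;
}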
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index 254d57de20..c89f349ca7 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -207,7 +207,7 @@ class Stream {
static_cast<grpc_stream *>(stream_), closure);
}
- void Op(grpc_transport_stream_op *op) {
+ void Op(grpc_transport_stream_op_batch *op) {
grpc_transport_perform_stream_op(f_->exec_ctx(), f_->transport(),
static_cast<grpc_stream *>(stream_), op);
}
@@ -305,10 +305,16 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
std::unique_ptr<Closure> start;
std::unique_ptr<Closure> done;
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
+
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@@ -324,14 +330,16 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
start = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
if (!state.KeepRunning()) return;
s.Init(state);
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = done.get();
- op.send_initial_metadata = &b;
+ op.send_initial_metadata = true;
+ op.payload->send_initial_metadata.send_initial_metadata = &b;
s.Op(&op);
});
done = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
- memset(&op, 0, sizeof(op));
- op.cancel_error = GRPC_ERROR_CANCELLED;
+ reset_op();
+ op.cancel_stream = true;
+ op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(&op);
s.DestroyThen(start.get());
});
@@ -348,11 +356,16 @@ static void BM_TransportEmptyOp(benchmark::State &state) {
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
s.Init(state);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
std::unique_ptr<Closure> c =
MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
if (!state.KeepRunning()) return;
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = c.get();
s.Op(&op);
});
@@ -370,7 +383,12 @@ static void BM_TransportStreamSend(benchmark::State &state) {
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
s.Init(state);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
grpc_slice_buffer_stream send_stream;
grpc_slice_buffer send_buffer;
grpc_slice_buffer_init(&send_buffer);
@@ -397,20 +415,23 @@ static void BM_TransportStreamSend(benchmark::State &state) {
s.chttp2_stream()->outgoing_window_delta = 1024 * 1024 * 1024;
f.chttp2_transport()->outgoing_window = 1024 * 1024 * 1024;
grpc_slice_buffer_stream_init(&send_stream, &send_buffer, 0);
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = c.get();
- op.send_message = &send_stream.base;
+ op.send_message = true;
+ op.payload->send_message.send_message = &send_stream.base;
s.Op(&op);
});
- memset(&op, 0, sizeof(op));
- op.send_initial_metadata = &b;
+ reset_op();
+ op.send_initial_metadata = true;
+ op.payload->send_initial_metadata.send_initial_metadata = &b;
op.on_complete = c.get();
s.Op(&op);
f.FlushExecCtx();
- memset(&op, 0, sizeof(op));
- op.cancel_error = GRPC_ERROR_CANCELLED;
+ reset_op();
+ op.cancel_stream = true;
+ op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(&op);
s.DestroyThen(
MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
@@ -483,10 +504,16 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
s.Init(state);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ grpc_transport_stream_op_batch op;
grpc_byte_stream *recv_stream;
grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384);
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
+
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
grpc_metadata_batch b_recv;
@@ -518,10 +545,11 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
s.chttp2_stream()->incoming_window_delta = 1024 * 1024 * 1024;
f.chttp2_transport()->incoming_window = 1024 * 1024 * 1024;
received = 0;
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = do_nothing.get();
- op.recv_message = &recv_stream;
- op.recv_message_ready = drain_start.get();
+ op.recv_message = true;
+ op.payload->recv_message.recv_message = &recv_stream;
+ op.payload->recv_message.recv_message_ready = drain_start.get();
s.Op(&op);
f.PushInput(grpc_slice_ref(incoming_data));
});
@@ -552,9 +580,13 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
grpc_closure_run(exec_ctx, drain.get(), GRPC_ERROR_NONE);
});
- memset(&op, 0, sizeof(op));
- op.send_initial_metadata = &b;
- op.recv_initial_metadata = &b_recv;
+ reset_op();
+ op.send_initial_metadata = true;
+ op.payload->send_initial_metadata.send_initial_metadata = &b;
+ op.recv_initial_metadata = true;
+ op.payload->recv_initial_metadata.recv_initial_metadata = &b_recv;
+ op.payload->recv_initial_metadata.recv_initial_metadata_ready =
+ do_nothing.get();
op.on_complete = c.get();
s.Op(&op);
f.PushInput(SLICE_FROM_BUFFER(
@@ -571,8 +603,9 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
"\x10\x14grpc-accept-encoding\x15identity,deflate,gzip"));
f.FlushExecCtx();
- memset(&op, 0, sizeof(op));
- op.cancel_error = GRPC_ERROR_CANCELLED;
+ reset_op();
+ op.cancel_stream = true;
+ op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(&op);
s.DestroyThen(
MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
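BM_TransportStreamRecv also shows the receive side of the migration: recv_message and recv_initial_metadata became presence flags, with the destination pointers and ready-closures moving into the payload, and recv_initial_metadata_ready must now be supplied explicitly (the benchmark passes a do-nothing closure). Note too why every benchmark routes re-initialization through the reset_op lambda: zeroing the batch also wipes op.payload, so the payload pointer has to be re-attached after each memset. A sketch of wiring up a receive op, assuming the closure and destination outlive the op:

#include <string.h>

static void FillRecvMessage(grpc_transport_stream_op_batch *op,
                            grpc_transport_stream_op_batch_payload *payload,
                            grpc_byte_stream **recv_stream,
                            grpc_closure *ready) {
  memset(op, 0, sizeof(*op));
  op->payload = payload;  // must be restored before filling in any sub-op
  op->recv_message = true;
  op->payload->recv_message.recv_message = recv_stream;
  op->payload->recv_message.recv_message_ready = ready;
}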
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 396d308e2a..29a79e7343 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -63,13 +63,13 @@ class ClientRpcContext {
virtual ~ClientRpcContext() {}
// next state, return false if done. Collect stats when appropriate
virtual bool RunNextState(bool, HistogramEntry* entry) = 0;
- virtual ClientRpcContext* StartNewClone() = 0;
+ virtual void StartNewClone(CompletionQueue* cq) = 0;
static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
return reinterpret_cast<ClientRpcContext*>(t);
}
- virtual void Start(CompletionQueue* cq) = 0;
+ virtual void Start(CompletionQueue* cq, const ClientConfig& config) = 0;
};
template <class RequestType, class ResponseType>
@@ -94,22 +94,17 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
next_issue_(next_issue),
start_req_(start_req) {}
~ClientRpcContextUnaryImpl() override {}
- void Start(CompletionQueue* cq) override {
- cq_ = cq;
- if (!next_issue_) { // ready to issue
- RunNextState(true, nullptr);
- } else { // wait for the issue time
- alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
- }
+ void Start(CompletionQueue* cq, const ClientConfig& config) override {
+ StartInternal(cq);
}
bool RunNextState(bool ok, HistogramEntry* entry) override {
switch (next_state_) {
case State::READY:
start_ = UsageTimer::Now();
response_reader_ = start_req_(stub_, &context_, req_, cq_);
+ next_state_ = State::RESP_DONE;
response_reader_->Finish(&response_, &status_,
ClientRpcContext::tag(this));
- next_state_ = State::RESP_DONE;
return true;
case State::RESP_DONE:
if (status_.ok()) {
@@ -123,9 +118,10 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
return false;
}
}
- ClientRpcContext* StartNewClone() override {
- return new ClientRpcContextUnaryImpl(stub_, req_, next_issue_, start_req_,
- callback_);
+ void StartNewClone(CompletionQueue* cq) override {
+ auto* clone = new ClientRpcContextUnaryImpl(stub_, req_, next_issue_,
+ start_req_, callback_);
+ clone->StartInternal(cq);
}
private:
@@ -147,6 +143,15 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
double start_;
std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>
response_reader_;
+
+ void StartInternal(CompletionQueue* cq) {
+ cq_ = cq;
+ if (!next_issue_) { // ready to issue
+ RunNextState(true, nullptr);
+ } else { // wait for the issue time
+ alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
+ }
+ }
};
typedef std::forward_list<ClientRpcContext*> context_list;
@@ -185,7 +190,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
auto* cq = cli_cqs_[t].get();
auto ctx =
setup_ctx(channels_[ch].get_stub(), next_issuers_[t], request_);
- ctx->Start(cq);
+ ctx->Start(cq, config);
}
t = (t + 1) % cli_cqs_.size();
}
@@ -248,8 +253,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
} else if (!ctx->RunNextState(ok, entry)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
- auto clone = ctx->StartNewClone();
- clone->Start(cli_cqs_[thread_idx].get());
+ ctx->StartNewClone(cli_cqs_[thread_idx].get());
// delete the old version
delete ctx;
}
@@ -330,10 +334,8 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
next_issue_(next_issue),
start_req_(start_req) {}
~ClientRpcContextStreamingImpl() override {}
- void Start(CompletionQueue* cq) override {
- cq_ = cq;
- stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
- next_state_ = State::STREAM_IDLE;
+ void Start(CompletionQueue* cq, const ClientConfig& config) override {
+ StartInternal(cq, config.messages_per_stream());
}
bool RunNextState(bool ok, HistogramEntry* entry) override {
while (true) {
@@ -346,9 +348,9 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
}
break; // loop around, don't return
case State::WAIT:
+ next_state_ = State::READY_TO_WRITE;
alarm_.reset(
new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
- next_state_ = State::READY_TO_WRITE;
return true;
case State::READY_TO_WRITE:
if (!ok) {
@@ -369,17 +371,32 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
case State::READ_DONE:
entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
+ if ((messages_per_stream_ != 0) &&
+ (++messages_issued_ >= messages_per_stream_)) {
+ next_state_ = State::WRITES_DONE_DONE;
+ stream_->WritesDone(ClientRpcContext::tag(this));
+ return true;
+ }
next_state_ = State::STREAM_IDLE;
break; // loop around
+ case State::WRITES_DONE_DONE:
+ next_state_ = State::FINISH_DONE;
+ stream_->Finish(&status_, ClientRpcContext::tag(this));
+ return true;
+ case State::FINISH_DONE:
+ next_state_ = State::INVALID;
+ return false;
+ break;
default:
GPR_ASSERT(false);
return false;
}
}
}
- ClientRpcContext* StartNewClone() override {
- return new ClientRpcContextStreamingImpl(stub_, req_, next_issue_,
- start_req_, callback_);
+ void StartNewClone(CompletionQueue* cq) override {
+ auto* clone = new ClientRpcContextStreamingImpl(stub_, req_, next_issue_,
+ start_req_, callback_);
+ clone->StartInternal(cq, messages_per_stream_);
}
private:
@@ -395,7 +412,9 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
WAIT,
READY_TO_WRITE,
WRITE_DONE,
- READ_DONE
+ READ_DONE,
+ WRITES_DONE_DONE,
+ FINISH_DONE
};
State next_state_;
std::function<void(grpc::Status, ResponseType*)> callback_;
@@ -408,6 +427,18 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
double start_;
std::unique_ptr<grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>
stream_;
+
+ // Allow a limit on number of messages in a stream
+ int messages_per_stream_;
+ int messages_issued_;
+
+ void StartInternal(CompletionQueue* cq, int messages_per_stream) {
+ cq_ = cq;
+ next_state_ = State::STREAM_IDLE;
+ stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
+ messages_per_stream_ = messages_per_stream;
+ messages_issued_ = 0;
+ }
};
class AsyncStreamingClient final
@@ -459,13 +490,8 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
next_issue_(next_issue),
start_req_(start_req) {}
~ClientRpcContextGenericStreamingImpl() override {}
- void Start(CompletionQueue* cq) override {
- cq_ = cq;
- const grpc::string kMethodName(
- "/grpc.testing.BenchmarkService/StreamingCall");
- stream_ = start_req_(stub_, &context_, kMethodName, cq,
- ClientRpcContext::tag(this));
- next_state_ = State::STREAM_IDLE;
+ void Start(CompletionQueue* cq, const ClientConfig& config) override {
+ StartInternal(cq, config.messages_per_stream());
}
bool RunNextState(bool ok, HistogramEntry* entry) override {
while (true) {
@@ -478,9 +504,9 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
}
break; // loop around, don't return
case State::WAIT:
+ next_state_ = State::READY_TO_WRITE;
alarm_.reset(
new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
- next_state_ = State::READY_TO_WRITE;
return true;
case State::READY_TO_WRITE:
if (!ok) {
@@ -501,17 +527,32 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
case State::READ_DONE:
entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
+ if ((messages_per_stream_ != 0) &&
+ (++messages_issued_ >= messages_per_stream_)) {
+ next_state_ = State::WRITES_DONE_DONE;
+ stream_->WritesDone(ClientRpcContext::tag(this));
+ return true;
+ }
next_state_ = State::STREAM_IDLE;
break; // loop around
+ case State::WRITES_DONE_DONE:
+ next_state_ = State::FINISH_DONE;
+ stream_->Finish(&status_, ClientRpcContext::tag(this));
+ return true;
+ case State::FINISH_DONE:
+ next_state_ = State::INVALID;
+ return false;
+ break;
default:
GPR_ASSERT(false);
return false;
}
}
}
- ClientRpcContext* StartNewClone() override {
- return new ClientRpcContextGenericStreamingImpl(stub_, req_, next_issue_,
- start_req_, callback_);
+ void StartNewClone(CompletionQueue* cq) override {
+ auto* clone = new ClientRpcContextGenericStreamingImpl(
+ stub_, req_, next_issue_, start_req_, callback_);
+ clone->StartInternal(cq, messages_per_stream_);
}
private:
@@ -527,7 +568,9 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
WAIT,
READY_TO_WRITE,
WRITE_DONE,
- READ_DONE
+ READ_DONE,
+ WRITES_DONE_DONE,
+ FINISH_DONE
};
State next_state_;
std::function<void(grpc::Status, ByteBuffer*)> callback_;
@@ -539,6 +582,21 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
grpc::Status status_;
double start_;
std::unique_ptr<grpc::GenericClientAsyncReaderWriter> stream_;
+
+ // Allow a limit on number of messages in a stream
+ int messages_per_stream_;
+ int messages_issued_;
+
+ void StartInternal(CompletionQueue* cq, int messages_per_stream) {
+ cq_ = cq;
+ const grpc::string kMethodName(
+ "/grpc.testing.BenchmarkService/StreamingCall");
+ next_state_ = State::STREAM_IDLE;
+ stream_ = start_req_(stub_, &context_, kMethodName, cq,
+ ClientRpcContext::tag(this));
+ messages_per_stream_ = messages_per_stream;
+ messages_issued_ = 0;
+ }
};
static std::unique_ptr<grpc::GenericStub> GenericStubCreator(
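The qps client refactor folds startup into the clone step: StartNewClone() now takes the completion queue and starts the clone itself through a private StartInternal(), so callers never hold an unstarted context, and Start() gains the ClientConfig so the new messages_per_stream limit can be threaded through. The caller side shrinks accordingly, as the AsyncClient hunk shows. A reduced sketch of the pattern; ExampleCtx and its copy construction are illustrative, not the contexts above:

class ExampleCtx : public ClientRpcContext {
 public:
  void Start(CompletionQueue* cq, const ClientConfig& config) override {
    StartInternal(cq, config.messages_per_stream());
  }
  void StartNewClone(CompletionQueue* cq) override {
    auto* clone = new ExampleCtx(*this);             // copy issue parameters
    clone->StartInternal(cq, messages_per_stream_);  // clone starts itself
  }
  bool RunNextState(bool ok, HistogramEntry* entry) override { return false; }

 private:
  void StartInternal(CompletionQueue* cq, int messages_per_stream) {
    cq_ = cq;
    messages_per_stream_ = messages_per_stream;
    messages_issued_ = 0;
    // kick off the first alarm or RPC here
  }
  CompletionQueue* cq_ = nullptr;
  int messages_per_stream_ = 0;
  int messages_issued_ = 0;
};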
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index a944c45496..a020adde51 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -142,10 +142,13 @@ class SynchronousStreamingClient final : public SynchronousClient {
SynchronousStreamingClient(const ClientConfig& config)
: SynchronousClient(config),
context_(num_threads_),
- stream_(num_threads_) {
+ stream_(num_threads_),
+ messages_per_stream_(config.messages_per_stream()),
+ messages_issued_(num_threads_) {
for (size_t thread_idx = 0; thread_idx < num_threads_; thread_idx++) {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+ messages_issued_[thread_idx] = 0;
}
StartThreads(num_threads_);
}
@@ -173,11 +176,17 @@ class SynchronousStreamingClient final : public SynchronousClient {
stream_[thread_idx]->Read(&responses_[thread_idx])) {
entry->set_value((UsageTimer::Now() - start) * 1e9);
// don't set the status since there isn't one yet
- return true;
+ if ((messages_per_stream_ != 0) &&
+ (++messages_issued_[thread_idx] < messages_per_stream_)) {
+ return true;
+ } else {
+ // Fall through to the below resetting code after finish
+ }
}
stream_[thread_idx]->WritesDone();
Status s = stream_[thread_idx]->Finish();
- // don't set the value since the stream is failed and shouldn't be timed
+ // don't set the value since this is either a failure (shouldn't be timed)
+ // or a stream-end (already has been timed)
entry->set_status(s.error_code());
if (!s.ok()) {
gpr_log(GPR_ERROR, "Stream %" PRIuPTR " received an error %s", thread_idx,
@@ -187,6 +196,7 @@ class SynchronousStreamingClient final : public SynchronousClient {
context_[thread_idx].~ClientContext();
new (&context_[thread_idx]) ClientContext();
stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+ messages_issued_[thread_idx] = 0;
return true;
}
@@ -197,6 +207,8 @@ class SynchronousStreamingClient final : public SynchronousClient {
std::vector<
std::unique_ptr<grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>>
stream_;
+ const int messages_per_stream_;
+ std::vector<int> messages_issued_;
};
std::unique_ptr<Client> CreateSynchronousUnaryClient(
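On the synchronous side the same messages_per_stream option is enforced with a per-thread counter: each successful Read() bumps the count, and once the limit is reached the thread falls through to WritesDone()/Finish() and re-creates the stream, while a value of 0 keeps the old unlimited behavior. The rule, restated as a standalone sketch rather than the client's actual helper:

#include <vector>

static bool KeepStreaming(std::vector<int>* messages_issued, size_t thread_idx,
                          int messages_per_stream) {
  // Count the message just read; 0 disables the limit entirely.
  if (messages_per_stream != 0 &&
      ++(*messages_issued)[thread_idx] >= messages_per_stream) {
    return false;  // caller falls through to WritesDone()/Finish() and resets
  }
  return true;
}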