Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/end2end/filter_end2end_test.cc         |   5
-rw-r--r--  test/cpp/microbenchmarks/bm_call_create.cc      |  22
-rw-r--r--  test/cpp/microbenchmarks/bm_chttp2_transport.cc |  81
-rw-r--r--  test/cpp/qps/client_async.cc                    | 132
-rw-r--r--  test/cpp/qps/client_sync.cc                     |  18
5 files changed, 184 insertions, 74 deletions
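
Taken together, these test diffs track a transport-API refactor: the monolithic grpc_transport_stream_op becomes grpc_transport_stream_op_batch, which keeps a boolean flag per sub-op and moves the associated pointers and callbacks into a companion grpc_transport_stream_op_batch_payload. The qps clients additionally gain a messages_per_stream limit so streaming benchmarks can cap and recycle streams. A minimal before/after sketch of the new layout, assuming an already-initialized grpc_metadata_batch b and grpc_closure done (field names are as in the hunks below):

    #include <string.h>
    #include "src/core/lib/transport/transport.h"

    // Old layout: one struct carried flag and pointer together.
    //   grpc_transport_stream_op op;
    //   op.send_initial_metadata = &b;   // the pointer doubled as the flag
    // New layout: flags on the batch, data in a separate payload.
    grpc_transport_stream_op_batch op;
    grpc_transport_stream_op_batch_payload payload;
    memset(&op, 0, sizeof(op));       // zeroes every flag, and op.payload too
    op.payload = &payload;            // so the payload must be (re)attached
    op.send_initial_metadata = true;  // flag lives on the batch
    payload.send_initial_metadata.send_initial_metadata = &b;
    op.on_complete = &done;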
diff --git a/test/cpp/end2end/filter_end2end_test.cc b/test/cpp/end2end/filter_end2end_test.cc
index bd384f68b4..2f873eeaa8 100644
--- a/test/cpp/end2end/filter_end2end_test.cc
+++ b/test/cpp/end2end/filter_end2end_test.cc
@@ -122,8 +122,9 @@ class ChannelDataImpl : public ChannelData {
class CallDataImpl : public CallData {
public:
- void StartTransportStreamOp(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- TransportStreamOp* op) override {
+ void StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ TransportStreamOpBatch* op) override {
// Incrementing the counter could be done from Init(), but we want
// to test that the individual methods are actually called correctly.
if (op->recv_initial_metadata() != nullptr) IncrementCallCounter();
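
For reference, this is the shape of a filter after the rename, as a minimal sketch: CountingCallData is a hypothetical name, and the forwarding call assumes the grpc::CallData base and grpc::TransportStreamOpBatch wrapper from src/cpp/common/channel_filter.h that this test builds on.

    #include "src/cpp/common/channel_filter.h"

    class CountingCallData : public grpc::CallData {
     public:
      void StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
                                       grpc_call_element* elem,
                                       grpc::TransportStreamOpBatch* op) override {
        // One batch may carry several sub-ops; check each accessor of interest.
        if (op->recv_initial_metadata() != nullptr) {
          // ...count or inspect the call here...
        }
        // Forward the batch down the call stack, as the base class does.
        grpc_call_next_op(exec_ctx, elem, op->op());
      }
    };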
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index 4417e0e829..13fede4ca2 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -221,7 +221,7 @@ namespace dummy_filter {
static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {}
+ grpc_transport_stream_op_batch *op) {}
static void StartTransportOp(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -296,7 +296,7 @@ void SetPollsetSet(grpc_exec_ctx *exec_ctx, grpc_transport *self,
/* implementation of grpc_transport_perform_stream_op */
void PerformStreamOp(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_transport_stream_op *op) {
+ grpc_stream *stream, grpc_transport_stream_op_batch *op) {
grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
@@ -346,13 +346,15 @@ class SendEmptyMetadata {
memset(&op_, 0, sizeof(op_));
op_.on_complete = grpc_closure_init(&closure_, DoNothing, nullptr,
grpc_schedule_on_exec_ctx);
+ op_.send_initial_metadata = true;
+ op_.payload = &op_payload_;
}
class Op {
public:
Op(grpc_exec_ctx *exec_ctx, SendEmptyMetadata *p, grpc_call_stack *s) {
grpc_metadata_batch_init(&batch_);
- p->op_.send_initial_metadata = &batch_;
+ p->op_payload_.send_initial_metadata.send_initial_metadata = &batch_;
}
void Finish(grpc_exec_ctx *exec_ctx) {
grpc_metadata_batch_destroy(exec_ctx, &batch_);
@@ -366,7 +368,8 @@ class SendEmptyMetadata {
const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
const gpr_timespec start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_slice method_ = grpc_slice_from_static_string("/foo/bar");
- grpc_transport_stream_op op_;
+ grpc_transport_stream_op_batch op_;
+ grpc_transport_stream_op_batch_payload op_payload_;
grpc_closure closure_;
};
@@ -488,13 +491,16 @@ namespace isolated_call_filter {
static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
if (op->recv_initial_metadata) {
- grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
- GRPC_ERROR_NONE);
+ grpc_closure_sched(
+ exec_ctx,
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
}
if (op->recv_message) {
- grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_NONE);
+ grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_NONE);
}
grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
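
Note the two completion levels the isolated_call_filter hunk relies on: the per-op ready callbacks (recv_initial_metadata_ready, recv_message_ready) now live in the payload and fire as each piece of data becomes available, while on_complete stays on the batch and fires once for the whole batch. A filter that short-circuits receives therefore schedules both, condensed here with exec_ctx and op as in the hunk above:

    if (op->recv_initial_metadata) {  // flag on the batch...
      grpc_closure_sched(
          exec_ctx,                   // ...callback in the payload
          op->payload->recv_initial_metadata.recv_initial_metadata_ready,
          GRPC_ERROR_NONE);
    }
    if (op->recv_message) {
      grpc_closure_sched(exec_ctx,
                         op->payload->recv_message.recv_message_ready,
                         GRPC_ERROR_NONE);
    }
    grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_NONE);  // batch-level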
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index 254d57de20..c89f349ca7 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -207,7 +207,7 @@ class Stream {
static_cast<grpc_stream *>(stream_), closure);
}
- void Op(grpc_transport_stream_op *op) {
+ void Op(grpc_transport_stream_op_batch *op) {
grpc_transport_perform_stream_op(f_->exec_ctx(), f_->transport(),
static_cast<grpc_stream *>(stream_), op);
}
@@ -305,10 +305,16 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
std::unique_ptr<Closure> start;
std::unique_ptr<Closure> done;
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
+
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
b.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@@ -324,14 +330,16 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
start = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
if (!state.KeepRunning()) return;
s.Init(state);
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = done.get();
- op.send_initial_metadata = &b;
+ op.send_initial_metadata = true;
+ op.payload->send_initial_metadata.send_initial_metadata = &b;
s.Op(&op);
});
done = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
- memset(&op, 0, sizeof(op));
- op.cancel_error = GRPC_ERROR_CANCELLED;
+ reset_op();
+ op.cancel_stream = true;
+ op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(&op);
s.DestroyThen(start.get());
});
@@ -348,11 +356,16 @@ static void BM_TransportEmptyOp(benchmark::State &state) {
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
s.Init(state);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
std::unique_ptr<Closure> c =
MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
if (!state.KeepRunning()) return;
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = c.get();
s.Op(&op);
});
@@ -370,7 +383,12 @@ static void BM_TransportStreamSend(benchmark::State &state) {
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
s.Init(state);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
grpc_slice_buffer_stream send_stream;
grpc_slice_buffer send_buffer;
grpc_slice_buffer_init(&send_buffer);
@@ -397,20 +415,23 @@ static void BM_TransportStreamSend(benchmark::State &state) {
s.chttp2_stream()->outgoing_window_delta = 1024 * 1024 * 1024;
f.chttp2_transport()->outgoing_window = 1024 * 1024 * 1024;
grpc_slice_buffer_stream_init(&send_stream, &send_buffer, 0);
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = c.get();
- op.send_message = &send_stream.base;
+ op.send_message = true;
+ op.payload->send_message.send_message = &send_stream.base;
s.Op(&op);
});
- memset(&op, 0, sizeof(op));
- op.send_initial_metadata = &b;
+ reset_op();
+ op.send_initial_metadata = true;
+ op.payload->send_initial_metadata.send_initial_metadata = &b;
op.on_complete = c.get();
s.Op(&op);
f.FlushExecCtx();
- memset(&op, 0, sizeof(op));
- op.cancel_error = GRPC_ERROR_CANCELLED;
+ reset_op();
+ op.cancel_stream = true;
+ op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(&op);
s.DestroyThen(
MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
@@ -483,10 +504,16 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
s.Init(state);
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ grpc_transport_stream_op_batch op;
grpc_byte_stream *recv_stream;
grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384);
+ auto reset_op = [&]() {
+ memset(&op, 0, sizeof(op));
+ op.payload = &op_payload;
+ };
+
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
grpc_metadata_batch b_recv;
@@ -518,10 +545,11 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
s.chttp2_stream()->incoming_window_delta = 1024 * 1024 * 1024;
f.chttp2_transport()->incoming_window = 1024 * 1024 * 1024;
received = 0;
- memset(&op, 0, sizeof(op));
+ reset_op();
op.on_complete = do_nothing.get();
- op.recv_message = &recv_stream;
- op.recv_message_ready = drain_start.get();
+ op.recv_message = true;
+ op.payload->recv_message.recv_message = &recv_stream;
+ op.payload->recv_message.recv_message_ready = drain_start.get();
s.Op(&op);
f.PushInput(grpc_slice_ref(incoming_data));
});
@@ -552,9 +580,13 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
grpc_closure_run(exec_ctx, drain.get(), GRPC_ERROR_NONE);
});
- memset(&op, 0, sizeof(op));
- op.send_initial_metadata = &b;
- op.recv_initial_metadata = &b_recv;
+ reset_op();
+ op.send_initial_metadata = true;
+ op.payload->send_initial_metadata.send_initial_metadata = &b;
+ op.recv_initial_metadata = true;
+ op.payload->recv_initial_metadata.recv_initial_metadata = &b_recv;
+ op.payload->recv_initial_metadata.recv_initial_metadata_ready =
+ do_nothing.get();
op.on_complete = c.get();
s.Op(&op);
f.PushInput(SLICE_FROM_BUFFER(
@@ -571,8 +603,9 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
"\x10\x14grpc-accept-encoding\x15identity,deflate,gzip"));
f.FlushExecCtx();
- memset(&op, 0, sizeof(op));
- op.cancel_error = GRPC_ERROR_CANCELLED;
+ reset_op();
+ op.cancel_stream = true;
+ op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
s.Op(&op);
s.DestroyThen(
MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
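
The recurring reset_op lambda in these benchmarks exists because memset(&op, 0, sizeof(op)) wipes op.payload along with the flags, so the payload must be re-attached before any op->payload-> dereference; forgetting that would be a null-pointer crash. Cancellation also changes shape: cancel_error was a bare field on the old struct, but is now an explicit cancel_stream sub-op, as in this condensed cancel sequence from the hunks above:

    auto reset_op = [&]() {
      memset(&op, 0, sizeof(op));  // clears flags *and* the payload pointer
      op.payload = &op_payload;    // re-attach before touching any sub-op
    };

    reset_op();
    op.cancel_stream = true;  // cancellation is now a flagged sub-op
    op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
    s.Op(&op);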
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 396d308e2a..29a79e7343 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -63,13 +63,13 @@ class ClientRpcContext {
virtual ~ClientRpcContext() {}
// next state, return false if done. Collect stats when appropriate
virtual bool RunNextState(bool, HistogramEntry* entry) = 0;
- virtual ClientRpcContext* StartNewClone() = 0;
+ virtual void StartNewClone(CompletionQueue* cq) = 0;
static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
return reinterpret_cast<ClientRpcContext*>(t);
}
- virtual void Start(CompletionQueue* cq) = 0;
+ virtual void Start(CompletionQueue* cq, const ClientConfig& config) = 0;
};
template <class RequestType, class ResponseType>
@@ -94,22 +94,17 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
next_issue_(next_issue),
start_req_(start_req) {}
~ClientRpcContextUnaryImpl() override {}
- void Start(CompletionQueue* cq) override {
- cq_ = cq;
- if (!next_issue_) { // ready to issue
- RunNextState(true, nullptr);
- } else { // wait for the issue time
- alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
- }
+ void Start(CompletionQueue* cq, const ClientConfig& config) override {
+ StartInternal(cq);
}
bool RunNextState(bool ok, HistogramEntry* entry) override {
switch (next_state_) {
case State::READY:
start_ = UsageTimer::Now();
response_reader_ = start_req_(stub_, &context_, req_, cq_);
+ next_state_ = State::RESP_DONE;
response_reader_->Finish(&response_, &status_,
ClientRpcContext::tag(this));
- next_state_ = State::RESP_DONE;
return true;
case State::RESP_DONE:
if (status_.ok()) {
@@ -123,9 +118,10 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
return false;
}
}
- ClientRpcContext* StartNewClone() override {
- return new ClientRpcContextUnaryImpl(stub_, req_, next_issue_, start_req_,
- callback_);
+ void StartNewClone(CompletionQueue* cq) override {
+ auto* clone = new ClientRpcContextUnaryImpl(stub_, req_, next_issue_,
+ start_req_, callback_);
+ clone->StartInternal(cq);
}
private:
@@ -147,6 +143,15 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
double start_;
std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>
response_reader_;
+
+ void StartInternal(CompletionQueue* cq) {
+ cq_ = cq;
+ if (!next_issue_) { // ready to issue
+ RunNextState(true, nullptr);
+ } else { // wait for the issue time
+ alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
+ }
+ }
};
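
Several hunks in this file also move the next_state_ assignment to before the async call that carries the tag (Finish here, the Alarm constructions in the streaming contexts). A likely motivation: once the tag is handed to the completion queue, another thread may dequeue it and call RunNextState immediately, so writing next_state_ after the call races with that read. Sketch of the hazard, with names as in ClientRpcContextUnaryImpl above:

    // Racy: the tag can complete, and RunNextState can read next_state_,
    // before the assignment below executes.
    response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this));
    next_state_ = State::RESP_DONE;  // too late

    // Safe: publish the state before handing the tag to the CQ.
    next_state_ = State::RESP_DONE;
    response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this));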
typedef std::forward_list<ClientRpcContext*> context_list;
@@ -185,7 +190,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
auto* cq = cli_cqs_[t].get();
auto ctx =
setup_ctx(channels_[ch].get_stub(), next_issuers_[t], request_);
- ctx->Start(cq);
+ ctx->Start(cq, config);
}
t = (t + 1) % cli_cqs_.size();
}
@@ -248,8 +253,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
} else if (!ctx->RunNextState(ok, entry)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
- auto clone = ctx->StartNewClone();
- clone->Start(cli_cqs_[thread_idx].get());
+ ctx->StartNewClone(cli_cqs_[thread_idx].get());
// delete the old version
delete ctx;
}
@@ -330,10 +334,8 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
next_issue_(next_issue),
start_req_(start_req) {}
~ClientRpcContextStreamingImpl() override {}
- void Start(CompletionQueue* cq) override {
- cq_ = cq;
- stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
- next_state_ = State::STREAM_IDLE;
+ void Start(CompletionQueue* cq, const ClientConfig& config) override {
+ StartInternal(cq, config.messages_per_stream());
}
bool RunNextState(bool ok, HistogramEntry* entry) override {
while (true) {
@@ -346,9 +348,9 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
}
break; // loop around, don't return
case State::WAIT:
+ next_state_ = State::READY_TO_WRITE;
alarm_.reset(
new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
- next_state_ = State::READY_TO_WRITE;
return true;
case State::READY_TO_WRITE:
if (!ok) {
@@ -369,17 +371,32 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
case State::READ_DONE:
entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
+ if ((messages_per_stream_ != 0) &&
+ (++messages_issued_ >= messages_per_stream_)) {
+ next_state_ = State::WRITES_DONE_DONE;
+ stream_->WritesDone(ClientRpcContext::tag(this));
+ return true;
+ }
next_state_ = State::STREAM_IDLE;
break; // loop around
+ case State::WRITES_DONE_DONE:
+ next_state_ = State::FINISH_DONE;
+ stream_->Finish(&status_, ClientRpcContext::tag(this));
+ return true;
+ case State::FINISH_DONE:
+ next_state_ = State::INVALID;
+ return false;
+ break;
default:
GPR_ASSERT(false);
return false;
}
}
}
- ClientRpcContext* StartNewClone() override {
- return new ClientRpcContextStreamingImpl(stub_, req_, next_issue_,
- start_req_, callback_);
+ void StartNewClone(CompletionQueue* cq) override {
+ auto* clone = new ClientRpcContextStreamingImpl(stub_, req_, next_issue_,
+ start_req_, callback_);
+ clone->StartInternal(cq, messages_per_stream_);
}
private:
@@ -395,7 +412,9 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
WAIT,
READY_TO_WRITE,
WRITE_DONE,
- READ_DONE
+ READ_DONE,
+ WRITES_DONE_DONE,
+ FINISH_DONE
};
State next_state_;
std::function<void(grpc::Status, ResponseType*)> callback_;
@@ -408,6 +427,18 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
double start_;
std::unique_ptr<grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>
stream_;
+
+ // Allow a limit on number of messages in a stream
+ int messages_per_stream_;
+ int messages_issued_;
+
+ void StartInternal(CompletionQueue* cq, int messages_per_stream) {
+ cq_ = cq;
+ next_state_ = State::STREAM_IDLE;
+ stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
+ messages_per_stream_ = messages_per_stream;
+ messages_issued_ = 0;
+ }
};
class AsyncStreamingClient final
@@ -459,13 +490,8 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
next_issue_(next_issue),
start_req_(start_req) {}
~ClientRpcContextGenericStreamingImpl() override {}
- void Start(CompletionQueue* cq) override {
- cq_ = cq;
- const grpc::string kMethodName(
- "/grpc.testing.BenchmarkService/StreamingCall");
- stream_ = start_req_(stub_, &context_, kMethodName, cq,
- ClientRpcContext::tag(this));
- next_state_ = State::STREAM_IDLE;
+ void Start(CompletionQueue* cq, const ClientConfig& config) override {
+ StartInternal(cq, config.messages_per_stream());
}
bool RunNextState(bool ok, HistogramEntry* entry) override {
while (true) {
@@ -478,9 +504,9 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
}
break; // loop around, don't return
case State::WAIT:
+ next_state_ = State::READY_TO_WRITE;
alarm_.reset(
new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
- next_state_ = State::READY_TO_WRITE;
return true;
case State::READY_TO_WRITE:
if (!ok) {
@@ -501,17 +527,32 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
case State::READ_DONE:
entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
+ if ((messages_per_stream_ != 0) &&
+ (++messages_issued_ >= messages_per_stream_)) {
+ next_state_ = State::WRITES_DONE_DONE;
+ stream_->WritesDone(ClientRpcContext::tag(this));
+ return true;
+ }
next_state_ = State::STREAM_IDLE;
break; // loop around
+ case State::WRITES_DONE_DONE:
+ next_state_ = State::FINISH_DONE;
+ stream_->Finish(&status_, ClientRpcContext::tag(this));
+ return true;
+ case State::FINISH_DONE:
+ next_state_ = State::INVALID;
+ return false;
+ break;
default:
GPR_ASSERT(false);
return false;
}
}
}
- ClientRpcContext* StartNewClone() override {
- return new ClientRpcContextGenericStreamingImpl(stub_, req_, next_issue_,
- start_req_, callback_);
+ void StartNewClone(CompletionQueue* cq) override {
+ auto* clone = new ClientRpcContextGenericStreamingImpl(
+ stub_, req_, next_issue_, start_req_, callback_);
+ clone->StartInternal(cq, messages_per_stream_);
}
private:
@@ -527,7 +568,9 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
WAIT,
READY_TO_WRITE,
WRITE_DONE,
- READ_DONE
+ READ_DONE,
+ WRITES_DONE_DONE,
+ FINISH_DONE
};
State next_state_;
std::function<void(grpc::Status, ByteBuffer*)> callback_;
@@ -539,6 +582,21 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
grpc::Status status_;
double start_;
std::unique_ptr<grpc::GenericClientAsyncReaderWriter> stream_;
+
+ // Allow a limit on number of messages in a stream
+ int messages_per_stream_;
+ int messages_issued_;
+
+ void StartInternal(CompletionQueue* cq, int messages_per_stream) {
+ cq_ = cq;
+ const grpc::string kMethodName(
+ "/grpc.testing.BenchmarkService/StreamingCall");
+ next_state_ = State::STREAM_IDLE;
+ stream_ = start_req_(stub_, &context_, kMethodName, cq,
+ ClientRpcContext::tag(this));
+ messages_per_stream_ = messages_per_stream;
+ messages_issued_ = 0;
+ }
};
static std::unique_ptr<grpc::GenericStub> GenericStubCreator(
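
All three contexts now follow one clone-and-start pattern: instead of returning an unstarted clone for the caller to Start(), StartNewClone constructs the clone and starts it through a private StartInternal, so per-context state such as messages_per_stream_ carries over without re-reading the config. A reduced sketch against the ClientRpcContext interface from this file (SketchCtx and its trivial RunNextState are hypothetical):

    class SketchCtx : public ClientRpcContext {
     public:
      void Start(CompletionQueue* cq, const ClientConfig& config) override {
        StartInternal(cq, config.messages_per_stream());
      }
      void StartNewClone(CompletionQueue* cq) override {
        auto* clone = new SketchCtx();  // real contexts copy stub/req/callbacks
        clone->StartInternal(cq, messages_per_stream_);  // budget carries over
      }
      bool RunNextState(bool ok, HistogramEntry* entry) override {
        return false;  // elided; see the real state machines above
      }

     private:
      void StartInternal(CompletionQueue* cq, int messages_per_stream) {
        cq_ = cq;
        messages_per_stream_ = messages_per_stream;
        messages_issued_ = 0;
        // ...issue the first op on cq_, tagged ClientRpcContext::tag(this)...
      }
      CompletionQueue* cq_ = nullptr;
      int messages_per_stream_ = 0;
      int messages_issued_ = 0;
    };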
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index a944c45496..a020adde51 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -142,10 +142,13 @@ class SynchronousStreamingClient final : public SynchronousClient {
SynchronousStreamingClient(const ClientConfig& config)
: SynchronousClient(config),
context_(num_threads_),
- stream_(num_threads_) {
+ stream_(num_threads_),
+ messages_per_stream_(config.messages_per_stream()),
+ messages_issued_(num_threads_) {
for (size_t thread_idx = 0; thread_idx < num_threads_; thread_idx++) {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+ messages_issued_[thread_idx] = 0;
}
StartThreads(num_threads_);
}
@@ -173,11 +176,17 @@ class SynchronousStreamingClient final : public SynchronousClient {
stream_[thread_idx]->Read(&responses_[thread_idx])) {
entry->set_value((UsageTimer::Now() - start) * 1e9);
// don't set the status since there isn't one yet
- return true;
+ if ((messages_per_stream_ != 0) &&
+ (++messages_issued_[thread_idx] < messages_per_stream_)) {
+ return true;
+ } else {
+ // Fall through to the below resetting code after finish
+ }
}
stream_[thread_idx]->WritesDone();
Status s = stream_[thread_idx]->Finish();
- // don't set the value since the stream is failed and shouldn't be timed
+ // don't set the value since this is either a failure (shouldn't be timed)
+ // or a stream-end (already has been timed)
entry->set_status(s.error_code());
if (!s.ok()) {
gpr_log(GPR_ERROR, "Stream %" PRIuPTR " received an error %s", thread_idx,
@@ -187,6 +196,7 @@ class SynchronousStreamingClient final : public SynchronousClient {
context_[thread_idx].~ClientContext();
new (&context_[thread_idx]) ClientContext();
stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+ messages_issued_[thread_idx] = 0;
return true;
}
@@ -197,6 +207,8 @@ class SynchronousStreamingClient final : public SynchronousClient {
std::vector<
std::unique_ptr<grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>>
stream_;
+ const int messages_per_stream_;
+ std::vector<int> messages_issued_;
};
std::unique_ptr<Client> CreateSynchronousUnaryClient(
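
The sync client mirrors the async message cap with a per-thread counter: a messages_per_stream_ of 0 means unlimited, and otherwise each successful Read increments the thread's counter until the budget is spent, at which point control falls through to WritesDone()/Finish() and the stream is rebuilt with the counter reset to 0. The test condensed to a predicate (a hypothetical helper over the members added above):

    // True while the current stream may carry another message.
    bool StreamHasBudget(size_t thread_idx) {
      if (messages_per_stream_ == 0) return true;  // 0 == no per-stream cap
      return ++messages_issued_[thread_idx] < messages_per_stream_;
    }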