author     Craig Tiller <ctiller@google.com>   2016-07-15 06:37:41 -0700
committer  Craig Tiller <ctiller@google.com>   2016-07-15 06:37:41 -0700
commit     b23239b6d835c2f3abee715c0120144f401a641f (patch)
tree       22f6a8674fac62142a3e5bac98215f32dd170f42  /test/cpp/qps/client_async.cc
parent     13d3e3b7e2538025148dd08956765eab516b2f0b (diff)
parent     5fde20d9f0ce64f5caade6f485c3715af3ff23b8 (diff)
Merge branch 'histo' of github.com:vjpai/grpc into delayed-write
Diffstat (limited to 'test/cpp/qps/client_async.cc')
-rw-r--r--  test/cpp/qps/client_async.cc  91
1 file changed, 57 insertions, 34 deletions
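The substance of the change is visible in the hunks below: ClientRpcContext::RunNextState() now takes a HistogramEntry* rather than a shared Histogram*, so each completed RPC records a single latency value that the framework merges into the histogram later, and AsyncClient grows mutex-guarded per-thread shutdown state so its completion-queue threads can wind down cleanly. As a rough sketch of the histogram-entry side only, using hypothetical stand-in types (the real Histogram and HistogramEntry are defined elsewhere in test/cpp/qps, and MergeEntry is an invented name here):

    #include <vector>

    // Hypothetical stand-ins for the qps framework's types.
    class HistogramEntry {
     public:
      void set_value(double v) {
        value_ = v;
        used_ = true;
      }
      bool used() const { return used_; }
      double value() const { return value_; }

     private:
      double value_ = 0;
      bool used_ = false;
    };

    class Histogram {
     public:
      void Add(double v) { values_.push_back(v); }
      // Fold one per-RPC entry into the histogram; done by the framework
      // after the worker thread returns, not by the worker itself.
      void MergeEntry(const HistogramEntry& e) {
        if (e.used()) Add(e.value());
      }

     private:
      std::vector<double> values_;
    };

    // Worker-side shape of the new API: the RPC state machine fills one
    // entry per completed call instead of updating a shared histogram.
    void RecordLatency(double start_ns, double end_ns, HistogramEntry* entry) {
      entry->set_value(end_ns - start_ns);
    }

The point of the indirection is that only the merge step touches shared state, so the per-RPC fast path does not need a histogram lock.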
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 5dd0bd8533..43f037ce48 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -31,7 +31,6 @@
*
*/
-#include <cassert>
#include <forward_list>
#include <functional>
#include <list>
@@ -48,7 +47,6 @@
#include <grpc++/generic/generic_stub.h>
#include <grpc/grpc.h>
#include <grpc/support/cpu.h>
-#include <grpc/support/histogram.h>
#include <grpc/support/log.h>
#include "src/proto/grpc/testing/services.grpc.pb.h"
@@ -64,7 +62,7 @@ class ClientRpcContext {
ClientRpcContext() {}
virtual ~ClientRpcContext() {}
// next state, return false if done. Collect stats when appropriate
- virtual bool RunNextState(bool, Histogram* hist) = 0;
+ virtual bool RunNextState(bool, HistogramEntry* entry) = 0;
virtual ClientRpcContext* StartNewClone() = 0;
static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
@@ -104,7 +102,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
}
}
- bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+ bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE {
switch (next_state_) {
case State::READY:
start_ = UsageTimer::Now();
@@ -114,7 +112,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
next_state_ = State::RESP_DONE;
return true;
case State::RESP_DONE:
- hist->Add((UsageTimer::Now() - start_) * 1e9);
+ entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
next_state_ = State::INVALID;
return false;
@@ -176,6 +174,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
for (int i = 0; i < num_async_threads_; i++) {
cli_cqs_.emplace_back(new CompletionQueue);
next_issuers_.emplace_back(NextIssuer(i));
+ shutdown_state_.emplace_back(new PerThreadShutdownState());
}
using namespace std::placeholders;
@@ -192,7 +191,6 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
virtual ~AsyncClient() {
for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
- (*cq)->Shutdown();
void* got_tag;
bool ok;
while ((*cq)->Next(&got_tag, &ok)) {
@@ -201,7 +199,36 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
}
- bool ThreadFunc(Histogram* histogram,
+ protected:
+ const int num_async_threads_;
+
+ private:
+ struct PerThreadShutdownState {
+ mutable std::mutex mutex;
+ bool shutdown;
+ PerThreadShutdownState() : shutdown(false) {}
+ };
+
+ int NumThreads(const ClientConfig& config) {
+ int num_threads = config.async_client_threads();
+ if (num_threads <= 0) { // Use dynamic sizing
+ num_threads = cores_;
+ gpr_log(GPR_INFO, "Sizing async client to %d threads", num_threads);
+ }
+ return num_threads;
+ }
+ void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL {
+ for (auto ss = shutdown_state_.begin(); ss != shutdown_state_.end(); ++ss) {
+ std::lock_guard<std::mutex> lock((*ss)->mutex);
+ (*ss)->shutdown = true;
+ }
+ for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
+ (*cq)->Shutdown();
+ }
+ this->EndThreads(); // this needed for resolution
+ }
+
+ bool ThreadFunc(HistogramEntry* entry,
size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL {
void* got_tag;
bool ok;
@@ -209,12 +236,15 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
switch (cli_cqs_[thread_idx]->AsyncNext(
&got_tag, &ok,
std::chrono::system_clock::now() + std::chrono::milliseconds(10))) {
- case CompletionQueue::SHUTDOWN:
- return false;
case CompletionQueue::GOT_EVENT: {
// Got a regular event, so process it
ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
- if (!ctx->RunNextState(ok, histogram)) {
+ // Proceed while holding a lock to make sure that
+ // this thread isn't supposed to shut down
+ std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
+ if (shutdown_state_[thread_idx]->shutdown) {
+ return true;
+ } else if (!ctx->RunNextState(ok, entry)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
auto clone = ctx->StartNewClone();
@@ -224,29 +254,22 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
return true;
}
- case CompletionQueue::TIMEOUT:
- // TODO(ctiller): do something here to track how frequently we pass
- // through this codepath.
+ case CompletionQueue::TIMEOUT: {
+ std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
+ if (shutdown_state_[thread_idx]->shutdown) {
+ return true;
+ }
+ return true;
+ }
+ case CompletionQueue::SHUTDOWN: // queue is shutting down, so we must be
+ // done
return true;
}
- GPR_UNREACHABLE_CODE(return false);
- }
-
- protected:
- const int num_async_threads_;
-
- private:
- int NumThreads(const ClientConfig& config) {
- int num_threads = config.async_client_threads();
- if (num_threads <= 0) { // Use dynamic sizing
- num_threads = cores_;
- gpr_log(GPR_INFO, "Sizing async client to %d threads", num_threads);
- }
- return num_threads;
}
std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
std::vector<std::function<gpr_timespec()>> next_issuers_;
+ std::vector<std::unique_ptr<PerThreadShutdownState>> shutdown_state_;
};
static std::unique_ptr<BenchmarkService::Stub> BenchmarkStubCreator(
@@ -262,7 +285,7 @@ class AsyncUnaryClient GRPC_FINAL
config, SetupCtx, BenchmarkStubCreator) {
StartThreads(num_async_threads_);
}
- ~AsyncUnaryClient() GRPC_OVERRIDE { EndThreads(); }
+ ~AsyncUnaryClient() GRPC_OVERRIDE {}
private:
static void CheckDone(grpc::Status s, SimpleResponse* response) {}
@@ -307,7 +330,7 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
next_state_ = State::STREAM_IDLE;
}
- bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+ bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE {
while (true) {
switch (next_state_) {
case State::STREAM_IDLE:
@@ -339,7 +362,7 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
return true;
break;
case State::READ_DONE:
- hist->Add((UsageTimer::Now() - start_) * 1e9);
+ entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
next_state_ = State::STREAM_IDLE;
break; // loop around
@@ -391,7 +414,7 @@ class AsyncStreamingClient GRPC_FINAL
StartThreads(num_async_threads_);
}
- ~AsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }
+ ~AsyncStreamingClient() GRPC_OVERRIDE {}
private:
static void CheckDone(grpc::Status s, SimpleResponse* response) {}
@@ -439,7 +462,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
ClientRpcContext::tag(this));
next_state_ = State::STREAM_IDLE;
}
- bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+ bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE {
while (true) {
switch (next_state_) {
case State::STREAM_IDLE:
@@ -471,7 +494,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
return true;
break;
case State::READ_DONE:
- hist->Add((UsageTimer::Now() - start_) * 1e9);
+ entry->set_value((UsageTimer::Now() - start_) * 1e9);
callback_(status_, &response_);
next_state_ = State::STREAM_IDLE;
break; // loop around
@@ -527,7 +550,7 @@ class GenericAsyncStreamingClient GRPC_FINAL
StartThreads(num_async_threads_);
}
- ~GenericAsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }
+ ~GenericAsyncStreamingClient() GRPC_OVERRIDE {}
private:
static void CheckDone(grpc::Status s, ByteBuffer* response) {}
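For context on the PerThreadShutdownState plumbing added above, here is the same shutdown handshake in isolation. This is a minimal sketch rather than the benchmark code: FakeQueue and Workers are invented stand-ins, while the real worker is AsyncClient::ThreadFunc driving a grpc::CompletionQueue. The owner flags every worker under its per-thread mutex, then shuts the queues down, and each worker re-checks its flag under the same lock before handling an event or a timeout.

    #include <chrono>
    #include <memory>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Hypothetical event source standing in for grpc::CompletionQueue; the
    // real AsyncNext() can also report TIMEOUT and SHUTDOWN, which the
    // patched ThreadFunc treats as "keep the thread alive", not an error.
    struct FakeQueue {
      bool Next() {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        return true;  // pretend an event arrived
      }
      void Shutdown() {}
    };

    struct PerThreadShutdownState {
      std::mutex mutex;
      bool shutdown = false;
    };

    class Workers {
     public:
      explicit Workers(int n) {
        for (int i = 0; i < n; i++) {
          queues_.emplace_back(new FakeQueue);
          states_.emplace_back(new PerThreadShutdownState);
        }
        for (int i = 0; i < n; i++) {
          threads_.emplace_back([this, i] { Loop(i); });
        }
      }

      ~Workers() {
        // Same order as DestroyMultithreading(): flag every thread first,
        // then shut the queues down, then join.
        for (auto& s : states_) {
          std::lock_guard<std::mutex> lock(s->mutex);
          s->shutdown = true;
        }
        for (auto& q : queues_) q->Shutdown();
        for (auto& t : threads_) t.join();
      }

     private:
      void Loop(int idx) {
        while (queues_[idx]->Next()) {
          // Check the per-thread flag under its lock before touching any
          // state, as ThreadFunc now does for GOT_EVENT and TIMEOUT.
          std::lock_guard<std::mutex> lock(states_[idx]->mutex);
          if (states_[idx]->shutdown) return;
          // ... handle the event ...
        }
      }

      std::vector<std::unique_ptr<FakeQueue>> queues_;
      std::vector<std::unique_ptr<PerThreadShutdownState>> states_;
      std::vector<std::thread> threads_;
    };

    int main() { Workers w(2); }  // starts two workers, then shuts down cleanly

Because each thread owns its own mutex, the only contention on the lock is between that one worker and the shutdown path, which keeps the check cheap in the steady state.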