Diffstat
-rwxr-xr-x  grpc.gemspec                                          1
-rw-r--r--  include/grpc++/ext/reflection.grpc.pb.h               2
-rw-r--r--  include/grpc++/impl/codegen/call.h                    6
-rw-r--r--  include/grpc++/impl/codegen/completion_queue.h       10
-rw-r--r--  include/grpc++/impl/codegen/method_handler_impl.h    64
-rw-r--r--  include/grpc++/impl/codegen/rpc_method.h              3
-rw-r--r--  include/grpc++/impl/codegen/rpc_service_method.h      1
-rw-r--r--  include/grpc++/impl/codegen/server_context.h         10
-rw-r--r--  include/grpc++/impl/codegen/service_type.h           11
-rw-r--r--  include/grpc++/impl/codegen/sync_stream.h           107
-rw-r--r--  src/compiler/cpp_generator.cc                        77
-rw-r--r--  src/ruby/lib/grpc/generic/rpc_server.rb             143
-rw-r--r--  src/ruby/spec/generic/rpc_server_pool_spec.rb       138
-rw-r--r--  src/ruby/spec/generic/rpc_server_spec.rb             13
-rw-r--r--  templates/grpc.gemspec.template                       1
-rw-r--r--  test/cpp/codegen/compiler_test_golden                43
-rw-r--r--  test/cpp/end2end/hybrid_end2end_test.cc              85
-rw-r--r--  test/cpp/end2end/mock_test.cc                         9
18 files changed, 427 insertions, 297 deletions
diff --git a/grpc.gemspec b/grpc.gemspec
index c08e333cb3..e8ddd5ae35 100755
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -29,6 +29,7 @@ Gem::Specification.new do |s|
s.add_dependency 'google-protobuf', '~> 3.0'
s.add_dependency 'googleauth', '~> 0.5.1'
+ s.add_dependency 'concurrent-ruby'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'
diff --git a/include/grpc++/ext/reflection.grpc.pb.h b/include/grpc++/ext/reflection.grpc.pb.h
index 0b4ef86147..064117e303 100644
--- a/include/grpc++/ext/reflection.grpc.pb.h
+++ b/include/grpc++/ext/reflection.grpc.pb.h
@@ -74,6 +74,7 @@
#include <grpc++/impl/codegen/async_stream.h>
#include <grpc++/impl/codegen/async_unary_call.h>
+#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc++/impl/codegen/proto_utils.h>
#include <grpc++/impl/codegen/rpc_method.h>
#include <grpc++/impl/codegen/service_type.h>
@@ -174,6 +175,7 @@ class ServerReflection GRPC_FINAL {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
};
+ typedef Service StreamedUnaryService;
};
} // namespace v1alpha
diff --git a/include/grpc++/impl/codegen/call.h b/include/grpc++/impl/codegen/call.h
index dfac177970..df225d362b 100644
--- a/include/grpc++/impl/codegen/call.h
+++ b/include/grpc++/impl/codegen/call.h
@@ -662,10 +662,10 @@ class Call GRPC_FINAL {
call_hook_->PerformOpsOnCall(ops, this);
}
- grpc_call* call() { return call_; }
- CompletionQueue* cq() { return cq_; }
+ grpc_call* call() const { return call_; }
+ CompletionQueue* cq() const { return cq_; }
- int max_message_size() { return max_message_size_; }
+ int max_message_size() const { return max_message_size_; }
private:
CallHook* call_hook_;
diff --git a/include/grpc++/impl/codegen/completion_queue.h b/include/grpc++/impl/codegen/completion_queue.h
index 03009e0561..78bc5ca481 100644
--- a/include/grpc++/impl/codegen/completion_queue.h
+++ b/include/grpc++/impl/codegen/completion_queue.h
@@ -68,8 +68,10 @@ template <class R>
class ServerReader;
template <class W>
class ServerWriter;
+namespace internal {
template <class W, class R>
-class ServerReaderWriter;
+class ServerReaderWriterBody;
+}
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@@ -178,15 +180,15 @@ class CompletionQueue : private GrpcLibraryCodegen {
template <class W>
friend class ::grpc::ServerWriter;
template <class W, class R>
- friend class ::grpc::ServerReaderWriter;
+ friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class BidiStreamingHandler;
+ template <class Streamer, bool WriteNeeded>
+ friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::Server;
friend class ::grpc::ServerContext;
diff --git a/include/grpc++/impl/codegen/method_handler_impl.h b/include/grpc++/impl/codegen/method_handler_impl.h
index 2f4be644ba..d989263252 100644
--- a/include/grpc++/impl/codegen/method_handler_impl.h
+++ b/include/grpc++/impl/codegen/method_handler_impl.h
@@ -167,20 +167,22 @@ class ServerStreamingHandler : public MethodHandler {
};
// A wrapper class of an application provided bidi-streaming handler.
-template <class ServiceType, class RequestType, class ResponseType>
-class BidiStreamingHandler : public MethodHandler {
+// This also applies to server-streamed implementation of a unary method
+// with the additional requirement that such methods must have done a
+// write for status to be ok
+// Since this is used by more than 1 class, the service is not passed in.
+// Instead, it is expected to be an implicitly-captured argument of func
+// (through bind or something along those lines)
+template <class Streamer, bool WriteNeeded>
+class TemplatedBidiStreamingHandler : public MethodHandler {
public:
- BidiStreamingHandler(
- std::function<Status(ServiceType*, ServerContext*,
- ServerReaderWriter<ResponseType, RequestType>*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
+ TemplatedBidiStreamingHandler(
+ std::function<Status(ServerContext*, Streamer*)> func)
+ : func_(func), write_needed_(WriteNeeded) {}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
- ServerReaderWriter<ResponseType, RequestType> stream(param.call,
- param.server_context);
- Status status = func_(service_, param.server_context, &stream);
+ Streamer stream(param.call, param.server_context);
+ Status status = func_(param.server_context, &stream);
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
if (!param.server_context->sent_initial_metadata_) {
@@ -189,6 +191,12 @@ class BidiStreamingHandler : public MethodHandler {
if (param.server_context->compression_level_set()) {
ops.set_compression_level(param.server_context->compression_level());
}
+ if (write_needed_ && status.ok()) {
+ // If we needed a write but never did one, we need to mark the
+ // status as a fail
+ status = Status(StatusCode::INTERNAL,
+ "Service did not provide response message");
+ }
}
ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
param.call->PerformOps(&ops);
@@ -196,10 +204,36 @@ class BidiStreamingHandler : public MethodHandler {
}
private:
- std::function<Status(ServiceType*, ServerContext*,
- ServerReaderWriter<ResponseType, RequestType>*)>
- func_;
- ServiceType* service_;
+ std::function<Status(ServerContext*, Streamer*)> func_;
+ const bool write_needed_;
+};
+
+template <class ServiceType, class RequestType, class ResponseType>
+class BidiStreamingHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerReaderWriter<ResponseType, RequestType>, false> {
+ public:
+ BidiStreamingHandler(
+ std::function<Status(ServiceType*, ServerContext*,
+ ServerReaderWriter<ResponseType, RequestType>*)>
+ func,
+ ServiceType* service)
+ : TemplatedBidiStreamingHandler<
+ ServerReaderWriter<ResponseType, RequestType>, false>(std::bind(
+ func, service, std::placeholders::_1, std::placeholders::_2)) {}
+};
+
+template <class RequestType, class ResponseType>
+class StreamedUnaryHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true> {
+ public:
+ explicit StreamedUnaryHandler(
+ std::function<Status(ServerContext*,
+ ServerUnaryStreamer<RequestType, ResponseType>*)>
+ func)
+ : TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true>(func) {}
};
// Handle unknown method by returning UNIMPLEMENTED error.
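For orientation, both BidiStreamingHandler and the new StreamedUnaryHandler are now thin wrappers over TemplatedBidiStreamingHandler, whose func_ takes only (ServerContext*, Streamer*); the service instance is folded in through std::bind, exactly as the comment above describes. Below is a minimal sketch of wiring such a handler by hand, assuming hypothetical EchoRequest/EchoResponse protobuf messages and a hypothetical MyServiceImpl class; generated code performs the equivalent bind inside the WithStreamedUnaryMethod_* constructor shown later in compiler_test_golden.

#include <functional>
#include <grpc++/grpc++.h>  // assumed umbrella header; pulls in the codegen headers

// EchoRequest/EchoResponse are assumed protoc-generated messages and
// MyServiceImpl is a hypothetical implementation class; neither is part of this patch.
class MyServiceImpl {
 public:
  ::grpc::Status StreamedEcho(
      ::grpc::ServerContext* context,
      ::grpc::ServerUnaryStreamer<EchoRequest, EchoResponse>* streamer);
};

::grpc::MethodHandler* MakeStreamedEchoHandler(MyServiceImpl* service) {
  // func_ receives only (ServerContext*, Streamer*); the service pointer is
  // captured by std::bind, which is why TemplatedBidiStreamingHandler itself
  // never needs to know the service type.
  return new ::grpc::StreamedUnaryHandler<EchoRequest, EchoResponse>(
      std::bind(&MyServiceImpl::StreamedEcho, service,
                std::placeholders::_1, std::placeholders::_2));
}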
diff --git a/include/grpc++/impl/codegen/rpc_method.h b/include/grpc++/impl/codegen/rpc_method.h
index 39cb4f75df..4897428074 100644
--- a/include/grpc++/impl/codegen/rpc_method.h
+++ b/include/grpc++/impl/codegen/rpc_method.h
@@ -60,11 +60,12 @@ class RpcMethod {
const char* name() const { return name_; }
RpcType method_type() const { return method_type_; }
+ void SetMethodType(RpcType type) { method_type_ = type; }
void* channel_tag() const { return channel_tag_; }
private:
const char* const name_;
- const RpcType method_type_;
+ RpcType method_type_;
void* const channel_tag_;
};
diff --git a/include/grpc++/impl/codegen/rpc_service_method.h b/include/grpc++/impl/codegen/rpc_service_method.h
index 8b1f026c91..52124fba0b 100644
--- a/include/grpc++/impl/codegen/rpc_service_method.h
+++ b/include/grpc++/impl/codegen/rpc_service_method.h
@@ -82,6 +82,7 @@ class RpcServiceMethod : public RpcMethod {
// if MethodHandler is nullptr, then this is an async method
MethodHandler* handler() const { return handler_.get(); }
void ResetHandler() { handler_.reset(); }
+ void SetHandler(MethodHandler* handler) { handler_.reset(handler); }
private:
void* server_tag_;
diff --git a/include/grpc++/impl/codegen/server_context.h b/include/grpc++/impl/codegen/server_context.h
index c9d1f4d69e..379c9f7cf8 100644
--- a/include/grpc++/impl/codegen/server_context.h
+++ b/include/grpc++/impl/codegen/server_context.h
@@ -65,8 +65,10 @@ template <class R>
class ServerReader;
template <class W>
class ServerWriter;
+namespace internal {
template <class W, class R>
-class ServerReaderWriter;
+class ServerReaderWriterBody;
+}
template <class ServiceType, class RequestType, class ResponseType>
class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
@@ -187,15 +189,15 @@ class ServerContext {
template <class W>
friend class ::grpc::ServerWriter;
template <class W, class R>
- friend class ::grpc::ServerReaderWriter;
+ friend class ::grpc::internal::ServerReaderWriterBody;
template <class ServiceType, class RequestType, class ResponseType>
friend class RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ClientStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType>
friend class ServerStreamingHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class BidiStreamingHandler;
+ template <class Streamer, bool WriteNeeded>
+ friend class TemplatedBidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::ClientContext;
diff --git a/include/grpc++/impl/codegen/service_type.h b/include/grpc++/impl/codegen/service_type.h
index c19dfc7d45..72b2225312 100644
--- a/include/grpc++/impl/codegen/service_type.h
+++ b/include/grpc++/impl/codegen/service_type.h
@@ -147,6 +147,17 @@ class Service {
methods_[index].reset();
}
+ void MarkMethodStreamedUnary(int index,
+ MethodHandler* streamed_unary_method) {
+ GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() &&
+ "Cannot mark an async or generic method Streamed Unary");
+ methods_[index]->SetHandler(streamed_unary_method);
+
+ // From the server's point of view, streamed unary is a special
+ // case of BIDI_STREAMING that has 1 read and 1 write, in that order.
+ methods_[index]->SetMethodType(::grpc::RpcMethod::BIDI_STREAMING);
+ }
+
private:
friend class Server;
friend class ServerInterface;
diff --git a/include/grpc++/impl/codegen/sync_stream.h b/include/grpc++/impl/codegen/sync_stream.h
index 7601ceae92..e1d4660ae7 100644
--- a/include/grpc++/impl/codegen/sync_stream.h
+++ b/include/grpc++/impl/codegen/sync_stream.h
@@ -79,6 +79,9 @@ class ReaderInterface {
public:
virtual ~ReaderInterface() {}
+ /// Upper bound on the next message size available for reading on this stream
+ virtual bool NextMessageSize(uint32_t* sz) = 0;
+
/// Blocking read a message and parse to \a msg. Returns \a true on success.
/// This is thread-safe with respect to \a Write or \a WritesDone methods on
/// the same stream. It should not be called concurrently with another \a
@@ -157,6 +160,11 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
cq_.Pluck(&ops); /// status ignored
}
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ *sz = call_.max_message_size();
+ return true;
+ }
+
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
@@ -302,6 +310,11 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
cq_.Pluck(&ops); // status ignored
}
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ *sz = call_.max_message_size();
+ return true;
+ }
+
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
@@ -369,6 +382,11 @@ class ServerReader GRPC_FINAL : public ServerReaderInterface<R> {
call_->cq()->Pluck(&ops);
}
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ *sz = call_->max_message_size();
+ return true;
+ }
+
bool Read(R* msg) GRPC_OVERRIDE {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
@@ -434,12 +452,15 @@ class ServerReaderWriterInterface : public ServerStreamingInterface,
public WriterInterface<W>,
public ReaderInterface<R> {};
+// Actual implementation of bi-directional streaming
+namespace internal {
template <class W, class R>
-class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
+class ServerReaderWriterBody GRPC_FINAL {
public:
- ServerReaderWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
+ ServerReaderWriterBody(Call* call, ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
- void SendInitialMetadata() GRPC_OVERRIDE {
+ void SendInitialMetadata() {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
@@ -453,15 +474,19 @@ class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
call_->cq()->Pluck(&ops);
}
- bool Read(R* msg) GRPC_OVERRIDE {
+ bool NextMessageSize(uint32_t* sz) {
+ *sz = call_->max_message_size();
+ return true;
+ }
+
+ bool Read(R* msg) {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
return call_->cq()->Pluck(&ops) && ops.got_message;
}
- using WriterInterface<W>::Write;
- bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
+ bool Write(const W& msg, const WriteOptions& options) {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) {
return false;
@@ -482,6 +507,76 @@ class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
Call* const call_;
ServerContext* const ctx_;
};
+}
+
+// class to represent the user API for a bidirectional streaming call
+template <class W, class R>
+class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
+ public:
+ ServerReaderWriter(Call* call, ServerContext* ctx) : body_(call, ctx) {}
+
+ void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
+
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ return body_.NextMessageSize(sz);
+ }
+
+ bool Read(R* msg) GRPC_OVERRIDE { return body_.Read(msg); }
+
+ using WriterInterface<W>::Write;
+ bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
+ return body_.Write(msg, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<W, R> body_;
+};
+
+/// A class to represent a flow-controlled unary call. This is something
+/// of a hybrid between conventional unary and streaming. This is invoked
+/// through a unary call on the client side, but the server responds to it
+/// as though it were a single-ping-pong streaming call. The server can use
+/// the \a NextMessageSize method to determine an upper-bound on the size of
+/// the message.
+/// A key difference relative to streaming: ServerUnaryStreamer
+/// must have exactly 1 Read and exactly 1 Write, in that order, to function
+/// correctly. Otherwise, the RPC is in error.
+template <class RequestType, class ResponseType>
+class ServerUnaryStreamer GRPC_FINAL
+ : public ServerReaderWriterInterface<ResponseType, RequestType> {
+ public:
+ ServerUnaryStreamer(Call* call, ServerContext* ctx)
+ : body_(call, ctx), read_done_(false), write_done_(false) {}
+
+ void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
+
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ return body_.NextMessageSize(sz);
+ }
+
+ bool Read(RequestType* request) GRPC_OVERRIDE {
+ if (read_done_) {
+ return false;
+ }
+ read_done_ = true;
+ return body_.Read(request);
+ }
+
+ using WriterInterface<ResponseType>::Write;
+ bool Write(const ResponseType& response,
+ const WriteOptions& options) GRPC_OVERRIDE {
+ if (write_done_ || !read_done_) {
+ return false;
+ }
+ write_done_ = true;
+ return body_.Write(response, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
+ bool read_done_;
+ bool write_done_;
+};
} // namespace grpc
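To make the ServerUnaryStreamer contract concrete: exactly one Read followed by exactly one Write is accepted. A second Read, or a Write issued before the Read, returns false because read_done_/write_done_ gate the calls, and TemplatedBidiStreamingHandler turns an OK status with no write into INTERNAL. The following is a short sketch of a conforming handler body, assuming hypothetical Req/Resp protobuf messages with a payload field.

#include <cstdint>
#include <grpc++/grpc++.h>  // assumed umbrella header

// Req/Resp and their payload()/set_payload() accessors are illustrative
// assumptions, not types from this patch.
::grpc::Status HandleStreamedUnary(
    ::grpc::ServerContext* context,
    ::grpc::ServerUnaryStreamer<Req, Resp>* streamer) {
  uint32_t max_sz = 0;
  streamer->NextMessageSize(&max_sz);   // upper bound: the call's max message size
  Req request;
  Resp response;
  if (!streamer->Read(&request)) {      // the one permitted read
    return ::grpc::Status(::grpc::StatusCode::INTERNAL, "read failed");
  }
  // A second Read() here would return false (read_done_ is already set), and a
  // Write() issued before the Read() would have returned false as well.
  response.set_payload(request.payload());
  if (!streamer->Write(response)) {     // the single write an OK status requires
    return ::grpc::Status(::grpc::StatusCode::INTERNAL, "write failed");
  }
  return ::grpc::Status::OK;
}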
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index c386115ec2..d0c35ea1ab 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -130,6 +130,7 @@ grpc::string GetHeaderIncludes(File *file, const Parameters &params) {
static const char *headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/async_unary_call.h",
+ "grpc++/impl/codegen/method_handler_impl.h",
"grpc++/impl/codegen/proto_utils.h",
"grpc++/impl/codegen/rpc_method.h",
"grpc++/impl/codegen/service_type.h",
@@ -604,6 +605,57 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
printer->Print(*vars, "};\n");
}
+void PrintHeaderServerMethodStreamedUnary(
+ Printer *printer, const Method *method,
+ std::map<grpc::string, grpc::string> *vars) {
+ (*vars)["Method"] = method->name();
+ (*vars)["Request"] = method->input_type_name();
+ (*vars)["Response"] = method->output_type_name();
+ if (method->NoStreaming()) {
+ printer->Print(*vars, "template <class BaseClass>\n");
+ printer->Print(*vars,
+ "class WithStreamedUnaryMethod_$Method$ : "
+ "public BaseClass {\n");
+ printer->Print(
+ " private:\n"
+ " void BaseClassMustBeDerivedFromService(const Service *service) "
+ "{}\n");
+ printer->Print(" public:\n");
+ printer->Indent();
+ printer->Print(*vars,
+ "WithStreamedUnaryMethod_$Method$() {\n"
+ " ::grpc::Service::MarkMethodStreamedUnary($Idx$,\n"
+ " new ::grpc::StreamedUnaryHandler< $Request$, "
+ "$Response$>(std::bind"
+ "(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
+ "Streamed$Method$, this, std::placeholders::_1, "
+ "std::placeholders::_2)));\n"
+ "}\n");
+ printer->Print(*vars,
+ "~WithStreamedUnaryMethod_$Method$() GRPC_OVERRIDE {\n"
+ " BaseClassMustBeDerivedFromService(this);\n"
+ "}\n");
+ printer->Print(
+ *vars,
+ "// disable regular version of this method\n"
+ "::grpc::Status $Method$("
+ "::grpc::ServerContext* context, const $Request$* request, "
+ "$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
+ " abort();\n"
+ " return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+ "}\n");
+ printer->Print(*vars,
+ "// replace default version of method with streamed unary\n"
+ "virtual ::grpc::Status Streamed$Method$("
+ "::grpc::ServerContext* context, "
+ "::grpc::ServerUnaryStreamer< "
+ "$Request$,$Response$>* server_unary_streamer)"
+ " = 0;\n");
+ printer->Outdent();
+ printer->Print(*vars, "};\n");
+ }
+}
+
void PrintHeaderServerMethodGeneric(
Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
@@ -770,6 +822,28 @@ void PrintHeaderService(Printer *printer, const Service *service,
PrintHeaderServerMethodGeneric(printer, service->method(i).get(), vars);
}
+ // Server side - Streamed Unary
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["Idx"] = as_string(i);
+ PrintHeaderServerMethodStreamedUnary(printer, service->method(i).get(),
+ vars);
+ }
+
+ printer->Print("typedef ");
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["method_name"] = service->method(i).get()->name();
+ if (service->method(i)->NoStreaming()) {
+ printer->Print(*vars, "WithStreamedUnaryMethod_$method_name$<");
+ }
+ }
+ printer->Print("Service");
+ for (int i = 0; i < service->method_count(); ++i) {
+ if (service->method(i)->NoStreaming()) {
+ printer->Print(" >");
+ }
+ }
+ printer->Print(" StreamedUnaryService;\n");
+
printer->Outdent();
printer->Print("};\n");
printer->Print(service->GetTrailingComments().c_str());
@@ -1080,6 +1154,9 @@ void PrintSourceService(Printer *printer, const Service *service,
(*vars)["Idx"] = as_string(i);
if (method->NoStreaming()) {
(*vars)["StreamingType"] = "NORMAL_RPC";
+ // NOTE: There is no reason to consider streamed-unary as a separate
+ // category here since this part is setting up the client-side stub
+ // and this appears as a NORMAL_RPC from the client-side.
} else if (method->ClientOnlyStreaming()) {
(*vars)["StreamingType"] = "CLIENT_STREAMING";
} else if (method->ServerOnlyStreaming()) {
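The StreamedUnaryService typedef emitted by the loops above wraps Service in one WithStreamedUnaryMethod_ layer per unary method and skips streaming methods entirely. For a hypothetical service with unary methods Foo and Bar and a bidi-streaming method Baz, the generated header would end with something like:

// Hypothetical generator output: Foo and Bar are unary, Baz is bidi streaming.
// Baz contributes neither a wrapper nor a closing '>'.
typedef WithStreamedUnaryMethod_Foo<WithStreamedUnaryMethod_Bar<Service > > StreamedUnaryService;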
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 8ea798dce0..da0f6503db 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -31,113 +31,10 @@ require_relative '../grpc'
require_relative 'active_call'
require_relative 'service'
require 'thread'
+require 'concurrent'
# GRPC contains the General RPC module.
module GRPC
- # Pool is a simple thread pool.
- class Pool
- # Default keep alive period is 1s
- DEFAULT_KEEP_ALIVE = 1
-
- def initialize(size, keep_alive: DEFAULT_KEEP_ALIVE)
- fail 'pool size must be positive' unless size > 0
- @jobs = Queue.new
- @size = size
- @stopped = false
- @stop_mutex = Mutex.new # needs to be held when accessing @stopped
- @stop_cond = ConditionVariable.new
- @workers = []
- @keep_alive = keep_alive
- end
-
- # Returns the number of jobs waiting
- def jobs_waiting
- @jobs.size
- end
-
- # Runs the given block on the queue with the provided args.
- #
- # @param args the args passed blk when it is called
- # @param blk the block to call
- def schedule(*args, &blk)
- return if blk.nil?
- @stop_mutex.synchronize do
- if @stopped
- GRPC.logger.warn('did not schedule job, already stopped')
- return
- end
- GRPC.logger.info('schedule another job')
- @jobs << [blk, args]
- end
- end
-
- # Starts running the jobs in the thread pool.
- def start
- @stop_mutex.synchronize do
- fail 'already stopped' if @stopped
- end
- until @workers.size == @size.to_i
- next_thread = Thread.new do
- catch(:exit) do # allows { throw :exit } to kill a thread
- loop_execute_jobs
- end
- remove_current_thread
- end
- @workers << next_thread
- end
- end
-
- # Stops the jobs in the pool
- def stop
- GRPC.logger.info('stopping, will wait for all the workers to exit')
- @workers.size.times { schedule { throw :exit } }
- @stop_mutex.synchronize do # wait @keep_alive for works to stop
- @stopped = true
- @stop_cond.wait(@stop_mutex, @keep_alive) if @workers.size > 0
- end
- forcibly_stop_workers
- GRPC.logger.info('stopped, all workers are shutdown')
- end
-
- protected
-
- # Forcibly shutdown any threads that are still alive.
- def forcibly_stop_workers
- return unless @workers.size > 0
- GRPC.logger.info("forcibly terminating #{@workers.size} worker(s)")
- @workers.each do |t|
- next unless t.alive?
- begin
- t.exit
- rescue StandardError => e
- GRPC.logger.warn('error while terminating a worker')
- GRPC.logger.warn(e)
- end
- end
- end
-
- # removes the threads from workers, and signal when all the
- # threads are complete.
- def remove_current_thread
- @stop_mutex.synchronize do
- @workers.delete(Thread.current)
- @stop_cond.signal if @workers.size.zero?
- end
- end
-
- def loop_execute_jobs
- loop do
- begin
- blk, args = @jobs.pop
- blk.call(*args)
- rescue StandardError => e
- GRPC.logger.warn('Error in worker thread')
- GRPC.logger.warn(e)
- end
- end
- end
- end
-
# RpcServer hosts a number of services and makes them available on the
# network.
class RpcServer
@@ -147,11 +44,14 @@ module GRPC
def_delegators :@server, :add_http2_port
- # Default thread pool size is 3
- DEFAULT_POOL_SIZE = 3
+ # Default max size of the thread pool size is 100
+ DEFAULT_MAX_POOL_SIZE = 100
+
+ # Default minimum size of the thread pool is 5
+ DEFAULT_MIN_POOL_SIZE = 5
- # Default max_waiting_requests size is 20
- DEFAULT_MAX_WAITING_REQUESTS = 20
+ # Default max_waiting_requests size is 60
+ DEFAULT_MAX_WAITING_REQUESTS = 60
# Default poll period is 1s
DEFAULT_POLL_PERIOD = 1
@@ -174,8 +74,8 @@ module GRPC
# There are some specific keyword args used to configure the RpcServer
# instance.
#
- # * pool_size: the size of the thread pool the server uses to run its
- # threads
+ # * pool_size: the maximum size of the thread pool that the server's
+ # thread pool can reach.
#
# * max_waiting_requests: the maximum number of requests that are not
# being handled to allow. When this limit is exceeded, the server responds
@@ -191,7 +91,8 @@ module GRPC
#
# * server_args:
# A server arguments hash to be passed down to the underlying core server
- def initialize(pool_size:DEFAULT_POOL_SIZE,
+ def initialize(pool_size:DEFAULT_MAX_POOL_SIZE,
+ min_pool_size:DEFAULT_MIN_POOL_SIZE,
max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
poll_period:DEFAULT_POLL_PERIOD,
connect_md_proc:nil,
@@ -199,8 +100,12 @@ module GRPC
@connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
@max_waiting_requests = max_waiting_requests
@poll_period = poll_period
- @pool_size = pool_size
- @pool = Pool.new(@pool_size)
+
+ @pool = Concurrent::ThreadPoolExecutor.new(
+ min_threads: [min_pool_size, pool_size].min,
+ max_threads: pool_size,
+ max_queue: max_waiting_requests,
+ fallback_policy: :discard)
@run_cond = ConditionVariable.new
@run_mutex = Mutex.new
# running_state can take 4 values: :not_started, :running, :stopping, and
@@ -221,7 +126,8 @@ module GRPC
end
deadline = from_relative_time(@poll_period)
@server.close(deadline)
- @pool.stop
+ @pool.shutdown
+ @pool.wait_for_termination
end
def running_state
@@ -318,7 +224,6 @@ module GRPC
def run
@run_mutex.synchronize do
fail 'cannot run without registering services' if rpc_descs.size.zero?
- @pool.start
@server.start
transition_running_state(:running)
@run_cond.broadcast
@@ -330,9 +235,11 @@ module GRPC
# Sends RESOURCE_EXHAUSTED if there are too many unprocessed jobs
def available?(an_rpc)
- jobs_count, max = @pool.jobs_waiting, @max_waiting_requests
+ jobs_count, max = @pool.queue_length, @pool.max_queue
GRPC.logger.info("waiting: #{jobs_count}, max: #{max}")
- return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
+
+ # remaining capacity for ThreadPoolExecutors is -1 if unbounded
+ return an_rpc if @pool.remaining_capacity != 0
GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
noop = proc { |x| x }
@@ -368,7 +275,7 @@ module GRPC
break if (!an_rpc.nil?) && an_rpc.call.nil?
active_call = new_active_server_call(an_rpc)
unless active_call.nil?
- @pool.schedule(active_call) do |ac|
+ @pool.post(active_call) do |ac|
c, mth = ac
begin
rpc_descs[mth].run_server_method(c, rpc_handlers[mth])
diff --git a/src/ruby/spec/generic/rpc_server_pool_spec.rb b/src/ruby/spec/generic/rpc_server_pool_spec.rb
deleted file mode 100644
index b67008de48..0000000000
--- a/src/ruby/spec/generic/rpc_server_pool_spec.rb
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-require 'grpc'
-
-describe GRPC::Pool do
- Pool = GRPC::Pool
-
- describe '#new' do
- it 'raises if a non-positive size is used' do
- expect { Pool.new(0) }.to raise_error
- expect { Pool.new(-1) }.to raise_error
- expect { Pool.new(Object.new) }.to raise_error
- end
-
- it 'is constructed OK with a positive size' do
- expect { Pool.new(1) }.not_to raise_error
- end
- end
-
- describe '#jobs_waiting' do
- it 'at start, it is zero' do
- p = Pool.new(1)
- expect(p.jobs_waiting).to be(0)
- end
-
- it 'it increases, with each scheduled job if the pool is not running' do
- p = Pool.new(1)
- job = proc {}
- expect(p.jobs_waiting).to be(0)
- 5.times do |i|
- p.schedule(&job)
- expect(p.jobs_waiting).to be(i + 1)
- end
- end
-
- it 'it decreases as jobs are run' do
- p = Pool.new(1)
- job = proc {}
- expect(p.jobs_waiting).to be(0)
- 3.times do
- p.schedule(&job)
- end
- p.start
- sleep 2
- expect(p.jobs_waiting).to be(0)
- end
- end
-
- describe '#schedule' do
- it 'return if the pool is already stopped' do
- p = Pool.new(1)
- p.stop
- job = proc {}
- expect { p.schedule(&job) }.to_not raise_error
- end
-
- it 'adds jobs that get run by the pool' do
- p = Pool.new(1)
- p.start
- o, q = Object.new, Queue.new
- job = proc { q.push(o) }
- p.schedule(&job)
- expect(q.pop).to be(o)
- p.stop
- end
- end
-
- describe '#stop' do
- it 'works when there are no scheduled tasks' do
- p = Pool.new(1)
- expect { p.stop }.not_to raise_error
- end
-
- it 'stops jobs when there are long running jobs' do
- p = Pool.new(1)
- p.start
- o, q = Object.new, Queue.new
- job = proc do
- sleep(5) # long running
- q.push(o)
- end
- p.schedule(&job)
- sleep(1) # should ensure the long job gets scheduled
- expect { p.stop }.not_to raise_error
- end
- end
-
- describe '#start' do
- it 'runs pre-scheduled jobs' do
- p = Pool.new(2)
- o, q = Object.new, Queue.new
- n = 5 # arbitrary
- n.times { p.schedule(o, &q.method(:push)) }
- p.start
- n.times { expect(q.pop).to be(o) }
- p.stop
- end
-
- it 'runs jobs as they are scheduled ' do
- p = Pool.new(2)
- o, q = Object.new, Queue.new
- p.start
- n = 5 # arbitrary
- n.times do
- p.schedule(o, &q.method(:push))
- expect(q.pop).to be(o)
- end
- p.stop
- end
- end
-end
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index 31157cf161..d362e48dee 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -395,9 +395,9 @@ describe GRPC::RpcServer do
it 'should return RESOURCE_EXHAUSTED on too many jobs', server: true do
opts = {
server_args: { a_channel_arg: 'an_arg' },
- pool_size: 1,
+ pool_size: 2,
poll_period: 1,
- max_waiting_requests: 0
+ max_waiting_requests: 1
}
alt_srv = RpcServer.new(**opts)
alt_srv.handle(SlowService)
@@ -406,24 +406,23 @@ describe GRPC::RpcServer do
t = Thread.new { alt_srv.run }
alt_srv.wait_till_running
req = EchoMsg.new
- n = 5 # arbitrary, use as many to ensure the server pool is exceeded
+ n = 20 # arbitrary, use as many to ensure the server pool is exceeded
threads = []
- one_failed_as_unavailable = false
+ bad_status_code = nil
n.times do
threads << Thread.new do
stub = SlowStub.new(alt_host, :this_channel_is_insecure)
begin
stub.an_rpc(req)
rescue GRPC::BadStatus => e
- one_failed_as_unavailable =
- e.code == StatusCodes::RESOURCE_EXHAUSTED
+ bad_status_code = e.code
end
end
end
threads.each(&:join)
alt_srv.stop
t.join
- expect(one_failed_as_unavailable).to be(true)
+ expect(bad_status_code).to be(StatusCodes::RESOURCE_EXHAUSTED)
end
end
diff --git a/templates/grpc.gemspec.template b/templates/grpc.gemspec.template
index f95adaf30f..75a6054b81 100644
--- a/templates/grpc.gemspec.template
+++ b/templates/grpc.gemspec.template
@@ -31,6 +31,7 @@
s.add_dependency 'google-protobuf', '~> 3.0'
s.add_dependency 'googleauth', '~> 0.5.1'
+ s.add_dependency 'concurrent-ruby'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'
diff --git a/test/cpp/codegen/compiler_test_golden b/test/cpp/codegen/compiler_test_golden
index ef3d1aaa51..7b0fd6ce80 100644
--- a/test/cpp/codegen/compiler_test_golden
+++ b/test/cpp/codegen/compiler_test_golden
@@ -43,6 +43,7 @@
#include <grpc++/impl/codegen/async_stream.h>
#include <grpc++/impl/codegen/async_unary_call.h>
+#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc++/impl/codegen/proto_utils.h>
#include <grpc++/impl/codegen/rpc_method.h>
#include <grpc++/impl/codegen/service_type.h>
@@ -206,6 +207,27 @@ class ServiceA GRPC_FINAL {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
};
+ template <class BaseClass>
+ class WithStreamedUnaryMethod_MethodA1 : public BaseClass {
+ private:
+ void BaseClassMustBeDerivedFromService(const Service *service) {}
+ public:
+ WithStreamedUnaryMethod_MethodA1() {
+ ::grpc::Service::MarkMethodStreamedUnary(0,
+ new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1<BaseClass>::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2)));
+ }
+ ~WithStreamedUnaryMethod_MethodA1() GRPC_OVERRIDE {
+ BaseClassMustBeDerivedFromService(this);
+ }
+ // disable regular version of this method
+ ::grpc::Status MethodA1(::grpc::ServerContext* context, const ::grpc::testing::Request* request, ::grpc::testing::Response* response) GRPC_FINAL GRPC_OVERRIDE {
+ abort();
+ return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+ }
+ // replace default version of method with streamed unary
+ virtual ::grpc::Status StreamedMethodA1(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::grpc::testing::Request,::grpc::testing::Response>* server_unary_streamer) = 0;
+ };
+ typedef WithStreamedUnaryMethod_MethodA1<Service > StreamedUnaryService;
};
// ServiceB leading comment 1
@@ -284,6 +306,27 @@ class ServiceB GRPC_FINAL {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
};
+ template <class BaseClass>
+ class WithStreamedUnaryMethod_MethodB1 : public BaseClass {
+ private:
+ void BaseClassMustBeDerivedFromService(const Service *service) {}
+ public:
+ WithStreamedUnaryMethod_MethodB1() {
+ ::grpc::Service::MarkMethodStreamedUnary(0,
+ new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1<BaseClass>::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2)));
+ }
+ ~WithStreamedUnaryMethod_MethodB1() GRPC_OVERRIDE {
+ BaseClassMustBeDerivedFromService(this);
+ }
+ // disable regular version of this method
+ ::grpc::Status MethodB1(::grpc::ServerContext* context, const ::grpc::testing::Request* request, ::grpc::testing::Response* response) GRPC_FINAL GRPC_OVERRIDE {
+ abort();
+ return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+ }
+ // replace default version of method with streamed unary
+ virtual ::grpc::Status StreamedMethodB1(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::grpc::testing::Request,::grpc::testing::Response>* server_unary_streamer) = 0;
+ };
+ typedef WithStreamedUnaryMethod_MethodB1<Service > StreamedUnaryService;
};
// ServiceB trailing comment 1
diff --git a/test/cpp/end2end/hybrid_end2end_test.cc b/test/cpp/end2end/hybrid_end2end_test.cc
index 7e0c0e8a7c..a6ea13aa8b 100644
--- a/test/cpp/end2end/hybrid_end2end_test.cc
+++ b/test/cpp/end2end/hybrid_end2end_test.cc
@@ -199,7 +199,8 @@ class HybridEnd2endTest : public ::testing::Test {
HybridEnd2endTest() {}
void SetUpServer(::grpc::Service* service1, ::grpc::Service* service2,
- AsyncGenericService* generic_service) {
+ AsyncGenericService* generic_service,
+ int max_message_size = 0) {
int port = grpc_pick_unused_port_or_die();
server_address_ << "localhost:" << port;
@@ -217,6 +218,11 @@ class HybridEnd2endTest : public ::testing::Test {
if (generic_service) {
builder.RegisterAsyncGenericService(generic_service);
}
+
+ if (max_message_size != 0) {
+ builder.SetMaxMessageSize(max_message_size);
+ }
+
// Create a separate cq for each potential handler.
for (int i = 0; i < 5; i++) {
cqs_.push_back(builder.AddCompletionQueue(false));
@@ -415,6 +421,83 @@ TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream_SyncDupService) {
request_stream_handler_thread.join();
}
+// Add a second service with one sync streamed unary method.
+class StreamedUnaryDupPkg
+ : public duplicate::EchoTestService::WithStreamedUnaryMethod_Echo<
+ TestServiceImplDupPkg> {
+ public:
+ Status StreamedEcho(ServerContext* context,
+ ServerUnaryStreamer<EchoRequest, EchoResponse>* stream)
+ GRPC_OVERRIDE {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ resp.set_message(req.message() + "_dup");
+ GPR_ASSERT(stream->Write(resp));
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_SyncStreamedUnaryDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ StreamedUnaryDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service that is fully Streamed Unary
+class FullyStreamedUnaryDupPkg
+ : public duplicate::EchoTestService::StreamedUnaryService {
+ public:
+ Status StreamedEcho(ServerContext* context,
+ ServerUnaryStreamer<EchoRequest, EchoResponse>* stream)
+ GRPC_OVERRIDE {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ resp.set_message(req.message() + "_dup");
+ GPR_ASSERT(stream->Write(resp));
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_SyncFullyStreamedUnaryDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ FullyStreamedUnaryDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
// Add a second service with one async method.
TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream_AsyncDupService) {
typedef EchoTestService::WithAsyncMethod_RequestStream<
diff --git a/test/cpp/end2end/mock_test.cc b/test/cpp/end2end/mock_test.cc
index 0ace5d9418..4052627122 100644
--- a/test/cpp/end2end/mock_test.cc
+++ b/test/cpp/end2end/mock_test.cc
@@ -31,6 +31,7 @@
*
*/
+#include <climits>
#include <thread>
#include <grpc++/channel.h>
@@ -63,6 +64,10 @@ class MockClientReaderWriter GRPC_FINAL
: public ClientReaderWriterInterface<W, R> {
public:
void WaitForInitialMetadata() GRPC_OVERRIDE {}
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ *sz = UINT_MAX;
+ return true;
+ }
bool Read(R* msg) GRPC_OVERRIDE { return true; }
bool Write(const W& msg) GRPC_OVERRIDE { return true; }
bool WritesDone() GRPC_OVERRIDE { return true; }
@@ -74,6 +79,10 @@ class MockClientReaderWriter<EchoRequest, EchoResponse> GRPC_FINAL
public:
MockClientReaderWriter() : writes_done_(false) {}
void WaitForInitialMetadata() GRPC_OVERRIDE {}
+ bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+ *sz = UINT_MAX;
+ return true;
+ }
bool Read(EchoResponse* msg) GRPC_OVERRIDE {
if (writes_done_) return false;
msg->set_message(last_message_);