Diffstat (limited to 'src')
-rw-r--r--  src/compiler/cpp_generator.cc                                         |  67
-rw-r--r--  src/core/ext/client_channel/lb_policy.h                               |   6
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c                                | 679
-rw-r--r--  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h    |   4
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c                      |  44
-rw-r--r--  src/core/ext/transport/chttp2/client/secure/secure_channel_create.c  |   2
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c                |  26
-rw-r--r--  src/core/lib/iomgr/resource_quota.c                                   |   7
-rw-r--r--  src/core/lib/iomgr/resource_quota.h                                   |   5
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c                                    |  36
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c                                 |  45
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.c                                    |  24
-rw-r--r--  src/core/lib/iomgr/tcp_uv.c                                           |  71
-rw-r--r--  src/core/lib/iomgr/tcp_uv.h                                           |   4
-rw-r--r--  src/core/lib/security/transport/security_connector.c                  |   4
-rw-r--r--  src/cpp/client/channel_cc.cc                                          |   6
-rw-r--r--  src/cpp/client/client_context.cc                                      |  12
-rw-r--r--  src/cpp/client/cronet_credentials.cc                                  |   8
-rw-r--r--  src/cpp/client/insecure_credentials.cc                                |   8
-rw-r--r--  src/cpp/client/secure_credentials.h                                   |  14
-rw-r--r--  src/cpp/common/channel_arguments.cc                                   |   5
-rw-r--r--  src/cpp/common/channel_filter.h                                       |   2
-rw-r--r--  src/cpp/common/secure_auth_context.h                                  |  21
-rw-r--r--  src/cpp/ext/proto_server_reflection.h                                 |   4
-rw-r--r--  src/cpp/server/dynamic_thread_pool.cc                                 |  18
-rw-r--r--  src/cpp/server/dynamic_thread_pool.h                                  |  17
-rw-r--r--  src/cpp/server/insecure_server_credentials.cc                         |   7
-rw-r--r--  src/cpp/server/secure_server_credentials.h                            |  11
-rw-r--r--  src/cpp/server/server_cc.cc                                           |  32
-rw-r--r--  src/cpp/server/server_context.cc                                      |  17
-rw-r--r--  src/cpp/thread_manager/thread_manager.cc                              |  29
-rw-r--r--  src/cpp/thread_manager/thread_manager.h                               |  13
-rw-r--r--  src/cpp/util/time_cc.cc                                               |   5
-rw-r--r--  src/proto/grpc/lb/v1/load_balancer.options                            |   3
-rw-r--r--  src/proto/grpc/lb/v1/load_balancer.proto                              |   8
-rw-r--r--  src/ruby/ext/grpc/rb_compression_options.c                            |   2
36 files changed, 713 insertions(+), 553 deletions(-)
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index fa72f9b0d9..a26eeb46b9 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -322,7 +322,7 @@ void PrintHeaderClientMethod(Printer *printer, const Method *method,
printer->Print(
*vars,
"::grpc::Status $Method$(::grpc::ClientContext* context, "
- "const $Request$& request, $Response$* response) GRPC_OVERRIDE;\n");
+ "const $Request$& request, $Response$* response) override;\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
@@ -417,37 +417,34 @@ void PrintHeaderClientMethod(Printer *printer, const Method *method,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
- "::grpc::CompletionQueue* cq) GRPC_OVERRIDE;\n");
+ "::grpc::CompletionQueue* cq) override;\n");
} else if (method->ClientOnlyStreaming()) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) "
- "GRPC_OVERRIDE;\n");
- printer->Print(
- *vars,
- "::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
- "::grpc::ClientContext* context, $Response$* response, "
- "::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
+ "override;\n");
+ printer->Print(*vars,
+ "::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
+ "::grpc::ClientContext* context, $Response$* response, "
+ "::grpc::CompletionQueue* cq, void* tag) override;\n");
} else if (method->ServerOnlyStreaming()) {
printer->Print(*vars,
"::grpc::ClientReader< $Response$>* $Method$Raw("
"::grpc::ClientContext* context, const $Request$& request)"
- " GRPC_OVERRIDE;\n");
+ " override;\n");
printer->Print(
*vars,
"::grpc::ClientAsyncReader< $Response$>* Async$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
+ "::grpc::CompletionQueue* cq, void* tag) override;\n");
} else if (method->BidiStreaming()) {
- printer->Print(
- *vars,
- "::grpc::ClientReaderWriter< $Request$, $Response$>* "
- "$Method$Raw(::grpc::ClientContext* context) GRPC_OVERRIDE;\n");
- printer->Print(
- *vars,
- "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
- "Async$Method$Raw(::grpc::ClientContext* context, "
- "::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
+ printer->Print(*vars,
+ "::grpc::ClientReaderWriter< $Request$, $Response$>* "
+ "$Method$Raw(::grpc::ClientContext* context) override;\n");
+ printer->Print(*vars,
+ "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
+ "Async$Method$Raw(::grpc::ClientContext* context, "
+ "::grpc::CompletionQueue* cq, void* tag) override;\n");
}
}
}
@@ -509,7 +506,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
" ::grpc::Service::MarkMethodAsync($Idx$);\n"
"}\n");
printer->Print(*vars,
- "~WithAsyncMethod_$Method$() GRPC_OVERRIDE {\n"
+ "~WithAsyncMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
if (method->NoStreaming()) {
@@ -518,7 +515,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
- "$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
+ "$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -540,7 +537,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReader< $Request$>* reader, "
- "$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
+ "$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -561,7 +558,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
- "::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
+ "::grpc::ServerWriter< $Response$>* writer) final override "
"{\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
@@ -585,7 +582,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReaderWriter< $Response$, $Request$>* stream) "
- "GRPC_FINAL GRPC_OVERRIDE {\n"
+ "final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -632,7 +629,7 @@ void PrintHeaderServerMethodStreamedUnary(
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
- "~WithStreamedUnaryMethod_$Method$() GRPC_OVERRIDE {\n"
+ "~WithStreamedUnaryMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
printer->Print(
@@ -640,7 +637,7 @@ void PrintHeaderServerMethodStreamedUnary(
"// disable regular version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
- "$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
+ "$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -683,7 +680,7 @@ void PrintHeaderServerMethodSplitStreaming(
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
- "~WithSplitStreamingMethod_$Method$() GRPC_OVERRIDE {\n"
+ "~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
printer->Print(
@@ -691,7 +688,7 @@ void PrintHeaderServerMethodSplitStreaming(
"// disable regular version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
- "::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
+ "::grpc::ServerWriter< $Response$>* writer) final override "
"{\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
@@ -727,7 +724,7 @@ void PrintHeaderServerMethodGeneric(
" ::grpc::Service::MarkMethodGeneric($Idx$);\n"
"}\n");
printer->Print(*vars,
- "~WithGenericMethod_$Method$() GRPC_OVERRIDE {\n"
+ "~WithGenericMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
if (method->NoStreaming()) {
@@ -736,7 +733,7 @@ void PrintHeaderServerMethodGeneric(
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
- "$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
+ "$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -747,7 +744,7 @@ void PrintHeaderServerMethodGeneric(
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReader< $Request$>* reader, "
- "$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
+ "$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -757,7 +754,7 @@ void PrintHeaderServerMethodGeneric(
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
- "::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
+ "::grpc::ServerWriter< $Response$>* writer) final override "
"{\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
@@ -769,7 +766,7 @@ void PrintHeaderServerMethodGeneric(
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReaderWriter< $Response$, $Request$>* stream) "
- "GRPC_FINAL GRPC_OVERRIDE {\n"
+ "final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@@ -784,7 +781,7 @@ void PrintHeaderService(Printer *printer, const Service *service,
printer->Print(service->GetLeadingComments().c_str());
printer->Print(*vars,
- "class $Service$ GRPC_FINAL {\n"
+ "class $Service$ final {\n"
" public:\n");
printer->Indent();
@@ -810,7 +807,7 @@ void PrintHeaderService(Printer *printer, const Service *service,
printer->Outdent();
printer->Print("};\n");
printer->Print(
- "class Stub GRPC_FINAL : public StubInterface"
+ "class Stub final : public StubInterface"
" {\n public:\n");
printer->Indent();
printer->Print(
diff --git a/src/core/ext/client_channel/lb_policy.h b/src/core/ext/client_channel/lb_policy.h
index 54ad779792..120c641edc 100644
--- a/src/core/ext/client_channel/lb_policy.h
+++ b/src/core/ext/client_channel/lb_policy.h
@@ -109,10 +109,16 @@ struct grpc_lb_policy_vtable {
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+
+/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+
+/* Weak references: they don't prevent the shutdown of the LB policy. When no
+ * strong references are left but there are still weak ones, shutdown is called.
+ * Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index 6da4febf26..30e412e358 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -43,30 +43,23 @@
* policy to select from this list of LB server backends.
*
* The first time the policy gets a request for a pick, a ping, or to exit the
- * idle state, \a query_for_backends() is called. It creates an instance of \a
- * lb_client_data, an internal struct meant to contain the data associated with
- * the internal communication with the LB server. This instance is created via
- * \a lb_client_data_create(). There, the call over lb_channel to pick-first
- * from {a1..an} is created, the \a LoadBalancingRequest message is assembled
- * and all necessary callbacks for the progress of the internal call configured.
+ * idle state, \a query_for_backends_locked() is called. This function sets up
+ * and initiates the internal communication with the LB server. In particular,
+ * it's responsible for instantiating the internal *streaming* call to the LB
+ * server (whichever address from {a1..an} pick-first chose). This call is
+ * serviced by two callbacks, \a lb_on_server_status_received and \a
+ * lb_on_response_received. The former will be called when the call to the LB
+ * server completes. This can happen if the LB server closes the connection or
+ * if this policy itself cancels the call (for example because it's shutting
+ * down). If the internal call times out, the usual behavior of pick-first
+ * applies, continuing to pick from the list {a1..an}.
*
- * Back in \a query_for_backends(), the internal *streaming* call to the LB
- * server (whichever address from {a1..an} pick-first chose) is kicked off.
- * It'll progress over the callbacks configured in \a lb_client_data_create()
- * (see the field docstrings of \a lb_client_data for more details).
- *
- * If the call fails with UNIMPLEMENTED, the original call will also fail.
- * There's a misconfiguration somewhere: at least one of {a1..an} isn't a LB
- * server, which contradicts the LB bit being set. If the internal call times
- * out, the usual behavior of pick-first applies, continuing to pick from the
- * list {a1..an}.
- *
- * Upon sucesss, a \a LoadBalancingResponse is expected in \a res_recv_cb. An
- * invalid one results in the termination of the streaming call. A new streaming
- * call should be created if possible, failing the original call otherwise.
- * For a valid \a LoadBalancingResponse, the server list of actual backends is
- * extracted. A Round Robin policy will be created from this list. There are two
- * possible scenarios:
+ * Upon success, the incoming \a LoadBalancingResponse is processed by \a
+ * res_recv. An invalid one results in the termination of the streaming call. A
+ * new streaming call should be created if possible, failing the original call
+ * otherwise. For a valid \a LoadBalancingResponse, the server list of actual
+ * backends is extracted. A Round Robin policy will be created from this list.
+ * There are two possible scenarios:
*
* 1. This is the first server list received. There was no previous instance of
* the Round Robin policy. \a rr_handover_locked() will instantiate the RR
@@ -84,10 +77,10 @@
* Once a RR policy instance is in place (and getting updated as described),
* calls to for a pick, a ping or a cancellation will be serviced right away by
* forwarding them to the RR instance. Any time there's no RR policy available
- * (ie, right after the creation of the gRPCLB policy, if an empty serverlist
- * is received, etc), pick/ping requests are added to a list of pending
- * picks/pings to be flushed and serviced as part of \a rr_handover_locked() the
- * moment the RR policy instance becomes available.
+ * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
+ * received, etc), pick/ping requests are added to a list of pending picks/pings
+ * to be flushed and serviced as part of \a rr_handover_locked() the moment the
+ * RR policy instance becomes available.
*
* \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
* high level design and details. */
@@ -120,12 +113,20 @@
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/static_metadata.h"
+#define BACKOFF_MULTIPLIER 1.6
+#define BACKOFF_JITTER 0.2
+#define BACKOFF_MIN_SECONDS 10
+#define BACKOFF_MAX_SECONDS 60
+
int grpc_lb_glb_trace = 0;
/* add lb_token of selected subchannel (address) to the call's initial
@@ -174,13 +175,12 @@ typedef struct wrapped_rr_closure_arg {
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_rr_closure_arg *wc_arg = arg;
- if (wc_arg->rr_policy != NULL) {
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
- (intptr_t)wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
+ NULL);
+
+ if (wc_arg->rr_policy != NULL) {
/* if target is NULL, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
@@ -189,10 +189,12 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
wc_arg->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
}
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
- NULL);
GPR_ASSERT(wc_arg->free_when_done != NULL);
gpr_free(wc_arg->free_when_done);
}
@@ -264,7 +266,6 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
-struct lb_client_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
/** base policy: must be first */
@@ -296,20 +297,47 @@ typedef struct glb_lb_policy {
* response has arrived. */
grpc_grpclb_serverlist *serverlist;
- /** addresses from \a serverlist */
- grpc_lb_addresses *addresses;
-
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
/** list of pings that are waiting on RR's policy connectivity */
pending_ping *pending_pings;
- /** client data associated with the LB server communication */
- struct lb_client_data *lb_client;
+ bool shutting_down;
+
+ /************************************************************/
+ /* client data associated with the LB server communication */
+ /************************************************************/
+ /* Status from the LB server has been received. This signals the end of the LB
+ * call. */
+ grpc_closure lb_on_server_status_received;
+
+ /* A response from the LB server has been received. Process it */
+ grpc_closure lb_on_response_received;
+
+ grpc_call *lb_call; /* streaming call to the LB server, */
+
+ grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
+ grpc_metadata_array
+ lb_trailing_metadata_recv; /* trailing MD from LB server */
+
+ /* what's being sent to the LB server. Note that its value may vary if the LB
+ * server indicates a redirect. */
+ grpc_byte_buffer *lb_request_payload;
+
+ /* response from the LB server, if any. Processed in lb_on_response_received() */
+ grpc_byte_buffer *lb_response_payload;
+
+ /* call status code and details, set in lb_on_server_status_received() */
+ grpc_status_code lb_call_status;
+ char *lb_call_status_details;
+ size_t lb_call_status_details_capacity;
+
+ /** LB call retry backoff state */
+ gpr_backoff lb_call_backoff_state;
- /** for tracking of the RR connectivity */
- rr_connectivity_data *rr_connectivity;
+ /** LB call retry timer */
+ grpc_timer lb_call_retry_timer;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -358,6 +386,28 @@ static int lb_token_cmp(void *token1, void *token2) {
static const grpc_lb_user_data_vtable lb_token_vtable = {
lb_token_copy, lb_token_destroy, lb_token_cmp};
+static void parse_server(const grpc_grpclb_server *server,
+ grpc_resolved_address *addr) {
+ const uint16_t netorder_port = htons((uint16_t)server->port);
+ /* the addresses are given in binary format (an in(6)_addr struct) in
+ * server->ip_address.bytes. */
+ const grpc_grpclb_ip_address *ip = &server->ip_address;
+ memset(addr, 0, sizeof(*addr));
+ if (ip->size == 4) {
+ addr->len = sizeof(struct sockaddr_in);
+ struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
+ addr4->sin_family = AF_INET;
+ memcpy(&addr4->sin_addr, ip->bytes, ip->size);
+ addr4->sin_port = netorder_port;
+ } else if (ip->size == 16) {
+ addr->len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
+ addr6->sin6_family = AF_INET6;
+ memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
+ addr6->sin6_port = netorder_port;
+ }
+}
+
/* Returns addresses extracted from \a serverlist. */
static grpc_lb_addresses *process_serverlist(
const grpc_grpclb_serverlist *serverlist) {
@@ -384,33 +434,18 @@ static grpc_lb_addresses *process_serverlist(
if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
/* address processing */
- const uint16_t netorder_port = htons((uint16_t)server->port);
- /* the addresses are given in binary format (a in(6)_addr struct) in
- * server->ip_address.bytes. */
- const grpc_grpclb_ip_address *ip = &server->ip_address;
grpc_resolved_address addr;
- memset(&addr, 0, sizeof(addr));
- if (ip->size == 4) {
- addr.len = sizeof(struct sockaddr_in);
- struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr.addr;
- addr4->sin_family = AF_INET;
- memcpy(&addr4->sin_addr, ip->bytes, ip->size);
- addr4->sin_port = netorder_port;
- } else if (ip->size == 16) {
- addr.len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr.addr;
- addr6->sin6_family = AF_INET;
- memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
- addr6->sin6_port = netorder_port;
- }
+ parse_server(server, &addr);
/* lb token processing */
void *user_data;
if (server->has_load_balance_token) {
- const size_t lb_token_size =
- GPR_ARRAY_SIZE(server->load_balance_token) - 1;
+ const size_t lb_token_max_length =
+ GPR_ARRAY_SIZE(server->load_balance_token);
+ const size_t lb_token_length =
+ strnlen(server->load_balance_token, lb_token_max_length);
grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
- (uint8_t *)server->load_balance_token, lb_token_size);
+ (uint8_t *)server->load_balance_token, lb_token_length);
user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
lb_token_mdstr);
} else {
@@ -427,7 +462,6 @@ static grpc_lb_addresses *process_serverlist(
++addr_idx;
}
GPR_ASSERT(addr_idx == num_valid);
-
return lb_addresses;
}
@@ -448,7 +482,7 @@ static bool pick_from_internal_rr_locked(
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick");
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(pick_args->initial_metadata,
@@ -461,7 +495,6 @@ static bool pick_from_internal_rr_locked(
* pending pick list inside the RR policy (glb_policy->rr_policy).
* Eventually, wrapped_on_complete will be called, which will -among other
* things- add the LB token to the call's initial metadata */
-
return pick_done;
}
@@ -470,54 +503,70 @@ static grpc_lb_policy *create_rr_locked(
glb_lb_policy *glb_policy) {
GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
- if (glb_policy->addresses != NULL) {
- /* dispose of the previous version */
- grpc_lb_addresses_destroy(glb_policy->addresses);
- }
- glb_policy->addresses = process_serverlist(serverlist);
-
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
args.client_channel_factory = glb_policy->cc_factory;
+ grpc_lb_addresses *addresses = process_serverlist(serverlist);
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
- const grpc_arg arg =
- grpc_lb_addresses_create_channel_arg(glb_policy->addresses);
+ const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
args.args = grpc_channel_args_copy_and_add_and_remove(
glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
1);
grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
+ GPR_ASSERT(rr != NULL);
+ grpc_lb_addresses_destroy(addresses);
grpc_channel_args_destroy(args.args);
-
return rr;
}
+static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy, grpc_error *error) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
+
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "RR handover. Old RR: %p", (void *)glb_policy->rr_policy);
+ }
+ if (glb_policy->rr_policy != NULL) {
+ /* if we are phasing out an existing RR instance, unref it. */
+ GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
+ }
+
glb_policy->rr_policy =
create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
-
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
- (intptr_t)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "Created RR policy (%p)", (void *)glb_policy->rr_policy);
}
+
GPR_ASSERT(glb_policy->rr_policy != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx,
glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
- glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
+
+ rr_connectivity_data *rr_connectivity =
+ gpr_malloc(sizeof(rr_connectivity_data));
+ memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
+ grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
+ rr_connectivity);
+ rr_connectivity->glb_policy = glb_policy;
+ rr_connectivity->state = grpc_lb_policy_check_connectivity(
exec_ctx, glb_policy->rr_policy, &error);
- grpc_lb_policy_notify_on_state_change(
- exec_ctx, glb_policy->rr_policy, &glb_policy->rr_connectivity->state,
- &glb_policy->rr_connectivity->on_change);
+
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
- glb_policy->rr_connectivity->state,
- GRPC_ERROR_REF(error), "rr_handover");
+ rr_connectivity->state, GRPC_ERROR_REF(error),
+ "rr_handover");
+ /* subscribe */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
+ grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
+ &rr_connectivity->state,
+ &rr_connectivity->on_change);
grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
/* flush pending ops */
@@ -551,35 +600,27 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
+ /* If shutdown or error free the arg. Rely on the rest of the code to set the
+ * right grpclb status. */
rr_connectivity_data *rr_conn_data = arg;
glb_lb_policy *glb_policy = rr_conn_data->glb_policy;
- if (rr_conn_data->state == GRPC_CHANNEL_SHUTDOWN) {
- if (glb_policy->serverlist != NULL) {
- /* a RR policy is shutting down but there's a serverlist available ->
- * perform a handover */
- gpr_mu_lock(&glb_policy->mu);
- rr_handover_locked(exec_ctx, glb_policy, error);
- gpr_mu_unlock(&glb_policy->mu);
- } else {
- /* shutting down and no new serverlist available. Bail out. */
- gpr_free(rr_conn_data);
- }
+ if (rr_conn_data->state != GRPC_CHANNEL_SHUTDOWN &&
+ !glb_policy->shutting_down) {
+ gpr_mu_lock(&glb_policy->mu);
+ /* RR not shutting down. Mimic the RR's policy state */
+ grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
+ rr_conn_data->state, GRPC_ERROR_REF(error),
+ "rr_connectivity_cb");
+ /* resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
+ grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
+ &rr_conn_data->state,
+ &rr_conn_data->on_change);
+ gpr_mu_unlock(&glb_policy->mu);
} else {
- if (error == GRPC_ERROR_NONE) {
- gpr_mu_lock(&glb_policy->mu);
- /* RR not shutting down. Mimic the RR's policy state */
- grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
- rr_conn_data->state, GRPC_ERROR_REF(error),
- "glb_rr_connectivity_changed");
- /* resubscribe */
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_conn_data->state,
- &rr_conn_data->on_change);
- gpr_mu_unlock(&glb_policy->mu);
- } else { /* error */
- gpr_free(rr_conn_data);
- }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "rr_connectivity_cb");
+ gpr_free(rr_conn_data);
}
}
@@ -682,18 +723,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
- rr_connectivity_data *rr_connectivity =
- gpr_malloc(sizeof(rr_connectivity_data));
- memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
- grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
- rr_connectivity);
- rr_connectivity->glb_policy = glb_policy;
- glb_policy->rr_connectivity = rr_connectivity;
-
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
gpr_mu_init(&glb_policy->mu);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
+
return &glb_policy->base;
}
@@ -710,14 +744,13 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
gpr_mu_destroy(&glb_policy->mu);
- grpc_lb_addresses_destroy(glb_policy->addresses);
gpr_free(glb_policy);
}
-static void lb_client_data_destroy(struct lb_client_data *lb_client);
static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
+ glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
@@ -741,15 +774,16 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
if (glb_policy->rr_policy) {
- /* unsubscribe */
- grpc_lb_policy_notify_on_state_change(
- exec_ctx, glb_policy->rr_policy, NULL,
- &glb_policy->rr_connectivity->on_change);
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
}
- lb_client_data_destroy(glb_policy->lb_client);
- glb_policy->lb_client = NULL;
+ if (glb_policy->started_picking) {
+ if (glb_policy->lb_call != NULL) {
+ grpc_call_cancel(glb_policy->lb_call, NULL);
+ /* lb_on_server_status_received will pick up the cancellation and clean up
+ */
+ }
+ }
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
@@ -780,17 +814,12 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static grpc_call *lb_client_data_get_call(struct lb_client_data *lb_client);
static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
- if (glb_policy->lb_client != NULL) {
- /* cancel the call to the load balancer service, if any */
- grpc_call_cancel(lb_client_data_get_call(glb_policy->lb_client), NULL);
- }
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@@ -810,18 +839,20 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void query_for_backends(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy);
-static void start_picking(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy);
+static void start_picking_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
glb_policy->started_picking = true;
- query_for_backends(exec_ctx, glb_policy);
+ gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ query_for_backends_locked(exec_ctx, glb_policy);
}
static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
}
@@ -847,8 +878,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (glb_policy->rr_policy != NULL) {
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "about to PICK from 0x%" PRIxPTR "",
- (intptr_t)glb_policy->rr_policy);
+ gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
+ (void *)glb_policy, (void *)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
@@ -865,11 +896,17 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
pick_args, target, wc_arg);
} else {
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_DEBUG,
+ "No RR policy in grpclb instance %p. Adding to grpclb's pending "
+ "picks",
+ (void *)(glb_policy));
+ }
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
on_complete);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
pick_done = false;
}
@@ -898,7 +935,7 @@ static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
- start_picking(exec_ctx, glb_policy);
+ start_picking_locked(exec_ctx, glb_policy);
}
}
gpr_mu_unlock(&glb_policy->mu);
@@ -916,250 +953,182 @@ static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&glb_policy->mu);
}
-/*
- * lb_client_data
- *
- * Used internally for the client call to the LB */
-typedef struct lb_client_data {
- gpr_mu mu;
-
- /* called once initial metadata's been sent */
- grpc_closure md_sent;
-
- /* called once the LoadBalanceRequest has been sent to the LB server. See
- * src/proto/grpc/.../load_balancer.proto */
- grpc_closure req_sent;
-
- /* A response from the LB server has been received (or error). Process it */
- grpc_closure res_rcvd;
-
- /* After the client has sent a close to the LB server */
- grpc_closure close_sent;
-
- /* ... and the status from the LB server has been received */
- grpc_closure srv_status_rcvd;
-
- grpc_call *lb_call; /* streaming call to the LB server, */
- gpr_timespec deadline; /* for the streaming call to the LB server */
-
- grpc_metadata_array initial_metadata_recv; /* initial MD from LB server */
- grpc_metadata_array trailing_metadata_recv; /* trailing MD from LB server */
-
- /* what's being sent to the LB server. Note that its value may vary if the LB
- * server indicates a redirect. */
- grpc_byte_buffer *request_payload;
-
- /* response from the LB server, if any. Processed in res_recv_cb() */
- grpc_byte_buffer *response_payload;
-
- /* the call's status and status detailset in srv_status_rcvd_cb() */
- grpc_status_code status;
- char *status_details;
- size_t status_details_capacity;
-
- /* pointer back to the enclosing policy */
- glb_lb_policy *glb_policy;
-} lb_client_data;
-
-static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
-static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
+static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+static void lb_call_init(glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
- lb_client_data *lb_client = gpr_malloc(sizeof(lb_client_data));
- memset(lb_client, 0, sizeof(lb_client_data));
-
- gpr_mu_init(&lb_client->mu);
- grpc_closure_init(&lb_client->md_sent, md_sent_cb, lb_client);
-
- grpc_closure_init(&lb_client->req_sent, req_sent_cb, lb_client);
- grpc_closure_init(&lb_client->res_rcvd, res_recv_cb, lb_client);
- grpc_closure_init(&lb_client->close_sent, close_sent_cb, lb_client);
- grpc_closure_init(&lb_client->srv_status_rcvd, srv_status_rcvd_cb, lb_client);
-
- lb_client->deadline = glb_policy->deadline;
-
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
- lb_client->lb_call = grpc_channel_create_pollset_set_call(
+ glb_policy->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
"/grpc.lb.v1.LoadBalancer/BalanceLoad", glb_policy->server_name,
- lb_client->deadline, NULL);
+ glb_policy->deadline, NULL);
- grpc_metadata_array_init(&lb_client->initial_metadata_recv);
- grpc_metadata_array_init(&lb_client->trailing_metadata_recv);
+ grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
+ grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
grpc_grpclb_request *request =
grpc_grpclb_request_create(glb_policy->server_name);
gpr_slice request_payload_slice = grpc_grpclb_request_encode(request);
- lb_client->request_payload =
+ glb_policy->lb_request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_slice_unref(request_payload_slice);
grpc_grpclb_request_destroy(request);
- lb_client->status_details = NULL;
- lb_client->status_details_capacity = 0;
- lb_client->glb_policy = glb_policy;
- return lb_client;
+ glb_policy->lb_call_status_details = NULL;
+ glb_policy->lb_call_status_details_capacity = 0;
+
+ grpc_closure_init(&glb_policy->lb_on_server_status_received,
+ lb_on_server_status_received, glb_policy);
+ grpc_closure_init(&glb_policy->lb_on_response_received,
+ lb_on_response_received, glb_policy);
+
+ gpr_backoff_init(&glb_policy->lb_call_backoff_state, BACKOFF_MULTIPLIER,
+ BACKOFF_JITTER, BACKOFF_MIN_SECONDS * 1000,
+ BACKOFF_MAX_SECONDS * 1000);
}
-static void lb_client_data_destroy(lb_client_data *lb_client) {
- grpc_call_destroy(lb_client->lb_call);
- grpc_metadata_array_destroy(&lb_client->initial_metadata_recv);
- grpc_metadata_array_destroy(&lb_client->trailing_metadata_recv);
+static void lb_call_destroy(glb_lb_policy *glb_policy) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+ grpc_call_destroy(glb_policy->lb_call);
+ glb_policy->lb_call = NULL;
- grpc_byte_buffer_destroy(lb_client->request_payload);
+ grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
+ grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
- gpr_free(lb_client->status_details);
- gpr_mu_destroy(&lb_client->mu);
- gpr_free(lb_client);
-}
-static grpc_call *lb_client_data_get_call(lb_client_data *lb_client) {
- return lb_client->lb_call;
+ grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
+ gpr_free(glb_policy->lb_call_status_details);
}
/*
* Auxiliary functions and LB client callbacks.
*/
-static void query_for_backends(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
+ lb_call_init(glb_policy);
+
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
+ (void *)glb_policy, (void *)glb_policy->lb_call);
+ }
+ GPR_ASSERT(glb_policy->lb_call != NULL);
- glb_policy->lb_client = lb_client_data_create(glb_policy);
grpc_call_error call_error;
- grpc_op ops[1];
+ grpc_op ops[4];
memset(ops, 0, sizeof(ops));
+
grpc_op *op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_client->md_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- op = ops;
- op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
- op->data.recv_status_on_client.trailing_metadata =
- &glb_policy->lb_client->trailing_metadata_recv;
- op->data.recv_status_on_client.status = &glb_policy->lb_client->status;
- op->data.recv_status_on_client.status_details =
- &glb_policy->lb_client->status_details;
- op->data.recv_status_on_client.status_details_capacity =
- &glb_policy->lb_client->status_details_capacity;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata = &glb_policy->lb_initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_client->srv_status_rcvd);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
- GPR_ASSERT(lb_client->lb_call);
- grpc_op ops[1];
- memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ GPR_ASSERT(glb_policy->lb_request_payload != NULL);
op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message = lb_client->request_payload;
+ op->data.send_message = glb_policy->lb_request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->req_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
- GPR_ASSERT(lb_client->lb_call);
-
- grpc_op ops[2];
- memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
- op->op = GRPC_OP_RECV_INITIAL_METADATA;
- op->data.recv_initial_metadata = &lb_client->initial_metadata_recv;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata =
+ &glb_policy->lb_trailing_metadata_recv;
+ op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
+ op->data.recv_status_on_client.status_details =
+ &glb_policy->lb_call_status_details;
+ op->data.recv_status_on_client.status_details_capacity =
+ &glb_policy->lb_call_status_details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
+ /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
+ * count goes to zero) to be unref'd in lb_on_server_status_received */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
+ call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_server_status_received);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+ op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message = &lb_client->response_payload;
+ op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->res_rcvd);
+ /* take another weak ref to be unref'd in lb_on_response_received */
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
+ call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- lb_client_data *lb_client = arg;
+static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
- if (lb_client->response_payload != NULL) {
+ if (glb_policy->lb_response_payload != NULL) {
+ gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
- * lb_client->response_payload, for a serverlist. */
+ * glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, lb_client->response_payload);
+ grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
gpr_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
- grpc_byte_buffer_destroy(lb_client->response_payload);
+ grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
grpc_grpclb_serverlist *serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
gpr_slice_unref(response_slice);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
+ for (size_t i = 0; i < serverlist->num_servers; ++i) {
+ grpc_resolved_address addr;
+ parse_server(serverlist->servers[i], &addr);
+ char *ipport;
+ grpc_sockaddr_to_string(&ipport, &addr, false);
+ gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+ gpr_free(ipport);
+ }
}
/* update serverlist */
if (serverlist->num_servers > 0) {
- gpr_mu_lock(&lb_client->glb_policy->mu);
- if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
- serverlist)) {
+ gpr_mu_lock(&glb_policy->mu);
+ if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
}
} else { /* new serverlist */
- if (lb_client->glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
- grpc_grpclb_destroy_serverlist(lb_client->glb_policy->serverlist);
+ grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
/* and update the copy in the glb_lb_policy instance */
- lb_client->glb_policy->serverlist = serverlist;
- }
- if (lb_client->glb_policy->rr_policy == NULL) {
- /* initial "handover", in this case from a null RR policy, meaning
- * it'll just create the first RR policy instance */
- rr_handover_locked(exec_ctx, lb_client->glb_policy, error);
- } else {
- /* unref the RR policy, eventually leading to its substitution with a
- * new one constructed from the received serverlist (see
- * glb_rr_connectivity_changed) */
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
- "serverlist_received");
+ glb_policy->serverlist = serverlist;
+
+ rr_handover_locked(exec_ctx, glb_policy, error);
}
- gpr_mu_unlock(&lb_client->glb_policy->mu);
+ gpr_mu_unlock(&glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@@ -1167,60 +1136,94 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
"response with > 0 servers is received");
}
}
+ } else { /* serverlist == NULL */
+ gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+ gpr_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
+ gpr_slice_unref(response_slice);
+ }
+ if (!glb_policy->shutting_down) {
/* keep listening for serverlist updates */
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message = &lb_client->response_payload;
+ op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
+ /* reuse the "lb_on_response_received" weak ref taken in
+ * query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->res_rcvd); /* loop */
+ exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+ &glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
- return;
}
-
- GPR_ASSERT(serverlist == NULL);
- gpr_log(GPR_ERROR, "Invalid LB response received: '%s'",
- gpr_dump_slice(response_slice, GPR_DUMP_ASCII));
- gpr_slice_unref(response_slice);
-
- /* Disconnect from server returning invalid response. */
- op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
- op->flags = 0;
- op->reserved = NULL;
- op++;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
- &lb_client->close_sent);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
+ } else { /* empty payload: call cancelled. */
+ /* dispose of the "lb_on_response_received" weak ref taken in
+ * query_for_backends_locked() and reused in every reception loop */
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_response_received_empty_payload");
}
- /* empty payload: call cancelled by server. Cleanups happening in
- * srv_status_rcvd_cb */
}
-static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO,
- "Close from LB client sent. Waiting from server status now");
+static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+ gpr_mu_lock(&glb_policy->mu);
+
+ if (!glb_policy->shutting_down) {
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->lb_call == NULL);
+ query_for_backends_locked(exec_ctx, glb_policy);
}
+ gpr_mu_unlock(&glb_policy->mu);
+
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "grpclb_on_retry_timer");
}
-static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- lb_client_data *lb_client = arg;
+static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = arg;
+ gpr_mu_lock(&glb_policy->mu);
+
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+
if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO,
- "status from lb server received. Status = %d, Details = '%s', "
- "Capacity "
- "= %lu",
- lb_client->status, lb_client->status_details,
- (unsigned long)lb_client->status_details_capacity);
+ gpr_log(GPR_DEBUG,
+ "Status from LB server received. Status = %d, Details = '%s', "
+ "(call: %p)",
+ glb_policy->lb_call_status, glb_policy->lb_call_status_details,
+ (void *)glb_policy->lb_call);
+ }
+
+ /* We need to perform cleanups no matter what. */
+ lb_call_destroy(glb_policy);
+
+ if (!glb_policy->shutting_down) {
+ /* if we aren't shutting down, restart the LB client call after some time */
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec next_try =
+ gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
+ (void *)glb_policy);
+ gpr_timespec timeout = gpr_time_sub(next_try, now);
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
+ timeout.tv_sec, timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "... retrying immediately.");
+ }
+ }
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+ lb_call_on_retry_timer, glb_policy, now);
}
- /* TODO(dgq): deal with stream termination properly (fire up another one?
- * fail the original call?) */
+ gpr_mu_unlock(&glb_policy->mu);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_server_status_received");
}
/* Code wiring the policy with the rest of the core */
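A note on the retry schedule this diff introduces: with BACKOFF_MULTIPLIER 1.6, BACKOFF_JITTER 0.2 and the 10s/60s bounds defined at the top of grpclb.c, the delay between LB-call restarts grows geometrically until it saturates. A standalone sketch of the schedule (not part of the tree; assumes the first delay equals the 10s minimum and ignores jitter):

    #include <stdio.h>

    int main(void) {
      /* mirrors BACKOFF_MIN_SECONDS, BACKOFF_MULTIPLIER, BACKOFF_MAX_SECONDS */
      double delay = 10.0;
      for (int attempt = 1; attempt <= 6; ++attempt) {
        printf("retry #%d after ~%.2fs\n", attempt, delay);
        delay *= 1.6;
        if (delay > 60.0) delay = 60.0;
      }
      return 0; /* prints 10, 16, 25.6, 40.96, 60, 60; gpr_backoff_step()
                   additionally perturbs each value by up to +/-20% jitter */
    }

This is why lb_on_server_status_received() can afford to restart unconditionally whenever the policy isn't shutting down: repeated failures back off toward one attempt per minute instead of tight-looping against a dead balancer.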
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index 53fed22bae..e36d0966f8 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -77,7 +77,7 @@ typedef struct _grpc_lb_v1_Server {
bool has_port;
int32_t port;
bool has_load_balance_token;
- char load_balance_token[65];
+ char load_balance_token[50];
bool has_drop_request;
bool drop_request;
/* @@protoc_insertion_point(struct:grpc_lb_v1_Server) */
@@ -172,7 +172,7 @@ extern const pb_field_t grpc_lb_v1_Server_fields[5];
#define grpc_lb_v1_LoadBalanceResponse_size (98 + grpc_lb_v1_ServerList_size)
#define grpc_lb_v1_InitialLoadBalanceResponse_size 90
/* grpc_lb_v1_ServerList_size depends on runtime parameters */
-#define grpc_lb_v1_Server_size 98
+#define grpc_lb_v1_Server_size 83
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
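The two changes above are consistent with each other: nanopb's worst-case encoded size for a fixed-length string field shrinks byte for byte with its buffer, so trimming load_balance_token from 65 to 50 bytes takes grpc_lb_v1_Server_size from 98 down to 98 - (65 - 50) = 83.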
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 37a9b18b97..427999aa6b 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -120,6 +120,8 @@ typedef struct {
grpc_connectivity_state connectivity_state;
/** the subchannel's target user data */
void *user_data;
+ /** vtable to operate over \a user_data */
+ const grpc_lb_user_data_vtable *user_data_vtable;
} subchannel_data;
struct round_robin_lb_policy {
@@ -186,9 +188,13 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
}
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "[READYLIST] ADVANCED LAST PICK. NOW AT NODE %p (SC %p)",
- (void *)p->ready_list_last_pick,
- (void *)p->ready_list_last_pick->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
+ "CSC %p)",
+ (void *)p, (void *)p->ready_list_last_pick,
+ (void *)p->ready_list_last_pick->subchannel,
+ (void *)grpc_subchannel_get_connected_subchannel(
+ p->ready_list_last_pick->subchannel));
}
}
@@ -255,9 +261,18 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
+ }
+
for (size_t i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin_destroy");
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(sd->user_data);
+ }
gpr_free(sd);
}
@@ -285,6 +300,9 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
gpr_mu_lock(&p->mu);
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
+ }
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@@ -296,7 +314,7 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "shutdown");
+ GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@@ -395,6 +413,11 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
+
+ if (grpc_lb_round_robin_trace) {
+ gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
+ }
+
if ((selected = peek_next_connected_locked(p))) {
/* readily available, report right away */
*target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -435,7 +458,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
- ready_list *selected;
int unref = 0;
@@ -456,12 +478,14 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemtively replicates rr_pick()'s actions. */
- selected = peek_next_connected_locked(p);
+ ready_list *selected = peek_next_connected_locked(p);
+ GPR_ASSERT(selected != NULL);
if (p->pending_picks != NULL) {
/* if the selected subchannel is going to be used for the pending
* picks, update the last picked pointer */
advance_last_picked_locked(p);
}
+
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
@@ -653,7 +677,11 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sd->policy = p;
sd->index = subchannel_idx;
sd->subchannel = subchannel;
- sd->user_data = addresses->addresses[i].user_data;
+ sd->user_data_vtable = addresses->user_data_vtable;
+ if (sd->user_data_vtable != NULL) {
+ sd->user_data =
+ sd->user_data_vtable->copy(addresses->addresses[i].user_data);
+ }
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);
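With this change round_robin stops borrowing the per-address user_data pointer: it deep-copies it through addresses->user_data_vtable when each subchannel is created and releases its copy in rr_destroy(). A minimal sketch of callbacks satisfying that copy/destroy contract, assuming the user data is a heap-allocated token string (the names here are illustrative, not from this commit):

    #include <grpc/support/alloc.h>
    #include <grpc/support/string_util.h>

    /* copy() runs once per subchannel in round_robin_create(); the policy
       owns the returned pointer. */
    static void *token_copy(void *token) {
      return gpr_strdup((const char *)token);
    }

    /* destroy() runs once per subchannel in rr_destroy(). */
    static void token_destroy(void *token) { gpr_free(token); }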
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index 57e1a8ec01..d0ac72a011 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -347,7 +347,7 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
&exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
// Clean up.
GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
- "client_channel_factory_create_channel");
+ "secure_client_channel_factory_create_channel");
grpc_channel_args_destroy(new_args);
grpc_client_channel_factory_unref(&exec_ctx, &f->base);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 8180f78fc0..9702cb2c81 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -50,6 +50,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
+#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
@@ -1578,6 +1579,20 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
grpc_chttp2_maybe_complete_recv_initial_metadata,
grpc_chttp2_maybe_complete_recv_trailing_metadata};
+static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
+ grpc_error *error) {
+ grpc_chttp2_stream *s = sp;
+ grpc_chttp2_transport *t = s->t;
+ if (!s->write_closed) {
+ gpr_slice_buffer_add(
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "force_rst_stream");
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
+}
+
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
@@ -1613,6 +1628,17 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
s->header_frames_received++;
}
if (parser->is_eof) {
+ if (t->is_client && !s->write_closed) {
+ /* server eof ==> complete closure; we may need to forcefully close
+         the stream. However, wait to do so until the combiner lock is
+         about to be released -- a RST_STREAM from the peer may still
+         arrive in the meantime and let us avoid the extra write */
+ GRPC_CHTTP2_STREAM_REF(s, "final_rst");
+ grpc_combiner_execute_finally(
+ exec_ctx, t->combiner,
+ grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE,
+ false);
+ }
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index e39cf28e35..8a06443d58 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -715,3 +715,10 @@ void grpc_resource_user_alloc_slices(
grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
count * length, &slice_allocator->on_allocated);
}
+
+gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ size_t size) {
+ grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL);
+ return ru_slice_create(resource_user, size);
+}
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 6dfac55f88..da68f21a2c 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -221,4 +221,9 @@ void grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator *slice_allocator, size_t length,
size_t count, gpr_slice_buffer *dest);
+/* Allocate one slice of length \a size synchronously. */
+gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ size_t size);
+
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
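Note that the synchronous variant charges the quota optimistically: the NULL closure passed to grpc_resource_user_alloc() in the implementation above means the caller never waits for reclamation, which is why tcp_uv.c (further down in this diff) can call it from libuv's alloc callback, a context that cannot yield. A usage sketch under those assumptions, with resource_user assumed to be in scope:

    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    /* Charges 8192 bytes against resource_user; the returned slice comes
       from ru_slice_create(), so unreffing it returns the bytes to the
       quota. */
    gpr_slice read_slice =
        grpc_resource_user_slice_malloc(&exec_ctx, resource_user, 8192);
    /* ... fill the slice and hand it off ... */
    gpr_slice_unref(read_slice);
    grpc_exec_ctx_finish(&exec_ctx);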
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index 6274667042..b07f9ceffa 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -54,9 +54,12 @@ typedef struct grpc_uv_tcp_connect {
grpc_endpoint **endpoint;
int refs;
char *addr_name;
+ grpc_resource_quota *resource_quota;
} grpc_uv_tcp_connect;
-static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect *connect) {
+static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
+ grpc_uv_tcp_connect *connect) {
+ grpc_resource_quota_internal_unref(exec_ctx, connect->resource_quota);
gpr_free(connect);
}
@@ -74,7 +77,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
}
done = (--connect->refs == 0);
if (done) {
- uv_tcp_connect_cleanup(connect);
+ uv_tcp_connect_cleanup(exec_ctx, connect);
}
}
@@ -86,8 +89,8 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
grpc_closure *closure = connect->closure;
grpc_timer_cancel(&exec_ctx, &connect->alarm);
if (status == 0) {
- *connect->endpoint =
- grpc_tcp_create(connect->tcp_handle, connect->addr_name);
+ *connect->endpoint = grpc_tcp_create(
+ connect->tcp_handle, connect->resource_quota, connect->addr_name);
} else {
error = GRPC_ERROR_CREATE("Failed to connect to remote host");
error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
@@ -105,7 +108,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
}
done = (--connect->refs == 0);
if (done) {
- uv_tcp_connect_cleanup(connect);
+ uv_tcp_connect_cleanup(&exec_ctx, connect);
}
grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
@@ -114,16 +117,31 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *resolved_addr,
gpr_timespec deadline) {
grpc_uv_tcp_connect *connect;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ (void)channel_args;
(void)interested_parties;
+
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
memset(connect, 0, sizeof(grpc_uv_tcp_connect));
connect->closure = closure;
connect->endpoint = ep;
connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
+ connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
// TODO(murgatroid99): figure out what the return value here means
@@ -138,16 +156,18 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const grpc_resolved_address *addr,
+ grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
+ const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) {
- grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
- deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif /* GRPC_UV */
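The uv connect path now honors GRPC_ARG_RESOURCE_QUOTA from the channel args, falling back to a fresh per-connect quota when none is supplied. A hedged sketch of how a caller threads a quota through (manual arg construction shown only for illustration; C++ callers go through ChannelArguments::SetResourceQuota(), which uses grpc_resource_quota_arg_vtable() as seen later in this diff):

    grpc_resource_quota *quota = grpc_resource_quota_create("uv_client_quota");
    grpc_arg arg;
    arg.type = GRPC_ARG_POINTER;
    arg.key = (char *)GRPC_ARG_RESOURCE_QUOTA;
    arg.value.pointer.p = quota;
    arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
    grpc_channel_args args = {1, &arg};
    /* args can now flow into grpc_tcp_client_connect()'s new channel_args
       parameter; the connect path above refs the quota it finds. */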
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index b6fc1e4ca2..7e2fb0f1f9 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -657,41 +657,46 @@ done:
}
}
+/* Return listener at port_index or NULL. Should only be called with s->mu
+ locked. */
+static grpc_tcp_listener *get_port_index(grpc_tcp_server *s,
+ unsigned port_index) {
+ unsigned num_ports = 0;
+ grpc_tcp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
+ if (!sp->is_sibling) {
+ if (++num_ports > port_index) {
+ return sp;
+ }
+ }
+ }
+ return NULL;
+}
+
unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
unsigned port_index) {
unsigned num_fds = 0;
- grpc_tcp_listener *sp;
gpr_mu_lock(&s->mu);
- for (sp = s->head; sp && port_index != 0; sp = sp->next) {
- if (!sp->is_sibling) {
- --port_index;
- }
+ grpc_tcp_listener *sp = get_port_index(s, port_index);
+ for (; sp; sp = sp->sibling) {
+ ++num_fds;
}
- for (; sp; sp = sp->sibling, ++num_fds)
- ;
gpr_mu_unlock(&s->mu);
return num_fds;
}
int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
unsigned fd_index) {
- grpc_tcp_listener *sp;
- int fd;
gpr_mu_lock(&s->mu);
- for (sp = s->head; sp && port_index != 0; sp = sp->next) {
- if (!sp->is_sibling) {
- --port_index;
+ grpc_tcp_listener *sp = get_port_index(s, port_index);
+ for (; sp; sp = sp->sibling, --fd_index) {
+ if (fd_index == 0) {
+ gpr_mu_unlock(&s->mu);
+ return sp->fd;
}
}
- for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
- ;
- if (sp) {
- fd = sp->fd;
- } else {
- fd = -1;
- }
gpr_mu_unlock(&s->mu);
- return fd;
+ return -1;
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
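Both fd accessors now share get_port_index(): port_index selects a listener group (siblings are not counted), fd_index walks that group's sibling chain, and an out-of-range index now returns -1 explicitly rather than by falling out of a bare loop. Enumerating every fd bound for one port then looks like this sketch (server and port_index assumed to be in scope):

    unsigned nfds = grpc_tcp_server_port_fd_count(server, port_index);
    for (unsigned i = 0; i < nfds; i++) {
      int fd = grpc_tcp_server_port_fd(server, port_index, i);
      /* fd is one of the port's listening sockets; the bound above keeps
         i in range, so fd != -1 here. */
    }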
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
index 73e4db3d65..b5b9b92a20 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -76,13 +76,30 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
+
+ grpc_resource_quota *resource_quota;
};
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
- (void)args;
+ s->resource_quota = grpc_resource_quota_create(NULL);
+ for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a buffer pool");
+ }
+ }
+ }
gpr_ref_init(&s->refs, 1);
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
@@ -119,6 +136,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(sp->handle);
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
}
@@ -201,7 +219,7 @@ static void on_connect(uv_stream_t *server, int status) {
} else {
gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
}
- ep = grpc_tcp_create(client, peer_name_string);
+ ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
&acceptor);
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 3860fe3e9b..8e74c9e863 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -54,6 +54,9 @@ typedef struct {
grpc_endpoint base;
gpr_refcount refcount;
+ uv_write_t write_req;
+ uv_shutdown_t shutdown_req;
+
uv_tcp_t *handle;
grpc_closure *read_cb;
@@ -64,14 +67,23 @@ typedef struct {
gpr_slice_buffer *write_slices;
uv_buf_t *write_buffers;
+ grpc_resource_user resource_user;
+
bool shutting_down;
+ bool resource_user_shutting_down;
+
char *peer_string;
grpc_pollset *pollset;
} grpc_tcp;
static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
-static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); }
+static void tcp_free(grpc_tcp *tcp) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_user_destroy(&exec_ctx, &tcp->resource_user);
+ gpr_free(tcp);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
/*#define GRPC_TCP_REFCOUNT_DEBUG*/
#ifdef GRPC_TCP_REFCOUNT_DEBUG
@@ -106,11 +118,14 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = handle->data;
(void)suggested_size;
- tcp->read_slice = gpr_slice_malloc(GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
+ tcp->read_slice = grpc_resource_user_slice_malloc(
+ &exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
buf->base = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
buf->len = GPR_SLICE_LENGTH(tcp->read_slice);
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void read_callback(uv_stream_t *stream, ssize_t nread,
@@ -198,7 +213,8 @@ static void write_callback(uv_write_t *req, int status) {
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
gpr_free(tcp->write_buffers);
- gpr_free(req);
+ grpc_resource_user_free(&exec_ctx, &tcp->resource_user,
+ sizeof(uv_buf_t) * tcp->write_slices->count);
grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -243,12 +259,15 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
buffer_count = (unsigned int)tcp->write_slices->count;
buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
+ grpc_resource_user_alloc(exec_ctx, &tcp->resource_user,
+ sizeof(uv_buf_t) * buffer_count, NULL);
for (i = 0; i < buffer_count; i++) {
slice = &tcp->write_slices->slices[i];
buffers[i].base = (char *)GPR_SLICE_START_PTR(*slice);
buffers[i].len = GPR_SLICE_LENGTH(*slice);
}
- write_req = gpr_malloc(sizeof(uv_write_t));
+ tcp->write_buffers = buffers;
+ write_req = &tcp->write_req;
write_req->data = tcp;
TCP_REF(tcp, "write");
// TODO(murgatroid99): figure out what the return value here means
@@ -274,13 +293,29 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
(void)pollset;
}
-static void shutdown_callback(uv_shutdown_t *req, int status) { gpr_free(req); }
+static void shutdown_callback(uv_shutdown_t *req, int status) {}
+
+static void resource_user_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ TCP_UNREF(arg, "resource_user");
+}
+
+static void uv_resource_user_maybe_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_tcp *tcp) {
+ if (!tcp->resource_user_shutting_down) {
+ tcp->resource_user_shutting_down = true;
+ TCP_REF(tcp, "resource_user");
+ grpc_resource_user_shutdown(
+ exec_ctx, &tcp->resource_user,
+ grpc_closure_create(resource_user_shutdown_done, tcp));
+ }
+}
static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
if (!tcp->shutting_down) {
tcp->shutting_down = true;
- uv_shutdown_t *req = gpr_malloc(sizeof(uv_shutdown_t));
+ uv_shutdown_t *req = &tcp->shutdown_req;
uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);
}
}
@@ -289,6 +324,7 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
+ uv_resource_user_maybe_shutdown(exec_ctx, tcp);
TCP_UNREF(tcp, "destroy");
}
@@ -297,18 +333,21 @@ static char *uv_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
+static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return &tcp->resource_user;
+}
+
static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
-static grpc_endpoint_vtable vtable = {uv_endpoint_read,
- uv_endpoint_write,
- uv_get_workqueue,
- uv_add_to_pollset,
- uv_add_to_pollset_set,
- uv_endpoint_shutdown,
- uv_destroy,
- uv_get_peer};
+static grpc_endpoint_vtable vtable = {
+ uv_endpoint_read, uv_endpoint_write, uv_get_workqueue,
+ uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown,
+ uv_destroy, uv_get_resource_user, uv_get_peer};
-grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
+ grpc_resource_quota *resource_quota,
+ char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
if (grpc_tcp_trace) {
@@ -325,6 +364,8 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
gpr_ref_init(&tcp->refcount, 1);
tcp->peer_string = gpr_strdup(peer_string);
tcp->shutting_down = false;
+ tcp->resource_user_shutting_down = false;
+ grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
index eed41151ea..970fcafe4a 100644
--- a/src/core/lib/iomgr/tcp_uv.h
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -52,6 +52,8 @@ extern int grpc_tcp_trace;
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
-grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string);
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
+ grpc_resource_quota *resource_quota,
+ char *peer_string);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 0eca46eb52..ebf72a3abb 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -210,11 +210,11 @@ void grpc_security_connector_unref(grpc_security_connector *sc) {
}
static void connector_pointer_arg_destroy(void *p) {
- GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg");
+ GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg_destroy");
}
static void *connector_pointer_arg_copy(void *p) {
- return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg");
+ return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy");
}
static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc
index 43b3875cb3..847c8c7dc0 100644
--- a/src/cpp/client/channel_cc.cc
+++ b/src/cpp/client/channel_cc.cc
@@ -106,11 +106,11 @@ grpc_connectivity_state Channel::GetState(bool try_to_connect) {
}
namespace {
-class TagSaver GRPC_FINAL : public CompletionQueueTag {
+class TagSaver final : public CompletionQueueTag {
public:
explicit TagSaver(void* tag) : tag_(tag) {}
- ~TagSaver() GRPC_OVERRIDE {}
- bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
+ ~TagSaver() override {}
+ bool FinalizeResult(void** tag, bool* status) override {
*tag = tag_;
delete this;
return true;
diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc
index b6008f47b1..c073741dac 100644
--- a/src/cpp/client/client_context.cc
+++ b/src/cpp/client/client_context.cc
@@ -45,12 +45,12 @@
namespace grpc {
-class DefaultGlobalClientCallbacks GRPC_FINAL
+class DefaultGlobalClientCallbacks final
: public ClientContext::GlobalCallbacks {
public:
- ~DefaultGlobalClientCallbacks() GRPC_OVERRIDE {}
- void DefaultConstructor(ClientContext* context) GRPC_OVERRIDE {}
- void Destructor(ClientContext* context) GRPC_OVERRIDE {}
+ ~DefaultGlobalClientCallbacks() override {}
+ void DefaultConstructor(ClientContext* context) override {}
+ void Destructor(ClientContext* context) override {}
};
static DefaultGlobalClientCallbacks g_default_client_callbacks;
@@ -93,7 +93,7 @@ void ClientContext::AddMetadata(const grpc::string& meta_key,
void ClientContext::set_call(grpc_call* call,
const std::shared_ptr<Channel>& channel) {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
GPR_ASSERT(call_ == nullptr);
call_ = call;
channel_ = channel;
@@ -119,7 +119,7 @@ void ClientContext::set_compression_algorithm(
}
void ClientContext::TryCancel() {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (call_) {
grpc_call_cancel(call_, nullptr);
} else {
diff --git a/src/cpp/client/cronet_credentials.cc b/src/cpp/client/cronet_credentials.cc
index 60cad097db..8e94cf0ad7 100644
--- a/src/cpp/client/cronet_credentials.cc
+++ b/src/cpp/client/cronet_credentials.cc
@@ -40,12 +40,12 @@
namespace grpc {
-class CronetChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
+class CronetChannelCredentialsImpl final : public ChannelCredentials {
public:
CronetChannelCredentialsImpl(void* engine) : engine_(engine) {}
std::shared_ptr<grpc::Channel> CreateChannel(
- const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE {
+ const string& target, const grpc::ChannelArguments& args) override {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
return CreateChannelInternal(
@@ -53,9 +53,7 @@ class CronetChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
&channel_args, nullptr));
}
- SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE {
- return nullptr;
- }
+ SecureChannelCredentials* AsSecureCredentials() override { return nullptr; }
private:
void* engine_;
diff --git a/src/cpp/client/insecure_credentials.cc b/src/cpp/client/insecure_credentials.cc
index 13019a7117..116f1dd4ad 100644
--- a/src/cpp/client/insecure_credentials.cc
+++ b/src/cpp/client/insecure_credentials.cc
@@ -43,10 +43,10 @@
namespace grpc {
namespace {
-class InsecureChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
+class InsecureChannelCredentialsImpl final : public ChannelCredentials {
public:
std::shared_ptr<grpc::Channel> CreateChannel(
- const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE {
+ const string& target, const grpc::ChannelArguments& args) override {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
return CreateChannelInternal(
@@ -54,9 +54,7 @@ class InsecureChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
grpc_insecure_channel_create(target.c_str(), &channel_args, nullptr));
}
- SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE {
- return nullptr;
- }
+ SecureChannelCredentials* AsSecureCredentials() override { return nullptr; }
};
} // namespace
diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h
index ae41ef8007..281db17e98 100644
--- a/src/cpp/client/secure_credentials.h
+++ b/src/cpp/client/secure_credentials.h
@@ -43,34 +43,34 @@
namespace grpc {
-class SecureChannelCredentials GRPC_FINAL : public ChannelCredentials {
+class SecureChannelCredentials final : public ChannelCredentials {
public:
explicit SecureChannelCredentials(grpc_channel_credentials* c_creds);
~SecureChannelCredentials() { grpc_channel_credentials_release(c_creds_); }
grpc_channel_credentials* GetRawCreds() { return c_creds_; }
std::shared_ptr<grpc::Channel> CreateChannel(
- const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE;
- SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE { return this; }
+ const string& target, const grpc::ChannelArguments& args) override;
+ SecureChannelCredentials* AsSecureCredentials() override { return this; }
private:
grpc_channel_credentials* const c_creds_;
};
-class SecureCallCredentials GRPC_FINAL : public CallCredentials {
+class SecureCallCredentials final : public CallCredentials {
public:
explicit SecureCallCredentials(grpc_call_credentials* c_creds);
~SecureCallCredentials() { grpc_call_credentials_release(c_creds_); }
grpc_call_credentials* GetRawCreds() { return c_creds_; }
- bool ApplyToCall(grpc_call* call) GRPC_OVERRIDE;
- SecureCallCredentials* AsSecureCredentials() GRPC_OVERRIDE { return this; }
+ bool ApplyToCall(grpc_call* call) override;
+ SecureCallCredentials* AsSecureCredentials() override { return this; }
private:
grpc_call_credentials* const c_creds_;
};
-class MetadataCredentialsPluginWrapper GRPC_FINAL {
+class MetadataCredentialsPluginWrapper final {
public:
static void Destroy(void* wrapper);
static void GetMetadata(void* wrapper, grpc_auth_metadata_context context,
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index d136d49c89..c6cad8eeae 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -121,6 +121,11 @@ void ChannelArguments::SetResourceQuota(
grpc_resource_quota_arg_vtable());
}
+void ChannelArguments::SetLoadBalancingPolicyName(
+ const grpc::string& lb_policy_name) {
+ SetString(GRPC_ARG_LB_POLICY_NAME, lb_policy_name);
+}
+
void ChannelArguments::SetInt(const grpc::string& key, int value) {
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
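SetLoadBalancingPolicyName() simply stores the name under GRPC_ARG_LB_POLICY_NAME via SetString(). A usage sketch (the channel-creation call is assumed surrounding context, not part of this change); "round_robin" matches the policy updated earlier in this diff:

    #include <grpc++/create_channel.h>
    #include <grpc++/security/credentials.h>
    #include <grpc++/support/channel_arguments.h>

    grpc::ChannelArguments args;
    args.SetLoadBalancingPolicyName("round_robin");
    std::shared_ptr<grpc::Channel> channel = grpc::CreateCustomChannel(
        "localhost:50051", grpc::InsecureChannelCredentials(), args);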
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index ae32e02f69..fc0deff3b3 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -268,7 +268,7 @@ namespace internal {
// Members of this class correspond to the members of the C
// grpc_channel_filter struct.
template <typename ChannelDataType, typename CallDataType>
-class ChannelFilter GRPC_FINAL {
+class ChannelFilter final {
public:
static const size_t channel_data_size = sizeof(ChannelDataType);
diff --git a/src/cpp/common/secure_auth_context.h b/src/cpp/common/secure_auth_context.h
index c9f1dad131..98f5f09e27 100644
--- a/src/cpp/common/secure_auth_context.h
+++ b/src/cpp/common/secure_auth_context.h
@@ -40,30 +40,29 @@ struct grpc_auth_context;
namespace grpc {
-class SecureAuthContext GRPC_FINAL : public AuthContext {
+class SecureAuthContext final : public AuthContext {
public:
SecureAuthContext(grpc_auth_context* ctx, bool take_ownership);
- ~SecureAuthContext() GRPC_OVERRIDE;
+ ~SecureAuthContext() override;
- bool IsPeerAuthenticated() const GRPC_OVERRIDE;
+ bool IsPeerAuthenticated() const override;
- std::vector<grpc::string_ref> GetPeerIdentity() const GRPC_OVERRIDE;
+ std::vector<grpc::string_ref> GetPeerIdentity() const override;
- grpc::string GetPeerIdentityPropertyName() const GRPC_OVERRIDE;
+ grpc::string GetPeerIdentityPropertyName() const override;
std::vector<grpc::string_ref> FindPropertyValues(
- const grpc::string& name) const GRPC_OVERRIDE;
+ const grpc::string& name) const override;
- AuthPropertyIterator begin() const GRPC_OVERRIDE;
+ AuthPropertyIterator begin() const override;
- AuthPropertyIterator end() const GRPC_OVERRIDE;
+ AuthPropertyIterator end() const override;
void AddProperty(const grpc::string& key,
- const grpc::string_ref& value) GRPC_OVERRIDE;
+ const grpc::string_ref& value) override;
- virtual bool SetPeerIdentityPropertyName(const grpc::string& name)
- GRPC_OVERRIDE;
+ virtual bool SetPeerIdentityPropertyName(const grpc::string& name) override;
private:
grpc_auth_context* ctx_;
diff --git a/src/cpp/ext/proto_server_reflection.h b/src/cpp/ext/proto_server_reflection.h
index be5f062f9f..ca0ba97d88 100644
--- a/src/cpp/ext/proto_server_reflection.h
+++ b/src/cpp/ext/proto_server_reflection.h
@@ -42,7 +42,7 @@
namespace grpc {
-class ProtoServerReflection GRPC_FINAL
+class ProtoServerReflection final
: public reflection::v1alpha::ServerReflection::Service {
public:
ProtoServerReflection();
@@ -56,7 +56,7 @@ class ProtoServerReflection GRPC_FINAL
ServerContext* context,
ServerReaderWriter<reflection::v1alpha::ServerReflectionResponse,
reflection::v1alpha::ServerReflectionRequest>* stream)
- GRPC_OVERRIDE;
+ override;
private:
Status ListService(ServerContext* context,
diff --git a/src/cpp/server/dynamic_thread_pool.cc b/src/cpp/server/dynamic_thread_pool.cc
index 4b226c2992..1fdc2edb25 100644
--- a/src/cpp/server/dynamic_thread_pool.cc
+++ b/src/cpp/server/dynamic_thread_pool.cc
@@ -31,16 +31,16 @@
*
*/
-#include <grpc++/impl/sync.h>
-#include <grpc++/impl/thd.h>
+#include <mutex>
+#include <thread>
#include "src/cpp/server/dynamic_thread_pool.h"
namespace grpc {
DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
: pool_(pool),
- thd_(new grpc::thread(&DynamicThreadPool::DynamicThread::ThreadFunc,
- this)) {}
+ thd_(new std::thread(&DynamicThreadPool::DynamicThread::ThreadFunc,
+ this)) {}
DynamicThreadPool::DynamicThread::~DynamicThread() {
thd_->join();
thd_.reset();
@@ -49,7 +49,7 @@ DynamicThreadPool::DynamicThread::~DynamicThread() {
void DynamicThreadPool::DynamicThread::ThreadFunc() {
pool_->ThreadFunc();
// Now that we have killed ourselves, we should reduce the thread count
- grpc::unique_lock<grpc::mutex> lock(pool_->mu_);
+ std::unique_lock<std::mutex> lock(pool_->mu_);
pool_->nthreads_--;
// Move ourselves to dead list
pool_->dead_threads_.push_back(this);
@@ -62,7 +62,7 @@ void DynamicThreadPool::DynamicThread::ThreadFunc() {
void DynamicThreadPool::ThreadFunc() {
for (;;) {
// Wait until work is available or we are shutting down.
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (!shutdown_ && callbacks_.empty()) {
// If there are too many threads waiting, then quit this thread
if (threads_waiting_ >= reserve_threads_) {
@@ -91,7 +91,7 @@ DynamicThreadPool::DynamicThreadPool(int reserve_threads)
nthreads_(0),
threads_waiting_(0) {
for (int i = 0; i < reserve_threads_; i++) {
- grpc::lock_guard<grpc::mutex> lock(mu_);
+ std::lock_guard<std::mutex> lock(mu_);
nthreads_++;
new DynamicThread(this);
}
@@ -104,7 +104,7 @@ void DynamicThreadPool::ReapThreads(std::list<DynamicThread*>* tlist) {
}
DynamicThreadPool::~DynamicThreadPool() {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
shutdown_ = true;
cv_.notify_all();
while (nthreads_ != 0) {
@@ -114,7 +114,7 @@ DynamicThreadPool::~DynamicThreadPool() {
}
void DynamicThreadPool::Add(const std::function<void()>& callback) {
- grpc::lock_guard<grpc::mutex> lock(mu_);
+ std::lock_guard<std::mutex> lock(mu_);
// Add work to the callbacks list
callbacks_.push(callback);
// Increase pool size or notify as needed
diff --git a/src/cpp/server/dynamic_thread_pool.h b/src/cpp/server/dynamic_thread_pool.h
index 5ba7533c05..4f8c4111cc 100644
--- a/src/cpp/server/dynamic_thread_pool.h
+++ b/src/cpp/server/dynamic_thread_pool.h
@@ -34,24 +34,25 @@
#ifndef GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
#define GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
+#include <condition_variable>
#include <list>
#include <memory>
+#include <mutex>
#include <queue>
+#include <thread>
-#include <grpc++/impl/sync.h>
-#include <grpc++/impl/thd.h>
#include <grpc++/support/config.h>
#include "src/cpp/server/thread_pool_interface.h"
namespace grpc {
-class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
+class DynamicThreadPool final : public ThreadPoolInterface {
public:
explicit DynamicThreadPool(int reserve_threads);
~DynamicThreadPool();
- void Add(const std::function<void()>& callback) GRPC_OVERRIDE;
+ void Add(const std::function<void()>& callback) override;
private:
class DynamicThread {
@@ -61,12 +62,12 @@ class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
private:
DynamicThreadPool* pool_;
- std::unique_ptr<grpc::thread> thd_;
+ std::unique_ptr<std::thread> thd_;
void ThreadFunc();
};
- grpc::mutex mu_;
- grpc::condition_variable cv_;
- grpc::condition_variable shutdown_cv_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ std::condition_variable shutdown_cv_;
bool shutdown_;
std::queue<std::function<void()>> callbacks_;
int reserve_threads_;
diff --git a/src/cpp/server/insecure_server_credentials.cc b/src/cpp/server/insecure_server_credentials.cc
index ef3cae5fd7..eb5931b7b0 100644
--- a/src/cpp/server/insecure_server_credentials.cc
+++ b/src/cpp/server/insecure_server_credentials.cc
@@ -38,14 +38,13 @@
namespace grpc {
namespace {
-class InsecureServerCredentialsImpl GRPC_FINAL : public ServerCredentials {
+class InsecureServerCredentialsImpl final : public ServerCredentials {
public:
- int AddPortToServer(const grpc::string& addr,
- grpc_server* server) GRPC_OVERRIDE {
+ int AddPortToServer(const grpc::string& addr, grpc_server* server) override {
return grpc_server_add_insecure_http2_port(server, addr.c_str());
}
void SetAuthMetadataProcessor(
- const std::shared_ptr<AuthMetadataProcessor>& processor) GRPC_OVERRIDE {
+ const std::shared_ptr<AuthMetadataProcessor>& processor) override {
(void)processor;
GPR_ASSERT(0); // Should not be called on InsecureServerCredentials.
}
diff --git a/src/cpp/server/secure_server_credentials.h b/src/cpp/server/secure_server_credentials.h
index 5460f4a02c..3a301e60c2 100644
--- a/src/cpp/server/secure_server_credentials.h
+++ b/src/cpp/server/secure_server_credentials.h
@@ -44,7 +44,7 @@
namespace grpc {
-class AuthMetadataProcessorAyncWrapper GRPC_FINAL {
+class AuthMetadataProcessorAyncWrapper final {
public:
static void Destroy(void* wrapper);
@@ -64,19 +64,18 @@ class AuthMetadataProcessorAyncWrapper GRPC_FINAL {
std::shared_ptr<AuthMetadataProcessor> processor_;
};
-class SecureServerCredentials GRPC_FINAL : public ServerCredentials {
+class SecureServerCredentials final : public ServerCredentials {
public:
explicit SecureServerCredentials(grpc_server_credentials* creds)
: creds_(creds) {}
- ~SecureServerCredentials() GRPC_OVERRIDE {
+ ~SecureServerCredentials() override {
grpc_server_credentials_release(creds_);
}
- int AddPortToServer(const grpc::string& addr,
- grpc_server* server) GRPC_OVERRIDE;
+ int AddPortToServer(const grpc::string& addr, grpc_server* server) override;
void SetAuthMetadataProcessor(
- const std::shared_ptr<AuthMetadataProcessor>& processor) GRPC_OVERRIDE;
+ const std::shared_ptr<AuthMetadataProcessor>& processor) override;
private:
grpc_server_credentials* creds_;
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index d46942d257..b7cfd6dbf1 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -55,11 +55,11 @@
namespace grpc {
-class DefaultGlobalCallbacks GRPC_FINAL : public Server::GlobalCallbacks {
+class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
public:
- ~DefaultGlobalCallbacks() GRPC_OVERRIDE {}
- void PreSynchronousRequest(ServerContext* context) GRPC_OVERRIDE {}
- void PostSynchronousRequest(ServerContext* context) GRPC_OVERRIDE {}
+ ~DefaultGlobalCallbacks() override {}
+ void PreSynchronousRequest(ServerContext* context) override {}
+ void PostSynchronousRequest(ServerContext* context) override {}
};
static std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
@@ -79,7 +79,7 @@ class Server::UnimplementedAsyncRequestContext {
GenericServerAsyncReaderWriter generic_stream_;
};
-class Server::UnimplementedAsyncRequest GRPC_FINAL
+class Server::UnimplementedAsyncRequest final
: public UnimplementedAsyncRequestContext,
public GenericAsyncRequest {
public:
@@ -89,7 +89,7 @@ class Server::UnimplementedAsyncRequest GRPC_FINAL
server_(server),
cq_(cq) {}
- bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
+ bool FinalizeResult(void** tag, bool* status) override;
ServerContext* context() { return &server_context_; }
GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
@@ -101,13 +101,13 @@ class Server::UnimplementedAsyncRequest GRPC_FINAL
typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
UnimplementedAsyncResponseOp;
-class Server::UnimplementedAsyncResponse GRPC_FINAL
+class Server::UnimplementedAsyncResponse final
: public UnimplementedAsyncResponseOp {
public:
UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
~UnimplementedAsyncResponse() { delete request_; }
- bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
+ bool FinalizeResult(void** tag, bool* status) override {
bool r = UnimplementedAsyncResponseOp::FinalizeResult(tag, status);
delete this;
return r;
@@ -122,7 +122,7 @@ class ShutdownTag : public CompletionQueueTag {
bool FinalizeResult(void** tag, bool* status) { return false; }
};
-class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
+class Server::SyncRequest final : public CompletionQueueTag {
public:
SyncRequest(RpcServiceMethod* method, void* tag)
: method_(method),
@@ -170,7 +170,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
}
}
- bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
+ bool FinalizeResult(void** tag, bool* status) override {
if (!*status) {
grpc_completion_queue_destroy(cq_);
}
@@ -182,7 +182,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
return true;
}
- class CallData GRPC_FINAL {
+ class CallData final {
public:
explicit CallData(Server* server, SyncRequest* mrd)
: cq_(mrd->cq_),
@@ -255,7 +255,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
cq_timeout_msec_(cq_timeout_msec),
global_callbacks_(global_callbacks) {}
- WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE {
+ WorkStatus PollForWork(void** tag, bool* ok) override {
*tag = nullptr;
gpr_timespec deadline =
gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN);
@@ -272,7 +272,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
GPR_UNREACHABLE_CODE(return TIMEOUT);
}
- void DoWork(void* tag, bool ok) GRPC_OVERRIDE {
+ void DoWork(void* tag, bool ok) override {
SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
if (!sync_req) {
@@ -379,7 +379,7 @@ Server::Server(
Server::~Server() {
{
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (started_ && !shutdown_) {
lock.unlock();
Shutdown();
@@ -501,7 +501,7 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
}
void Server::ShutdownInternal(gpr_timespec deadline) {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (started_ && !shutdown_) {
shutdown_ = true;
@@ -549,7 +549,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
}
void Server::Wait() {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
while (started_ && !shutdown_notified_) {
shutdown_cv_.wait(lock);
}
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index 1ca6a2b906..a66ec4ac84 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -33,9 +33,10 @@
#include <grpc++/server_context.h>
+#include <mutex>
+
#include <grpc++/completion_queue.h>
#include <grpc++/impl/call.h>
-#include <grpc++/impl/sync.h>
#include <grpc++/support/time.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
@@ -48,7 +49,7 @@ namespace grpc {
// CompletionOp
-class ServerContext::CompletionOp GRPC_FINAL : public CallOpSetInterface {
+class ServerContext::CompletionOp final : public CallOpSetInterface {
public:
// initial refs: one in the server context, one in the cq
CompletionOp()
@@ -58,8 +59,8 @@ class ServerContext::CompletionOp GRPC_FINAL : public CallOpSetInterface {
finalized_(false),
cancelled_(0) {}
- void FillOps(grpc_op* ops, size_t* nops) GRPC_OVERRIDE;
- bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
+ void FillOps(grpc_op* ops, size_t* nops) override;
+ bool FinalizeResult(void** tag, bool* status) override;
bool CheckCancelled(CompletionQueue* cq) {
cq->TryPluck(this);
@@ -76,20 +77,20 @@ class ServerContext::CompletionOp GRPC_FINAL : public CallOpSetInterface {
private:
bool CheckCancelledNoPluck() {
- grpc::lock_guard<grpc::mutex> g(mu_);
+ std::lock_guard<std::mutex> g(mu_);
return finalized_ ? (cancelled_ != 0) : false;
}
bool has_tag_;
void* tag_;
- grpc::mutex mu_;
+ std::mutex mu_;
int refs_;
bool finalized_;
int cancelled_;
};
void ServerContext::CompletionOp::Unref() {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (--refs_ == 0) {
lock.unlock();
delete this;
@@ -105,7 +106,7 @@ void ServerContext::CompletionOp::FillOps(grpc_op* ops, size_t* nops) {
}
bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
finalized_ = true;
bool ret = false;
if (has_tag_) {
diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc
index caae4c457d..1450d009e4 100644
--- a/src/cpp/thread_manager/thread_manager.cc
+++ b/src/cpp/thread_manager/thread_manager.cc
@@ -31,12 +31,13 @@
*
*/
-#include <grpc++/impl/sync.h>
-#include <grpc++/impl/thd.h>
-#include <grpc/support/log.h>
+#include "src/cpp/thread_manager/thread_manager.h"
+
#include <climits>
+#include <mutex>
+#include <thread>
-#include "src/cpp/thread_manager/thread_manager.h"
+#include <grpc/support/log.h>
namespace grpc {
@@ -59,7 +60,7 @@ ThreadManager::ThreadManager(int min_pollers, int max_pollers)
ThreadManager::~ThreadManager() {
{
- std::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
GPR_ASSERT(num_threads_ == 0);
}
@@ -67,29 +68,29 @@ ThreadManager::~ThreadManager() {
}
void ThreadManager::Wait() {
- std::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
while (num_threads_ != 0) {
shutdown_cv_.wait(lock);
}
}
void ThreadManager::Shutdown() {
- std::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
shutdown_ = true;
}
bool ThreadManager::IsShutdown() {
- std::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
return shutdown_;
}
void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
{
- std::unique_lock<grpc::mutex> list_lock(list_mu_);
+ std::unique_lock<std::mutex> list_lock(list_mu_);
completed_threads_.push_back(thd);
}
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
num_threads_--;
if (num_threads_ == 0) {
shutdown_cv_.notify_one();
@@ -97,7 +98,7 @@ void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
}
void ThreadManager::CleanupCompletedThreads() {
- std::unique_lock<grpc::mutex> lock(list_mu_);
+ std::unique_lock<std::mutex> lock(list_mu_);
for (auto thd = completed_threads_.begin(); thd != completed_threads_.end();
thd = completed_threads_.erase(thd)) {
delete *thd;
@@ -114,7 +115,7 @@ void ThreadManager::Initialize() {
// less than max threshold (i.e. max_pollers_) and the total number of threads is
// below the maximum threshold, we can let the current thread continue as poller
bool ThreadManager::MaybeContinueAsPoller() {
- std::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (shutdown_ || num_pollers_ > max_pollers_) {
return false;
}
@@ -127,7 +128,7 @@ bool ThreadManager::MaybeContinueAsPoller() {
// threads currently blocked in PollForWork()) is below the threshold (i.e.
// min_pollers_) and the total number of threads is below the maximum threshold
void ThreadManager::MaybeCreatePoller() {
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
if (!shutdown_ && num_pollers_ < min_pollers_) {
num_pollers_++;
num_threads_++;
@@ -156,7 +157,7 @@ void ThreadManager::MainWorkLoop() {
WorkStatus work_status = PollForWork(&tag, &ok);
{
- grpc::unique_lock<grpc::mutex> lock(mu_);
+ std::unique_lock<std::mutex> lock(mu_);
num_pollers_--;
if (work_status == TIMEOUT && num_pollers_ > min_pollers_) {
diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h
index 9cfdb8af25..9c0569c62c 100644
--- a/src/cpp/thread_manager/thread_manager.h
+++ b/src/cpp/thread_manager/thread_manager.h
@@ -34,11 +34,12 @@
#ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H
#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H
+#include <condition_variable>
#include <list>
#include <memory>
+#include <mutex>
+#include <thread>
-#include <grpc++/impl/sync.h>
-#include <grpc++/impl/thd.h>
#include <grpc++/support/config.h>
namespace grpc {
@@ -115,7 +116,7 @@ class ThreadManager {
void Run();
ThreadManager* thd_mgr_;
- grpc::thread thd_;
+ std::thread thd_;
};
// The main function in ThreadManager
@@ -134,10 +135,10 @@ class ThreadManager {
// Protects shutdown_, num_pollers_ and num_threads_
// TODO: sreek - Change num_pollers and num_threads_ to atomics
- grpc::mutex mu_;
+ std::mutex mu_;
bool shutdown_;
- grpc::condition_variable shutdown_cv_;
+ std::condition_variable shutdown_cv_;
// Number of threads doing polling
int num_pollers_;
@@ -150,7 +151,7 @@ class ThreadManager {
// currently polling i.e num_pollers_)
int num_threads_;
- grpc::mutex list_mu_;
+ std::mutex list_mu_;
std::list<WorkerThread*> completed_threads_;
};
diff --git a/src/cpp/util/time_cc.cc b/src/cpp/util/time_cc.cc
index c43d848cc6..cd59a19703 100644
--- a/src/cpp/util/time_cc.cc
+++ b/src/cpp/util/time_cc.cc
@@ -32,9 +32,6 @@
*/
#include <grpc++/support/config.h>
-
-#ifndef GRPC_CXX0X_NO_CHRONO
-
#include <grpc++/support/time.h>
#include <grpc/support/time.h>
@@ -91,5 +88,3 @@ system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
}
} // namespace grpc
-
-#endif // !GRPC_CXX0X_NO_CHRONO
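Removing the GRPC_CXX0X_NO_CHRONO guard makes <chrono> support unconditional, consistent with the std::mutex/std::thread migration elsewhere in this change. For reference, a conversion sketch using the helper defined in this file (its declaration assumed to be in <grpc++/support/time.h>):

    #include <chrono>

    #include <grpc++/support/time.h>
    #include <grpc/support/time.h>

    gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
    std::chrono::system_clock::time_point tp = grpc::Timespec2Timepoint(now);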
diff --git a/src/proto/grpc/lb/v1/load_balancer.options b/src/proto/grpc/lb/v1/load_balancer.options
index a9398d5f47..7fbd44b9de 100644
--- a/src/proto/grpc/lb/v1/load_balancer.options
+++ b/src/proto/grpc/lb/v1/load_balancer.options
@@ -1,6 +1,5 @@
grpc.lb.v1.InitialLoadBalanceRequest.name max_size:128
-grpc.lb.v1.InitialLoadBalanceResponse.client_config max_size:64
grpc.lb.v1.InitialLoadBalanceResponse.load_balancer_delegate max_size:64
grpc.lb.v1.Server.ip_address max_size:16
-grpc.lb.v1.Server.load_balance_token max_size:65
+grpc.lb.v1.Server.load_balance_token max_size:50
load_balancer.proto no_unions:true
diff --git a/src/proto/grpc/lb/v1/load_balancer.proto b/src/proto/grpc/lb/v1/load_balancer.proto
index 210fba1323..44a5150a7e 100644
--- a/src/proto/grpc/lb/v1/load_balancer.proto
+++ b/src/proto/grpc/lb/v1/load_balancer.proto
@@ -63,7 +63,8 @@ message LoadBalanceRequest {
}
message InitialLoadBalanceRequest {
- // Name of load balanced service (IE, service.grpc.gslb.google.com)
+  // Name of load balanced service (e.g., service.grpc.gslb.google.com). Its
+ // length should be less than 256 bytes.
string name = 1;
}
@@ -95,7 +96,8 @@ message InitialLoadBalanceResponse {
// This is an application layer redirect that indicates the client should use
// the specified server for load balancing. When this field is non-empty in
// the response, the client should open a separate connection to the
- // load_balancer_delegate and call the BalanceLoad method.
+ // load_balancer_delegate and call the BalanceLoad method. Its length should
+ // be less than 64 bytes.
string load_balancer_delegate = 1;
// This interval defines how often the client should send the client stats
@@ -130,6 +132,8 @@ message Server {
// frontend requests for that pick must include the token in its initial
// metadata. The token is used by the backend to verify the request and to
// allow the backend to report load to the gRPC LB system.
+ //
+ // Its length is variable but less than 50 bytes.
string load_balance_token = 3;
// Indicates whether this particular request should be dropped by the client
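The .options edit and the new load_balance_token comment move together: nanopb turns max_size into a fixed-capacity field in the generated load_balancer.pb.h (which is why that header also changes in this commit), and for string fields the budget includes the NUL terminator, matching "less than 50 bytes" of payload. A hedged sketch of the shape this typically generates (name and layout assumed, not copied from the generated header):

    /* nanopb with "max_size:50" on a string field typically emits: */
    typedef struct _grpc_lb_v1_Server_sketch {
      char load_balance_token[50]; /* up to 49 token bytes plus NUL */
    } grpc_lb_v1_Server_sketch;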
diff --git a/src/ruby/ext/grpc/rb_compression_options.c b/src/ruby/ext/grpc/rb_compression_options.c
index c5668fdab4..6200dbafeb 100644
--- a/src/ruby/ext/grpc/rb_compression_options.c
+++ b/src/ruby/ext/grpc/rb_compression_options.c
@@ -283,6 +283,8 @@ VALUE grpc_rb_compression_options_level_value_to_name_internal(
rb_eArgError,
"Failed to convert compression level value to name for value: %d",
(int)compression_value);
+  /* return something to avoid a compiler error about a missing return */
+ return Qnil;
}
}