Diffstat (limited to 'src/core/lib')
 src/core/lib/channel/channel_stack.cc | 9
 src/core/lib/channel/channel_stack.h | 8
 src/core/lib/channel/channel_stack_builder.cc | 16
 src/core/lib/channel/channel_stack_builder.h | 8
 src/core/lib/channel/channelz.cc | 84
 src/core/lib/channel/channelz.h | 16
 src/core/lib/channel/channelz_registry.cc | 29
 src/core/lib/channel/channelz_registry.h | 6
 src/core/lib/channel/context.h | 8
 src/core/lib/debug/trace.cc | 3
 src/core/lib/debug/trace.h | 8
 src/core/lib/gpr/arena.cc | 121
 src/core/lib/gpr/arena.h | 2
 src/core/lib/gprpp/inlined_vector.h | 44
 src/core/lib/gprpp/orphanable.h | 100
 src/core/lib/gprpp/ref_counted.h | 230
 src/core/lib/gprpp/ref_counted_ptr.h | 42
 src/core/lib/iomgr/buffer_list.cc | 21
 src/core/lib/iomgr/buffer_list.h | 13
 src/core/lib/iomgr/call_combiner.cc | 54
 src/core/lib/iomgr/call_combiner.h | 35
 src/core/lib/iomgr/closure.h | 1
 src/core/lib/iomgr/dynamic_annotations.h | 67
 src/core/lib/iomgr/endpoint.cc | 4
 src/core/lib/iomgr/endpoint.h | 3
 src/core/lib/iomgr/endpoint_cfstream.cc | 5
 src/core/lib/iomgr/endpoint_pair_posix.cc | 4
 src/core/lib/iomgr/ev_epoll1_linux.cc | 4
 src/core/lib/iomgr/ev_epollex_linux.cc | 4
 src/core/lib/iomgr/ev_poll_posix.cc | 4
 src/core/lib/iomgr/ev_posix.cc | 22
 src/core/lib/iomgr/ev_posix.h | 10
 src/core/lib/iomgr/fork_posix.cc | 2
 src/core/lib/iomgr/internal_errqueue.cc | 39
 src/core/lib/iomgr/internal_errqueue.h | 8
 src/core/lib/iomgr/iomgr.cc | 7
 src/core/lib/iomgr/iomgr.h | 4
 src/core/lib/iomgr/iomgr_custom.cc | 4
 src/core/lib/iomgr/iomgr_internal.cc | 4
 src/core/lib/iomgr/iomgr_internal.h | 4
 src/core/lib/iomgr/iomgr_posix.cc | 7
 src/core/lib/iomgr/iomgr_posix_cfstream.cc | 7
 src/core/lib/iomgr/iomgr_windows.cc | 5
 src/core/lib/iomgr/polling_entity.h | 8
 src/core/lib/iomgr/port.h | 3
 src/core/lib/iomgr/resolve_address.h | 2
 src/core/lib/iomgr/resource_quota.cc | 74
 src/core/lib/iomgr/resource_quota.h | 27
 src/core/lib/iomgr/socket_utils_common_posix.cc | 11
 src/core/lib/iomgr/tcp_client_posix.cc | 2
 src/core/lib/iomgr/tcp_custom.cc | 5
 src/core/lib/iomgr/tcp_posix.cc | 105
 src/core/lib/iomgr/tcp_windows.cc | 99
 src/core/lib/security/context/security_context.cc | 33
 src/core/lib/security/context/security_context.h | 46
 src/core/lib/security/credentials/credentials.h | 4
 src/core/lib/security/credentials/google_default/google_default_credentials.cc | 221
 src/core/lib/security/credentials/google_default/google_default_credentials.h | 5
 src/core/lib/security/transport/client_auth_filter.cc | 44
 src/core/lib/security/transport/secure_endpoint.cc | 100
 src/core/lib/security/transport/security_handshaker.cc | 3
 src/core/lib/security/transport/server_auth_filter.cc | 70
 src/core/lib/surface/call.cc | 180
 src/core/lib/surface/call.h | 4
 src/core/lib/surface/channel.cc | 26
 src/core/lib/surface/channel.h | 3
 src/core/lib/surface/completion_queue.cc | 2
 src/core/lib/surface/init.cc | 2
 src/core/lib/surface/server.cc | 248
 src/core/lib/surface/server.h | 13
 src/core/lib/surface/version.cc | 2
 src/core/lib/transport/metadata_batch.h | 6
 src/core/lib/transport/static_metadata.cc | 449
 src/core/lib/transport/static_metadata.h | 146
 src/core/lib/transport/transport.cc | 3
 src/core/lib/transport/transport.h | 74
 src/core/lib/uri/uri_parser.cc | 314
 src/core/lib/uri/uri_parser.h | 50
 78 files changed, 2396 insertions(+), 1064 deletions(-)
diff --git a/src/core/lib/channel/channel_stack.cc b/src/core/lib/channel/channel_stack.cc
index 056fcd93de..df956c7176 100644
--- a/src/core/lib/channel/channel_stack.cc
+++ b/src/core/lib/channel/channel_stack.cc
@@ -157,7 +157,6 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
size_t count = channel_stack->count;
grpc_call_element* call_elems;
char* user_data;
- size_t i;
elem_args->call_stack->count = count;
GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy,
@@ -168,10 +167,14 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
/* init per-filter data */
grpc_error* first_error = GRPC_ERROR_NONE;
- for (i = 0; i < count; i++) {
+ for (size_t i = 0; i < count; i++) {
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
+ user_data +=
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
+ }
+ for (size_t i = 0; i < count; i++) {
grpc_error* error =
call_elems[i].filter->init_call_elem(&call_elems[i], elem_args);
if (error != GRPC_ERROR_NONE) {
@@ -181,8 +184,6 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
GRPC_ERROR_UNREF(error);
}
}
- user_data +=
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
return first_error;
}
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 35c3fb01ea..0de8c67079 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -79,11 +79,11 @@ typedef struct {
} grpc_call_stats;
/** Information about the call upon completion. */
-typedef struct {
+struct grpc_call_final_info {
grpc_call_stats stats;
- grpc_status_code final_status;
- const char* error_string;
-} grpc_call_final_info;
+ grpc_status_code final_status = GRPC_STATUS_OK;
+ const char* error_string = nullptr;
+};
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
diff --git a/src/core/lib/channel/channel_stack_builder.cc b/src/core/lib/channel/channel_stack_builder.cc
index df5a783631..8b3008f221 100644
--- a/src/core/lib/channel/channel_stack_builder.cc
+++ b/src/core/lib/channel/channel_stack_builder.cc
@@ -40,6 +40,7 @@ struct grpc_channel_stack_builder {
// various set/get-able parameters
grpc_channel_args* args;
grpc_transport* transport;
+ grpc_resource_user* resource_user;
char* target;
const char* name;
};
@@ -157,6 +158,11 @@ void grpc_channel_stack_builder_set_channel_arguments(
builder->args = grpc_channel_args_copy(args);
}
+const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
+ grpc_channel_stack_builder* builder) {
+ return builder->args;
+}
+
void grpc_channel_stack_builder_set_transport(
grpc_channel_stack_builder* builder, grpc_transport* transport) {
GPR_ASSERT(builder->transport == nullptr);
@@ -168,9 +174,15 @@ grpc_transport* grpc_channel_stack_builder_get_transport(
return builder->transport;
}
-const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
+void grpc_channel_stack_builder_set_resource_user(
+ grpc_channel_stack_builder* builder, grpc_resource_user* resource_user) {
+ GPR_ASSERT(builder->resource_user == nullptr);
+ builder->resource_user = resource_user;
+}
+
+grpc_resource_user* grpc_channel_stack_builder_get_resource_user(
grpc_channel_stack_builder* builder) {
- return builder->args;
+ return builder->resource_user;
}
bool grpc_channel_stack_builder_append_filter(
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index 9196de9378..89c30e0c5e 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -54,6 +54,14 @@ void grpc_channel_stack_builder_set_transport(
grpc_transport* grpc_channel_stack_builder_get_transport(
grpc_channel_stack_builder* builder);
+/// Attach \a resource_user to the builder (does not take ownership)
+void grpc_channel_stack_builder_set_resource_user(
+ grpc_channel_stack_builder* builder, grpc_resource_user* resource_user);
+
+/// Fetch attached resource user
+grpc_resource_user* grpc_channel_stack_builder_get_resource_user(
+ grpc_channel_stack_builder* builder);
+
/// Set channel arguments: copies args
void grpc_channel_stack_builder_set_channel_arguments(
grpc_channel_stack_builder* builder, const grpc_channel_args* args);
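
Usage sketch for the two accessors added above (illustrative only, not part of the diff; the helper name is hypothetical). The setter does not take ownership, so the caller must keep the resource user alive for as long as the stack being built needs it:

#include <grpc/support/log.h>

#include "src/core/lib/channel/channel_stack_builder.h"

// Hypothetical server-setup helper: attach a resource user while assembling
// the channel stack, then read it back (e.g. from transport-setup code).
static void example_attach_resource_user(grpc_channel_stack_builder* builder,
                                         grpc_resource_user* resource_user) {
  grpc_channel_stack_builder_set_resource_user(builder, resource_user);
  grpc_resource_user* attached =
      grpc_channel_stack_builder_get_resource_user(builder);
  GPR_ASSERT(attached == resource_user);
}
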
diff --git a/src/core/lib/channel/channelz.cc b/src/core/lib/channel/channelz.cc
index 33577d890a..0cb2890518 100644
--- a/src/core/lib/channel/channelz.cc
+++ b/src/core/lib/channel/channelz.cc
@@ -30,15 +30,18 @@
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/error_utils.h"
+#include "src/core/lib/uri/uri_parser.h"
namespace grpc_core {
namespace channelz {
@@ -204,22 +207,20 @@ char* ServerNode::RenderServerSockets(intptr_t start_socket_id) {
grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
grpc_json* json = top_level_json;
grpc_json* json_iterator = nullptr;
- ChildRefsList socket_refs;
- // uuids index into entities one-off (idx 0 is really uuid 1, since 0 is
- // reserved). However, we want to support requests coming in with
- // start_server_id=0, which signifies "give me everything."
- size_t start_idx = start_socket_id == 0 ? 0 : start_socket_id - 1;
- grpc_server_populate_server_sockets(server_, &socket_refs, start_idx);
+ ChildSocketsList socket_refs;
+ grpc_server_populate_server_sockets(server_, &socket_refs, start_socket_id);
if (!socket_refs.empty()) {
// create list of socket refs
grpc_json* array_parent = grpc_json_create_child(
nullptr, json, "socketRef", nullptr, GRPC_JSON_ARRAY, false);
for (size_t i = 0; i < socket_refs.size(); ++i) {
- json_iterator =
+ grpc_json* socket_ref_json =
grpc_json_create_child(json_iterator, array_parent, nullptr, nullptr,
GRPC_JSON_OBJECT, false);
- grpc_json_add_number_string_child(json_iterator, nullptr, "socketId",
- socket_refs[i]);
+ json_iterator = grpc_json_add_number_string_child(
+ socket_ref_json, nullptr, "socketId", socket_refs[i]->uuid());
+ grpc_json_create_child(json_iterator, socket_ref_json, "name",
+ socket_refs[i]->remote(), GRPC_JSON_STRING, false);
}
}
// For now we do not have any pagination rules. In the future we could
@@ -277,7 +278,61 @@ grpc_json* ServerNode::RenderJson() {
return top_level_json;
}
-SocketNode::SocketNode() : BaseNode(EntityType::kSocket) {}
+static void PopulateSocketAddressJson(grpc_json* json, const char* name,
+ const char* addr_str) {
+ if (addr_str == nullptr) return;
+ grpc_json* json_iterator = nullptr;
+ json_iterator = grpc_json_create_child(json_iterator, json, name, nullptr,
+ GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ grpc_uri* uri = grpc_uri_parse(addr_str, true);
+ if ((uri != nullptr) && ((strcmp(uri->scheme, "ipv4") == 0) ||
+ (strcmp(uri->scheme, "ipv6") == 0))) {
+ const char* host_port = uri->path;
+ if (*host_port == '/') ++host_port;
+ char* host = nullptr;
+ char* port = nullptr;
+ GPR_ASSERT(gpr_split_host_port(host_port, &host, &port));
+ int port_num = -1;
+ if (port != nullptr) {
+ port_num = atoi(port);
+ }
+ char* b64_host = grpc_base64_encode(host, strlen(host), false, false);
+ json_iterator = grpc_json_create_child(json_iterator, json, "tcpip_address",
+ nullptr, GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ json_iterator = grpc_json_add_number_string_child(json, json_iterator,
+ "port", port_num);
+ json_iterator = grpc_json_create_child(json_iterator, json, "ip_address",
+ b64_host, GRPC_JSON_STRING, true);
+ gpr_free(host);
+ gpr_free(port);
+
+ } else if (uri != nullptr && strcmp(uri->scheme, "unix") == 0) {
+ json_iterator = grpc_json_create_child(json_iterator, json, "uds_address",
+ nullptr, GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ json_iterator =
+ grpc_json_create_child(json_iterator, json, "filename",
+ gpr_strdup(uri->path), GRPC_JSON_STRING, true);
+ } else {
+ json_iterator = grpc_json_create_child(json_iterator, json, "other_address",
+ nullptr, GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ json_iterator = grpc_json_create_child(json_iterator, json, "name",
+ addr_str, GRPC_JSON_STRING, false);
+ }
+ grpc_uri_destroy(uri);
+}
+
+SocketNode::SocketNode(UniquePtr<char> local, UniquePtr<char> remote)
+ : BaseNode(EntityType::kSocket),
+ local_(std::move(local)),
+ remote_(std::move(remote)) {}
void SocketNode::RecordStreamStartedFromLocal() {
gpr_atm_no_barrier_fetch_add(&streams_started_, static_cast<gpr_atm>(1));
@@ -315,6 +370,9 @@ grpc_json* SocketNode::RenderJson() {
json_iterator = nullptr;
json_iterator = grpc_json_add_number_string_child(json, json_iterator,
"socketId", uuid());
+ json = top_level_json;
+ PopulateSocketAddressJson(json, "remote", remote_.get());
+ PopulateSocketAddressJson(json, "local", local_.get());
// reset json iterators to top level object
json = top_level_json;
json_iterator = nullptr;
@@ -374,7 +432,8 @@ grpc_json* SocketNode::RenderJson() {
return top_level_json;
}
-ListenSocketNode::ListenSocketNode() : BaseNode(EntityType::kSocket) {}
+ListenSocketNode::ListenSocketNode(UniquePtr<char> local_addr)
+ : BaseNode(EntityType::kSocket), local_addr_(std::move(local_addr)) {}
grpc_json* ListenSocketNode::RenderJson() {
// We need to track these three json objects to build our object
@@ -388,6 +447,9 @@ grpc_json* ListenSocketNode::RenderJson() {
json_iterator = nullptr;
json_iterator = grpc_json_add_number_string_child(json, json_iterator,
"socketId", uuid());
+ json = top_level_json;
+ PopulateSocketAddressJson(json, "local", local_addr_.get());
+
return top_level_json;
}
diff --git a/src/core/lib/channel/channelz.h b/src/core/lib/channel/channelz.h
index 88551befc8..96a4333083 100644
--- a/src/core/lib/channel/channelz.h
+++ b/src/core/lib/channel/channelz.h
@@ -59,6 +59,9 @@ namespace channelz {
// add human readable names as in the channelz.proto
typedef InlinedVector<intptr_t, 10> ChildRefsList;
+class SocketNode;
+typedef InlinedVector<SocketNode*, 10> ChildSocketsList;
+
namespace testing {
class CallCountingHelperPeer;
class ChannelNodePeer;
@@ -232,7 +235,7 @@ class ServerNode : public BaseNode {
// Handles channelz bookkeeping for sockets
class SocketNode : public BaseNode {
public:
- SocketNode();
+ SocketNode(UniquePtr<char> local, UniquePtr<char> remote);
~SocketNode() override {}
grpc_json* RenderJson() override;
@@ -251,6 +254,8 @@ class SocketNode : public BaseNode {
gpr_atm_no_barrier_fetch_add(&keepalives_sent_, static_cast<gpr_atm>(1));
}
+ const char* remote() { return remote_.get(); }
+
private:
gpr_atm streams_started_ = 0;
gpr_atm streams_succeeded_ = 0;
@@ -262,16 +267,21 @@ class SocketNode : public BaseNode {
gpr_atm last_remote_stream_created_millis_ = 0;
gpr_atm last_message_sent_millis_ = 0;
gpr_atm last_message_received_millis_ = 0;
- UniquePtr<char> peer_string_;
+ UniquePtr<char> local_;
+ UniquePtr<char> remote_;
};
// Handles channelz bookkeeping for listen sockets
class ListenSocketNode : public BaseNode {
public:
- ListenSocketNode();
+  // ListenSocketNode takes ownership of local_addr.
+ explicit ListenSocketNode(UniquePtr<char> local_addr);
~ListenSocketNode() override {}
grpc_json* RenderJson() override;
+
+ private:
+ UniquePtr<char> local_addr_;
};
// Creation functions
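
Sketch of how a transport might construct a node with the new signature (illustrative only; the concrete addresses and the MakeRefCounted pattern are assumptions, not taken from this diff). Both strings are URI-style, matching what PopulateSocketAddressJson() in channelz.cc parses, and ownership passes to the node:

#include <grpc/support/string_util.h>

#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"

// Hypothetical construction site, e.g. inside a TCP endpoint.
static grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
example_make_socket_node() {
  return grpc_core::MakeRefCounted<grpc_core::channelz::SocketNode>(
      grpc_core::UniquePtr<char>(gpr_strdup("ipv4:127.0.0.1:443")),    // local
      grpc_core::UniquePtr<char>(gpr_strdup("ipv4:10.0.0.1:54321")));  // remote
}
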
diff --git a/src/core/lib/channel/channelz_registry.cc b/src/core/lib/channel/channelz_registry.cc
index 1fe2fad3e1..bc23b90a66 100644
--- a/src/core/lib/channel/channelz_registry.cc
+++ b/src/core/lib/channel/channelz_registry.cc
@@ -210,6 +210,17 @@ char* ChannelzRegistry::InternalGetServers(intptr_t start_server_id) {
return json_str;
}
+void ChannelzRegistry::InternalLogAllEntities() {
+ MutexLock lock(&mu_);
+ for (size_t i = 0; i < entities_.size(); ++i) {
+ if (entities_[i] != nullptr) {
+ char* json = entities_[i]->RenderJsonString();
+ gpr_log(GPR_INFO, "%s", json);
+ gpr_free(json);
+ }
+ }
+}
+
} // namespace channelz
} // namespace grpc_core
@@ -222,6 +233,24 @@ char* grpc_channelz_get_servers(intptr_t start_server_id) {
return grpc_core::channelz::ChannelzRegistry::GetServers(start_server_id);
}
+char* grpc_channelz_get_server(intptr_t server_id) {
+ grpc_core::channelz::BaseNode* server_node =
+ grpc_core::channelz::ChannelzRegistry::Get(server_id);
+ if (server_node == nullptr ||
+ server_node->type() !=
+ grpc_core::channelz::BaseNode::EntityType::kServer) {
+ return nullptr;
+ }
+ grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
+ grpc_json* json = top_level_json;
+ grpc_json* channel_json = server_node->RenderJson();
+ channel_json->key = "server";
+ grpc_json_link_child(json, channel_json, nullptr);
+ char* json_str = grpc_json_dump_to_string(top_level_json, 0);
+ grpc_json_destroy(top_level_json);
+ return json_str;
+}
+
char* grpc_channelz_get_server_sockets(intptr_t server_id,
intptr_t start_socket_id) {
grpc_core::channelz::BaseNode* base_node =
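
Caller-side sketch for the new per-server query (illustrative only; it assumes the matching declaration is exposed in the public channelz surface, and the server id is hypothetical). A non-null result is heap-allocated JSON that the caller must free:

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

static void example_dump_server_json(intptr_t server_id) {
  char* json = grpc_channelz_get_server(server_id);
  if (json == nullptr) {
    gpr_log(GPR_INFO, "no server with id %ld", (long)server_id);
    return;
  }
  gpr_log(GPR_INFO, "%s", json);
  gpr_free(json);
}
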
diff --git a/src/core/lib/channel/channelz_registry.h b/src/core/lib/channel/channelz_registry.h
index 326f0201c7..73b330785d 100644
--- a/src/core/lib/channel/channelz_registry.h
+++ b/src/core/lib/channel/channelz_registry.h
@@ -62,6 +62,10 @@ class ChannelzRegistry {
return Default()->InternalGetServers(start_server_id);
}
+  // Test-only helper function to dump the JSON representation to stdout.
+ // This can aid in debugging channelz code.
+ static void LogAllEntities() { Default()->InternalLogAllEntities(); }
+
private:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
@@ -96,6 +100,8 @@ class ChannelzRegistry {
// Else, will return idx of the first uuid higher than the target.
int FindByUuidLocked(intptr_t uuid, bool direct_hit_needed);
+ void InternalLogAllEntities();
+
// protects members
gpr_mu mu_;
InlinedVector<BaseNode*, 20> entities_;
diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h
index 5daf48a9a9..763e4ffc9f 100644
--- a/src/core/lib/channel/context.h
+++ b/src/core/lib/channel/context.h
@@ -41,9 +41,9 @@ typedef enum {
GRPC_CONTEXT_COUNT
} grpc_context_index;
-typedef struct {
- void* value;
- void (*destroy)(void*);
-} grpc_call_context_element;
+struct grpc_call_context_element {
+ void* value = nullptr;
+ void (*destroy)(void*) = nullptr;
+};
#endif /* GRPC_CORE_LIB_CHANNEL_CONTEXT_H */
diff --git a/src/core/lib/debug/trace.cc b/src/core/lib/debug/trace.cc
index 01c1e867d9..cafdb15c69 100644
--- a/src/core/lib/debug/trace.cc
+++ b/src/core/lib/debug/trace.cc
@@ -21,6 +21,7 @@
#include "src/core/lib/debug/trace.h"
#include <string.h>
+#include <type_traits>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
@@ -79,6 +80,8 @@ void TraceFlagList::LogAllTracers() {
// Flags register themselves on the list during construction
TraceFlag::TraceFlag(bool default_enabled, const char* name) : name_(name) {
+ static_assert(std::is_trivially_destructible<TraceFlag>::value,
+ "TraceFlag needs to be trivially destructible.");
set_enabled(default_enabled);
TraceFlagList::Add(this);
}
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index fe6301a3fc..4623494520 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -53,7 +53,8 @@ void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag);
class TraceFlag {
public:
TraceFlag(bool default_enabled, const char* name);
- ~TraceFlag() {}
+  // This needs to be trivially destructible as it is used as a global
+  // variable.
+ ~TraceFlag() = default;
const char* name() const { return name_; }
@@ -102,8 +103,9 @@ typedef TraceFlag DebugOnlyTraceFlag;
#else
class DebugOnlyTraceFlag {
public:
- DebugOnlyTraceFlag(bool default_enabled, const char* name) {}
- bool enabled() { return false; }
+ constexpr DebugOnlyTraceFlag(bool default_enabled, const char* name) {}
+ constexpr bool enabled() const { return false; }
+ constexpr const char* name() const { return "DebugOnlyTraceFlag"; }
private:
void set_enabled(bool enabled) {}
diff --git a/src/core/lib/gpr/arena.cc b/src/core/lib/gpr/arena.cc
index 77f9357146..836a7ca793 100644
--- a/src/core/lib/gpr/arena.cc
+++ b/src/core/lib/gpr/arena.cc
@@ -21,6 +21,7 @@
#include "src/core/lib/gpr/arena.h"
#include <string.h>
+#include <new>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
@@ -28,34 +29,79 @@
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gprpp/memory.h"
+
+namespace {
+enum init_strategy {
+ NO_INIT, // Do not initialize the arena blocks.
+ ZERO_INIT, // Initialize arena blocks with 0.
+ NON_ZERO_INIT, // Initialize arena blocks with a non-zero value.
+};
+
+gpr_once g_init_strategy_once = GPR_ONCE_INIT;
+init_strategy g_init_strategy = NO_INIT;
+} // namespace
+
+static void set_strategy_from_env() {
+ char* str = gpr_getenv("GRPC_ARENA_INIT_STRATEGY");
+ if (str == nullptr) {
+ g_init_strategy = NO_INIT;
+ } else if (strcmp(str, "zero_init") == 0) {
+ g_init_strategy = ZERO_INIT;
+ } else if (strcmp(str, "non_zero_init") == 0) {
+ g_init_strategy = NON_ZERO_INIT;
+ } else {
+ g_init_strategy = NO_INIT;
+ }
+ gpr_free(str);
+}
+
+static void* gpr_arena_alloc_maybe_init(size_t size) {
+ void* mem = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
+ gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
+ if (GPR_UNLIKELY(g_init_strategy != NO_INIT)) {
+ if (g_init_strategy == ZERO_INIT) {
+ memset(mem, 0, size);
+ } else { // NON_ZERO_INIT.
+ memset(mem, 0xFE, size);
+ }
+ }
+ return mem;
+}
+
+void gpr_arena_init() {
+ gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
+}
// Uncomment this to use a simple arena that simply allocates the
// requested amount of memory for each call to gpr_arena_alloc(). This
// effectively eliminates the efficiency gain of using an arena, but it
// may be useful for debugging purposes.
//#define SIMPLE_ARENA_FOR_DEBUGGING
-
#ifdef SIMPLE_ARENA_FOR_DEBUGGING
struct gpr_arena {
+ gpr_arena() { gpr_mu_init(&mu); }
+ ~gpr_arena() {
+ gpr_mu_destroy(&mu);
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ gpr_free_aligned(ptrs[i]);
+ }
+ gpr_free(ptrs);
+ }
+
gpr_mu mu;
- void** ptrs;
- size_t num_ptrs;
+ void** ptrs = nullptr;
+ size_t num_ptrs = 0;
};
gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
- gpr_arena* arena = (gpr_arena*)gpr_zalloc(sizeof(*arena));
- gpr_mu_init(&arena->mu);
- return arena;
+ return grpc_core::New<gpr_arena>();
}
size_t gpr_arena_destroy(gpr_arena* arena) {
- gpr_mu_destroy(&arena->mu);
- for (size_t i = 0; i < arena->num_ptrs; ++i) {
- gpr_free(arena->ptrs[i]);
- }
- gpr_free(arena->ptrs);
- gpr_free(arena);
+ grpc_core::Delete(arena);
return 1; // Value doesn't matter, since it won't be used.
}
@@ -63,7 +109,8 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
gpr_mu_lock(&arena->mu);
arena->ptrs =
(void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
- void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size);
+ void* retval = arena->ptrs[arena->num_ptrs++] =
+ gpr_arena_alloc_maybe_init(size);
gpr_mu_unlock(&arena->mu);
return retval;
}
@@ -77,45 +124,45 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
// would allow us to use the alignment actually needed by the caller.
typedef struct zone {
- zone* next;
+ zone* next = nullptr;
} zone;
struct gpr_arena {
+ gpr_arena(size_t initial_size)
+ : initial_zone_size(initial_size), last_zone(&initial_zone) {
+ gpr_mu_init(&arena_growth_mutex);
+ }
+ ~gpr_arena() {
+ gpr_mu_destroy(&arena_growth_mutex);
+ zone* z = initial_zone.next;
+ while (z) {
+ zone* next_z = z->next;
+ z->~zone();
+ gpr_free_aligned(z);
+ z = next_z;
+ }
+ }
+
// Keep track of the total used size. We use this in our call sizing
+  // hysteresis.
- gpr_atm total_used;
+ gpr_atm total_used = 0;
size_t initial_zone_size;
zone initial_zone;
zone* last_zone;
gpr_mu arena_growth_mutex;
};
-static void* zalloc_aligned(size_t size) {
- void* ptr = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
- memset(ptr, 0, size);
- return ptr;
-}
-
gpr_arena* gpr_arena_create(size_t initial_size) {
initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
- gpr_arena* a = static_cast<gpr_arena*>(zalloc_aligned(
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size));
- a->initial_zone_size = initial_size;
- a->last_zone = &a->initial_zone;
- gpr_mu_init(&a->arena_growth_mutex);
- return a;
+ return new (gpr_arena_alloc_maybe_init(
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size))
+ gpr_arena(initial_size);
}
size_t gpr_arena_destroy(gpr_arena* arena) {
- gpr_mu_destroy(&arena->arena_growth_mutex);
- gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
- zone* z = arena->initial_zone.next;
+ const gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
+ arena->~gpr_arena();
gpr_free_aligned(arena);
- while (z) {
- zone* next_z = z->next;
- gpr_free_aligned(z);
- z = next_z;
- }
return static_cast<size_t>(size);
}
@@ -132,8 +179,8 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
// sizing hysteresis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena).
gpr_mu_lock(&arena->arena_growth_mutex);
- zone* z = static_cast<zone*>(
- zalloc_aligned(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size));
+ zone* z = new (gpr_arena_alloc_maybe_init(
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone();
arena->last_zone->next = z;
arena->last_zone = z;
gpr_mu_unlock(&arena->arena_growth_mutex);
diff --git a/src/core/lib/gpr/arena.h b/src/core/lib/gpr/arena.h
index 6d2a073dd5..069892b228 100644
--- a/src/core/lib/gpr/arena.h
+++ b/src/core/lib/gpr/arena.h
@@ -37,5 +37,7 @@ gpr_arena* gpr_arena_create(size_t initial_size);
void* gpr_arena_alloc(gpr_arena* arena, size_t size);
// Destroy an arena, returning the total number of bytes allocated
size_t gpr_arena_destroy(gpr_arena* arena);
+// Initializes the Arena component.
+void gpr_arena_init();
#endif /* GRPC_CORE_LIB_GPR_ARENA_H */
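
Sketch of exercising the new initialization strategy (illustrative only; this is how a test might drive it, not code from this change). Because the strategy is latched under gpr_once, the environment variable must be set before the first arena operation in the process:

#include <grpc/support/alloc.h>

#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gpr/env.h"

static void example_arena_init_strategy() {
  // Recognized values (per set_strategy_from_env above): "zero_init" and
  // "non_zero_init"; anything else, or an unset variable, leaves blocks
  // uninitialized.
  gpr_setenv("GRPC_ARENA_INIT_STRATEGY", "non_zero_init");
  gpr_arena_init();  // reads the variable now, under gpr_once
  gpr_arena* arena = gpr_arena_create(1024);
  void* p = gpr_arena_alloc(arena, 64);  // block arrives filled with 0xFE
  (void)p;
  gpr_arena_destroy(arena);
}
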
diff --git a/src/core/lib/gprpp/inlined_vector.h b/src/core/lib/gprpp/inlined_vector.h
index 65c2b9634f..66dc751a56 100644
--- a/src/core/lib/gprpp/inlined_vector.h
+++ b/src/core/lib/gprpp/inlined_vector.h
@@ -100,10 +100,7 @@ class InlinedVector {
void reserve(size_t capacity) {
if (capacity > capacity_) {
T* new_dynamic = static_cast<T*>(gpr_malloc(sizeof(T) * capacity));
- for (size_t i = 0; i < size_; ++i) {
- new (&new_dynamic[i]) T(std::move(data()[i]));
- data()[i].~T();
- }
+ move_elements(data(), new_dynamic, size_);
gpr_free(dynamic_);
dynamic_ = new_dynamic;
capacity_ = capacity;
@@ -131,13 +128,25 @@ class InlinedVector {
size_--;
}
+ size_t size() const { return size_; }
+ bool empty() const { return size_ == 0; }
+
+ size_t capacity() const { return capacity_; }
+
+ void clear() {
+ destroy_elements();
+ init_data();
+ }
+
+ private:
void copy_from(const InlinedVector& v) {
- // if v is allocated, copy over the buffer.
+ // if v is allocated, make sure we have enough capacity.
if (v.dynamic_ != nullptr) {
reserve(v.capacity_);
- memcpy(dynamic_, v.dynamic_, v.size_ * sizeof(T));
- } else {
- memcpy(inline_, v.inline_, v.size_ * sizeof(T));
+ }
+ // copy over elements
+ for (size_t i = 0; i < v.size_; ++i) {
+ new (&(data()[i])) T(v[i]);
}
// copy over metadata
size_ = v.size_;
@@ -145,11 +154,12 @@ class InlinedVector {
}
void move_from(InlinedVector& v) {
- // if v is allocated, then we steal its buffer, else we copy it.
+ // if v is allocated, then we steal its dynamic array; otherwise, we
+ // move the elements individually.
if (v.dynamic_ != nullptr) {
dynamic_ = v.dynamic_;
} else {
- memcpy(inline_, v.inline_, v.size_ * sizeof(T));
+ move_elements(v.data(), data(), v.size_);
}
// copy over metadata
size_ = v.size_;
@@ -158,17 +168,13 @@ class InlinedVector {
v.init_data();
}
- size_t size() const { return size_; }
- bool empty() const { return size_ == 0; }
-
- size_t capacity() const { return capacity_; }
-
- void clear() {
- destroy_elements();
- init_data();
+ static void move_elements(T* src, T* dst, size_t num_elements) {
+ for (size_t i = 0; i < num_elements; ++i) {
+ new (&dst[i]) T(std::move(src[i]));
+ src[i].~T();
+ }
}
- private:
void init_data() {
dynamic_ = nullptr;
size_ = 0;
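
For reference, a small sketch of why the memcpy-based copy/move had to go (illustrative only, not part of this change): with a non-trivially-copyable element type such as UniquePtr, growth past the inline capacity must move-construct each element, which reserve() now does via move_elements():

#include <grpc/support/string_util.h>

#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/memory.h"

static void example_inlined_vector_move() {
  grpc_core::InlinedVector<grpc_core::UniquePtr<char>, 2> v;
  v.emplace_back(gpr_strdup("a"));
  v.emplace_back(gpr_strdup("b"));
  // Exceeds the inline capacity of 2: the existing elements are
  // move-constructed into the new dynamic storage rather than memcpy'd.
  v.emplace_back(gpr_strdup("c"));
}
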
diff --git a/src/core/lib/gprpp/orphanable.h b/src/core/lib/gprpp/orphanable.h
index 3123e3f5a3..9053c60111 100644
--- a/src/core/lib/gprpp/orphanable.h
+++ b/src/core/lib/gprpp/orphanable.h
@@ -31,6 +31,7 @@
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
namespace grpc_core {
@@ -89,107 +90,42 @@ class InternallyRefCounted : public Orphanable {
template <typename T>
friend class RefCountedPtr;
- InternallyRefCounted() { gpr_ref_init(&refs_, 1); }
- virtual ~InternallyRefCounted() {}
+ // TraceFlagT is defined to accept both DebugOnlyTraceFlag and TraceFlag.
+ // Note: RefCount tracing is only enabled on debug builds, even when a
+ // TraceFlag is used.
+ template <typename TraceFlagT = TraceFlag>
+ explicit InternallyRefCounted(TraceFlagT* trace_flag = nullptr)
+ : refs_(1, trace_flag) {}
+ virtual ~InternallyRefCounted() = default;
RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
IncrementRefCount();
return RefCountedPtr<Child>(static_cast<Child*>(this));
}
-
- void Unref() {
- if (gpr_unref(&refs_)) {
- Delete(static_cast<Child*>(this));
- }
- }
-
- private:
- void IncrementRefCount() { gpr_ref(&refs_); }
-
- gpr_refcount refs_;
-};
-
-// An alternative version of the InternallyRefCounted base class that
-// supports tracing. This is intended to be used in cases where the
-// object will be handled both by idiomatic C++ code using smart
-// pointers and legacy code that is manually calling Ref() and Unref().
-// Once all of our code is converted to idiomatic C++, we may be able to
-// eliminate this class.
-template <typename Child>
-class InternallyRefCountedWithTracing : public Orphanable {
- public:
- // Not copyable nor movable.
- InternallyRefCountedWithTracing(const InternallyRefCountedWithTracing&) =
- delete;
- InternallyRefCountedWithTracing& operator=(
- const InternallyRefCountedWithTracing&) = delete;
-
- GRPC_ABSTRACT_BASE_CLASS
-
- protected:
- GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
-
- // Allow RefCountedPtr<> to access Unref() and IncrementRefCount().
- template <typename T>
- friend class RefCountedPtr;
-
- InternallyRefCountedWithTracing()
- : InternallyRefCountedWithTracing(static_cast<TraceFlag*>(nullptr)) {}
-
- explicit InternallyRefCountedWithTracing(TraceFlag* trace_flag)
- : trace_flag_(trace_flag) {
- gpr_ref_init(&refs_, 1);
- }
-
-#ifdef NDEBUG
- explicit InternallyRefCountedWithTracing(DebugOnlyTraceFlag* trace_flag)
- : InternallyRefCountedWithTracing() {}
-#endif
-
- virtual ~InternallyRefCountedWithTracing() {}
-
- RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
- IncrementRefCount();
- return RefCountedPtr<Child>(static_cast<Child*>(this));
- }
-
RefCountedPtr<Child> Ref(const DebugLocation& location,
const char* reason) GRPC_MUST_USE_RESULT {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs + 1, reason);
- }
- return Ref();
+ IncrementRefCount(location, reason);
+ return RefCountedPtr<Child>(static_cast<Child*>(this));
}
- // TODO(roth): Once all of our code is converted to C++ and can use
- // RefCountedPtr<> instead of manual ref-counting, make the Unref() methods
- // private, since they will only be used by RefCountedPtr<>, which is a
- // friend of this class.
-
void Unref() {
- if (gpr_unref(&refs_)) {
+ if (refs_.Unref()) {
Delete(static_cast<Child*>(this));
}
}
-
void Unref(const DebugLocation& location, const char* reason) {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs - 1, reason);
+ if (refs_.Unref(location, reason)) {
+ Delete(static_cast<Child*>(this));
}
- Unref();
}
private:
- void IncrementRefCount() { gpr_ref(&refs_); }
+ void IncrementRefCount() { refs_.Ref(); }
+ void IncrementRefCount(const DebugLocation& location, const char* reason) {
+ refs_.Ref(location, reason);
+ }
- TraceFlag* trace_flag_ = nullptr;
- gpr_refcount refs_;
+ grpc_core::RefCount refs_;
};
} // namespace grpc_core
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index 03c293f6ed..fa97ffcfed 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -21,9 +21,12 @@
#include <grpc/support/port_platform.h>
+#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include <atomic>
+#include <cassert>
#include <cinttypes>
#include "src/core/lib/debug/trace.h"
@@ -34,61 +37,150 @@
namespace grpc_core {
-// A base class for reference-counted objects.
-// New objects should be created via New() and start with a refcount of 1.
-// When the refcount reaches 0, the object will be deleted via Delete().
-//
-// This will commonly be used by CRTP (curiously-recurring template pattern)
-// e.g., class MyClass : public RefCounted<MyClass>
-template <typename Child>
-class RefCounted {
+// PolymorphicRefCount enforces polymorphic destruction of RefCounted.
+class PolymorphicRefCount {
public:
- RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
- IncrementRefCount();
- return RefCountedPtr<Child>(static_cast<Child*>(this));
- }
+ GRPC_ABSTRACT_BASE_CLASS
- // TODO(roth): Once all of our code is converted to C++ and can use
- // RefCountedPtr<> instead of manual ref-counting, make this method
- // private, since it will only be used by RefCountedPtr<>, which is a
- // friend of this class.
- void Unref() {
- if (gpr_unref(&refs_)) {
- Delete(static_cast<Child*>(this));
- }
- }
+ protected:
+ GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
- // Not copyable nor movable.
- RefCounted(const RefCounted&) = delete;
- RefCounted& operator=(const RefCounted&) = delete;
+ virtual ~PolymorphicRefCount() = default;
+};
+// NonPolymorphicRefCount does not enforce polymorphic destruction of
+// RefCounted. Please refer to grpc_core::RefCounted for more details, and
+// when in doubt use PolymorphicRefCount.
+class NonPolymorphicRefCount {
+ public:
GRPC_ABSTRACT_BASE_CLASS
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
- RefCounted() { gpr_ref_init(&refs_, 1); }
+ ~NonPolymorphicRefCount() = default;
+};
- virtual ~RefCounted() {}
+// RefCount is a simple atomic ref-count.
+//
+// This is a C++ implementation of gpr_refcount, with inline functions. Due to
+// inline functions, this class is significantly more efficient than
+// gpr_refcount and should be preferred over gpr_refcount whenever possible.
+//
+// TODO(soheil): Remove gpr_refcount after submitting the GRFC and the paragraph
+// above.
+class RefCount {
+ public:
+ using Value = intptr_t;
+
+ // `init` is the initial refcount stored in this object.
+ //
+ // TraceFlagT is defined to accept both DebugOnlyTraceFlag and TraceFlag.
+ // Note: RefCount tracing is only enabled on debug builds, even when a
+ // TraceFlag is used.
+ template <typename TraceFlagT = TraceFlag>
+ constexpr explicit RefCount(Value init = 1, TraceFlagT* trace_flag = nullptr)
+ :
+#ifndef NDEBUG
+ trace_flag_(trace_flag),
+#endif
+ value_(init) {
+ }
- private:
- // Allow RefCountedPtr<> to access IncrementRefCount().
- template <typename T>
- friend class RefCountedPtr;
+ // Increases the ref-count by `n`.
+ void Ref(Value n = 1) {
+ GPR_ATM_INC_ADD_THEN(value_.fetch_add(n, std::memory_order_relaxed));
+ }
+ void Ref(const DebugLocation& location, const char* reason, Value n = 1) {
+#ifndef NDEBUG
+ if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+ const RefCount::Value old_refs = get();
+ gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+ trace_flag_->name(), this, location.file(), location.line(),
+ old_refs, old_refs + n, reason);
+ }
+#endif
+ Ref(n);
+ }
+
+ // Similar to Ref() with an assert on the ref-count being non-zero.
+ void RefNonZero() {
+#ifndef NDEBUG
+ const Value prior =
+ GPR_ATM_INC_ADD_THEN(value_.fetch_add(1, std::memory_order_relaxed));
+ assert(prior > 0);
+#else
+ Ref();
+#endif
+ }
+ void RefNonZero(const DebugLocation& location, const char* reason) {
+#ifndef NDEBUG
+ if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+ const RefCount::Value old_refs = get();
+ gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+ trace_flag_->name(), this, location.file(), location.line(),
+ old_refs, old_refs + 1, reason);
+ }
+#endif
+ RefNonZero();
+ }
- void IncrementRefCount() { gpr_ref(&refs_); }
+ // Decrements the ref-count and returns true if the ref-count reaches 0.
+ bool Unref() {
+ const Value prior =
+ GPR_ATM_INC_ADD_THEN(value_.fetch_sub(1, std::memory_order_acq_rel));
+ GPR_DEBUG_ASSERT(prior > 0);
+ return prior == 1;
+ }
+ bool Unref(const DebugLocation& location, const char* reason) {
+#ifndef NDEBUG
+ if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+ const RefCount::Value old_refs = get();
+ gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
+ trace_flag_->name(), this, location.file(), location.line(),
+ old_refs, old_refs - 1, reason);
+ }
+#endif
+ return Unref();
+ }
- gpr_refcount refs_;
+ private:
+ Value get() const { return value_.load(std::memory_order_relaxed); }
+
+#ifndef NDEBUG
+ TraceFlag* trace_flag_;
+#endif
+ std::atomic<Value> value_;
};
-// An alternative version of the RefCounted base class that
-// supports tracing. This is intended to be used in cases where the
-// object will be handled both by idiomatic C++ code using smart
-// pointers and legacy code that is manually calling Ref() and Unref().
-// Once all of our code is converted to idiomatic C++, we may be able to
-// eliminate this class.
-template <typename Child>
-class RefCountedWithTracing {
+// A base class for reference-counted objects.
+// New objects should be created via New() and start with a refcount of 1.
+// When the refcount reaches 0, the object will be deleted via Delete().
+//
+// This will commonly be used by CRTP (curiously-recurring template pattern)
+// e.g., class MyClass : public RefCounted<MyClass>
+//
+// Use PolymorphicRefCount and NonPolymorphicRefCount to select between
+// different implementations of RefCounted.
+//
+// Note that NonPolymorphicRefCount does not support polymorphic destruction.
+// So, use NonPolymorphicRefCount only when both of the following conditions
+// are guaranteed to hold:
+// (a) Child is a concrete leaf class in RefCounted<Child>, and
+// (b) you are guaranteed to call Unref only on concrete leaf classes and not
+// their parents.
+//
+// The following example is illegal, because calling Unref() will not call
+// the dtor of Child.
+//
+// class Parent : public RefCounted<Parent, NonPolymorphicRefCount> {}
+// class Child : public Parent {}
+//
+// Child* ch;
+// ch->Unref();
+//
+template <typename Child, typename Impl = PolymorphicRefCount>
+class RefCounted : public Impl {
public:
RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
IncrementRefCount();
@@ -97,69 +189,55 @@ class RefCountedWithTracing {
RefCountedPtr<Child> Ref(const DebugLocation& location,
const char* reason) GRPC_MUST_USE_RESULT {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs + 1, reason);
- }
- return Ref();
+ IncrementRefCount(location, reason);
+ return RefCountedPtr<Child>(static_cast<Child*>(this));
}
// TODO(roth): Once all of our code is converted to C++ and can use
- // RefCountedPtr<> instead of manual ref-counting, make the Unref() methods
- // private, since they will only be used by RefCountedPtr<>, which is a
+ // RefCountedPtr<> instead of manual ref-counting, make this method
+ // private, since it will only be used by RefCountedPtr<>, which is a
// friend of this class.
-
void Unref() {
- if (gpr_unref(&refs_)) {
+ if (refs_.Unref()) {
Delete(static_cast<Child*>(this));
}
}
-
void Unref(const DebugLocation& location, const char* reason) {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs - 1, reason);
+ if (refs_.Unref(location, reason)) {
+ Delete(static_cast<Child*>(this));
}
- Unref();
}
// Not copyable nor movable.
- RefCountedWithTracing(const RefCountedWithTracing&) = delete;
- RefCountedWithTracing& operator=(const RefCountedWithTracing&) = delete;
+ RefCounted(const RefCounted&) = delete;
+ RefCounted& operator=(const RefCounted&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
- RefCountedWithTracing()
- : RefCountedWithTracing(static_cast<TraceFlag*>(nullptr)) {}
-
- explicit RefCountedWithTracing(TraceFlag* trace_flag)
- : trace_flag_(trace_flag) {
- gpr_ref_init(&refs_, 1);
- }
-
-#ifdef NDEBUG
- explicit RefCountedWithTracing(DebugOnlyTraceFlag* trace_flag)
- : RefCountedWithTracing() {}
-#endif
+ // TraceFlagT is defined to accept both DebugOnlyTraceFlag and TraceFlag.
+ // Note: RefCount tracing is only enabled on debug builds, even when a
+ // TraceFlag is used.
+ template <typename TraceFlagT = TraceFlag>
+ explicit RefCounted(TraceFlagT* trace_flag = nullptr)
+ : refs_(1, trace_flag) {}
- virtual ~RefCountedWithTracing() {}
+ // Note: Depending on the Impl used, this dtor can be implicitly virtual.
+ ~RefCounted() = default;
private:
// Allow RefCountedPtr<> to access IncrementRefCount().
template <typename T>
friend class RefCountedPtr;
- void IncrementRefCount() { gpr_ref(&refs_); }
+ void IncrementRefCount() { refs_.Ref(); }
+ void IncrementRefCount(const DebugLocation& location, const char* reason) {
+ refs_.Ref(location, reason);
+ }
- TraceFlag* trace_flag_ = nullptr;
- gpr_refcount refs_;
+ RefCount refs_;
};
} // namespace grpc_core
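
A short sketch of choosing the refcount implementation (illustrative only; Foo and Bar are hypothetical classes). The default keeps polymorphic destruction; NonPolymorphicRefCount drops the vtable and is safe only for concrete leaf classes, as described in the comment above:

#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"

// Polymorphic destruction allowed (virtual dtor via PolymorphicRefCount).
class Foo : public grpc_core::RefCounted<Foo> {};

// Leaf-only class: no vtable, so Unref() must be called on Bar itself.
class Bar
    : public grpc_core::RefCounted<Bar, grpc_core::NonPolymorphicRefCount> {};

static void example_ref_counted() {
  grpc_core::RefCountedPtr<Foo> foo = grpc_core::MakeRefCounted<Foo>();
  grpc_core::RefCountedPtr<Bar> bar = grpc_core::MakeRefCounted<Bar>();
  // Both objects are unreffed (and deleted) when the smart pointers go out
  // of scope here.
}
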
diff --git a/src/core/lib/gprpp/ref_counted_ptr.h b/src/core/lib/gprpp/ref_counted_ptr.h
index c2dfbdd90f..1ed5d584c7 100644
--- a/src/core/lib/gprpp/ref_counted_ptr.h
+++ b/src/core/lib/gprpp/ref_counted_ptr.h
@@ -21,8 +21,10 @@
#include <grpc/support/port_platform.h>
+#include <type_traits>
#include <utility>
+#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/memory.h"
namespace grpc_core {
@@ -54,15 +56,13 @@ class RefCountedPtr {
// Move assignment.
RefCountedPtr& operator=(RefCountedPtr&& other) {
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
other.value_ = nullptr;
return *this;
}
template <typename Y>
RefCountedPtr& operator=(RefCountedPtr<Y>&& other) {
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
other.value_ = nullptr;
return *this;
}
@@ -74,6 +74,8 @@ class RefCountedPtr {
}
template <typename Y>
RefCountedPtr(const RefCountedPtr<Y>& other) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
if (other.value_ != nullptr) other.value_->IncrementRefCount();
value_ = other.value_;
}
@@ -83,17 +85,17 @@ class RefCountedPtr {
// Note: Order of reffing and unreffing is important here in case value_
// and other.value_ are the same object.
if (other.value_ != nullptr) other.value_->IncrementRefCount();
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
return *this;
}
template <typename Y>
RefCountedPtr& operator=(const RefCountedPtr<Y>& other) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
// Note: Order of reffing and unreffing is important here in case value_
// and other.value_ are the same object.
if (other.value_ != nullptr) other.value_->IncrementRefCount();
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
return *this;
}
@@ -102,15 +104,29 @@ class RefCountedPtr {
}
// If value is non-null, we take ownership of a ref to it.
- template <typename Y>
- void reset(Y* value) {
+ void reset(T* value = nullptr) {
if (value_ != nullptr) value_->Unref();
value_ = value;
}
-
- void reset() {
+ void reset(const DebugLocation& location, const char* reason,
+ T* value = nullptr) {
+ if (value_ != nullptr) value_->Unref(location, reason);
+ value_ = value;
+ }
+ template <typename Y>
+ void reset(Y* value = nullptr) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
if (value_ != nullptr) value_->Unref();
- value_ = nullptr;
+ value_ = value;
+ }
+ template <typename Y>
+ void reset(const DebugLocation& location, const char* reason,
+ Y* value = nullptr) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
+ if (value_ != nullptr) value_->Unref(location, reason);
+ value_ = value;
}
// TODO(roth): This method exists solely as a transition mechanism to allow
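
A one-line sketch of the new reset() overloads (illustrative only; Conn and the trace reason are hypothetical). Passing a DebugLocation and reason lets refcount tracing attribute the unref of the previously held object:

#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"

class Conn : public grpc_core::RefCounted<Conn> {};

static void example_reset_with_location(grpc_core::RefCountedPtr<Conn>* held) {
  // Unrefs the old value (if any) with an attributable location/reason and
  // leaves the pointer empty; a replacement pointer could be passed as the
  // optional third argument.
  held->reset(DEBUG_LOCATION, "shutting_down");
}
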
diff --git a/src/core/lib/iomgr/buffer_list.cc b/src/core/lib/iomgr/buffer_list.cc
index 6ada23db1c..ace17a108d 100644
--- a/src/core/lib/iomgr/buffer_list.cc
+++ b/src/core/lib/iomgr/buffer_list.cc
@@ -35,6 +35,9 @@ void TracedBuffer::AddNewEntry(TracedBuffer** head, uint32_t seq_no,
TracedBuffer* new_elem = New<TracedBuffer>(seq_no, arg);
/* Store the current time as the sendmsg time. */
new_elem->ts_.sendmsg_time = gpr_now(GPR_CLOCK_REALTIME);
+ new_elem->ts_.scheduled_time = gpr_inf_past(GPR_CLOCK_REALTIME);
+ new_elem->ts_.sent_time = gpr_inf_past(GPR_CLOCK_REALTIME);
+ new_elem->ts_.acked_time = gpr_inf_past(GPR_CLOCK_REALTIME);
if (*head == nullptr) {
*head = new_elem;
return;
@@ -55,10 +58,16 @@ void fill_gpr_from_timestamp(gpr_timespec* gts, const struct timespec* ts) {
gts->clock_type = GPR_CLOCK_REALTIME;
}
+void default_timestamps_callback(void* arg, grpc_core::Timestamps* ts,
+ grpc_error* shudown_err) {
+ gpr_log(GPR_DEBUG, "Timestamps callback has not been registered");
+}
+
/** The saved callback function that will be invoked when we get all the
* timestamps that we are going to get for a TracedBuffer. */
void (*timestamps_callback)(void*, grpc_core::Timestamps*,
- grpc_error* shutdown_err);
+ grpc_error* shutdown_err) =
+ default_timestamps_callback;
} /* namespace */
void TracedBuffer::ProcessTimestamp(TracedBuffer** head,
@@ -99,18 +108,20 @@ void TracedBuffer::ProcessTimestamp(TracedBuffer** head,
}
}
-void TracedBuffer::Shutdown(TracedBuffer** head, grpc_error* shutdown_err) {
+void TracedBuffer::Shutdown(TracedBuffer** head, void* remaining,
+ grpc_error* shutdown_err) {
GPR_DEBUG_ASSERT(head != nullptr);
TracedBuffer* elem = *head;
while (elem != nullptr) {
- if (timestamps_callback) {
- timestamps_callback(elem->arg_, &(elem->ts_), shutdown_err);
- }
+ timestamps_callback(elem->arg_, &(elem->ts_), shutdown_err);
auto* next = elem->next_;
Delete<TracedBuffer>(elem);
elem = next;
}
*head = nullptr;
+ if (remaining != nullptr) {
+ timestamps_callback(remaining, nullptr, shutdown_err);
+ }
GRPC_ERROR_UNREF(shutdown_err);
}
diff --git a/src/core/lib/iomgr/buffer_list.h b/src/core/lib/iomgr/buffer_list.h
index cbbf50a657..627f1bde99 100644
--- a/src/core/lib/iomgr/buffer_list.h
+++ b/src/core/lib/iomgr/buffer_list.h
@@ -37,6 +37,8 @@ struct Timestamps {
gpr_timespec scheduled_time;
gpr_timespec sent_time;
gpr_timespec acked_time;
+
+ uint32_t byte_offset; /* byte offset relative to the start of the RPC */
};
/** TracedBuffer is a class to keep track of timestamps for a specific buffer in
@@ -67,13 +69,13 @@ class TracedBuffer {
/** Cleans the list by calling the callback for each traced buffer in the list
* with timestamps that it has. */
- static void Shutdown(grpc_core::TracedBuffer** head,
+ static void Shutdown(grpc_core::TracedBuffer** head, void* remaining,
grpc_error* shutdown_err);
private:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
- TracedBuffer(int seq_no, void* arg)
+ TracedBuffer(uint32_t seq_no, void* arg)
: seq_no_(seq_no), arg_(arg), next_(nullptr) {}
uint32_t seq_no_; /* The sequence number for the last byte in the buffer */
@@ -82,7 +84,12 @@ class TracedBuffer {
grpc_core::TracedBuffer* next_; /* The next TracedBuffer in the list */
};
#else /* GRPC_LINUX_ERRQUEUE */
-class TracedBuffer {};
+class TracedBuffer {
+ public:
+ /* Dummy shutdown function */
+ static void Shutdown(grpc_core::TracedBuffer** head, void* remaining,
+ grpc_error* shutdown_err) {}
+};
#endif /* GRPC_LINUX_ERRQUEUE */
/** Sets the callback function to call when timestamps for a write are
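
Sketch of the new Shutdown() contract (illustrative only; tb_head and outgoing_buffer_arg are hypothetical names for an endpoint's traced-buffer list head and its pending, not-yet-traced write argument). Each queued TracedBuffer gets the timestamps callback with whatever timestamps it has, and the extra remaining argument lets the endpoint report a write that was buffered but never traced; the dummy class above keeps this compiling on non-errqueue platforms:

#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/error.h"

static void example_shutdown_traced_buffers(grpc_core::TracedBuffer** tb_head,
                                            void* outgoing_buffer_arg) {
  // Shutdown() takes ownership of the error and unrefs it when done.
  grpc_core::TracedBuffer::Shutdown(
      tb_head, outgoing_buffer_arg,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint shutdown"));
}
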
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index 00a839b64c..6b5759a036 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -39,8 +39,57 @@ static gpr_atm encode_cancel_state_error(grpc_error* error) {
return static_cast<gpr_atm>(1) | (gpr_atm)error;
}
+#ifdef GRPC_TSAN_ENABLED
+static void tsan_closure(void* user_data, grpc_error* error) {
+ grpc_call_combiner* call_combiner =
+ static_cast<grpc_call_combiner*>(user_data);
+ // We ref-count the lock, and check if it's already taken.
+ // If it was taken, we should do nothing. Otherwise, we will mark it as
+ // locked. Note that if two different threads try to do this, only one of
+ // them will be able to mark the lock as acquired, while they both run their
+ // callbacks. In such cases (which should never happen for call_combiner),
+ // TSAN will correctly produce an error.
+ //
+ // TODO(soheil): This only covers the callbacks scheduled by
+ // grpc_call_combiner_(start|finish). If in the future, a
+ // callback gets scheduled using other mechanisms, we will need
+ // to add APIs to externally lock call combiners.
+ grpc_core::RefCountedPtr<grpc_call_combiner::TsanLock> lock =
+ call_combiner->tsan_lock;
+ bool prev = false;
+ if (lock->taken.compare_exchange_strong(prev, true)) {
+ TSAN_ANNOTATE_RWLOCK_ACQUIRED(&lock->taken, true);
+ } else {
+ lock.reset();
+ }
+ GRPC_CLOSURE_RUN(call_combiner->original_closure, GRPC_ERROR_REF(error));
+ if (lock != nullptr) {
+ TSAN_ANNOTATE_RWLOCK_RELEASED(&lock->taken, true);
+ bool prev = true;
+ GPR_ASSERT(lock->taken.compare_exchange_strong(prev, false));
+ }
+}
+#endif
+
+static void call_combiner_sched_closure(grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error) {
+#ifdef GRPC_TSAN_ENABLED
+ call_combiner->original_closure = closure;
+ GRPC_CLOSURE_SCHED(&call_combiner->tsan_closure, error);
+#else
+ GRPC_CLOSURE_SCHED(closure, error);
+#endif
+}
+
void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
+ gpr_atm_no_barrier_store(&call_combiner->cancel_state, 0);
+ gpr_atm_no_barrier_store(&call_combiner->size, 0);
gpr_mpscq_init(&call_combiner->queue);
+#ifdef GRPC_TSAN_ENABLED
+ GRPC_CLOSURE_INIT(&call_combiner->tsan_closure, tsan_closure, call_combiner,
+ grpc_schedule_on_exec_ctx);
+#endif
}
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
@@ -85,7 +134,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
- GRPC_CLOSURE_SCHED(closure, error);
+ call_combiner_sched_closure(call_combiner, closure, error);
} else {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " QUEUING");
@@ -132,7 +181,8 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
- GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
+ call_combiner_sched_closure(call_combiner, closure,
+ closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 6f7ddd4043..4ec0044f05 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -27,7 +27,10 @@
#include "src/core/lib/gpr/mpscq.h"
#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/dynamic_annotations.h"
// A simple, lock-free mechanism for serializing activity related to a
// single call. This is similar to a combiner but is more lightweight.
@@ -40,14 +43,38 @@
extern grpc_core::TraceFlag grpc_call_combiner_trace;
-typedef struct {
- gpr_atm size; // size_t, num closures in queue or currently executing
+struct grpc_call_combiner {
+ gpr_atm size = 0; // size_t, num closures in queue or currently executing
gpr_mpscq queue;
// Either 0 (if not cancelled and no cancellation closure set),
// a grpc_closure* (if the lowest bit is 0),
// or a grpc_error* (if the lowest bit is 1).
- gpr_atm cancel_state;
-} grpc_call_combiner;
+ gpr_atm cancel_state = 0;
+#ifdef GRPC_TSAN_ENABLED
+ // A fake ref-counted lock that is kept alive after the destruction of
+ // grpc_call_combiner, when we are running the original closure.
+ //
+ // Ideally we want to lock and unlock the call combiner as a pointer, when the
+ // callback is called. However, original_closure is free to trigger
+ // anything on the call combiner (including destruction of grpc_call).
+ // Thus, we need a ref-counted structure that can outlive the call combiner.
+ struct TsanLock
+ : public grpc_core::RefCounted<TsanLock,
+ grpc_core::NonPolymorphicRefCount> {
+ TsanLock() { TSAN_ANNOTATE_RWLOCK_CREATE(&taken); }
+ ~TsanLock() { TSAN_ANNOTATE_RWLOCK_DESTROY(&taken); }
+
+ // To avoid double-locking by the same thread, we should acquire/release
+ // the lock only when taken is false. On each acquire taken must be set to
+ // true.
+ std::atomic<bool> taken{false};
+ };
+ grpc_core::RefCountedPtr<TsanLock> tsan_lock =
+ grpc_core::MakeRefCounted<TsanLock>();
+ grpc_closure tsan_closure;
+ grpc_closure* original_closure;
+#endif
+};
// Assumes memory was initialized to zero.
void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index f14c723844..bde3437c02 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -114,6 +114,7 @@ inline grpc_closure* grpc_closure_init(grpc_closure* closure,
closure->cb = cb;
closure->cb_arg = cb_arg;
closure->scheduler = scheduler;
+ closure->error_data.error = GRPC_ERROR_NONE;
#ifndef NDEBUG
closure->scheduled = false;
closure->file_initiated = nullptr;
diff --git a/src/core/lib/iomgr/dynamic_annotations.h b/src/core/lib/iomgr/dynamic_annotations.h
new file mode 100644
index 0000000000..713928023a
--- /dev/null
+++ b/src/core/lib/iomgr/dynamic_annotations.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H
+#define GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GRPC_TSAN_ENABLED
+
+#define TSAN_ANNOTATE_HAPPENS_BEFORE(addr) \
+ AnnotateHappensBefore(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_HAPPENS_AFTER(addr) \
+ AnnotateHappensAfter(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_CREATE(addr) \
+ AnnotateRWLockCreate(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_DESTROY(addr) \
+ AnnotateRWLockDestroy(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w) \
+ AnnotateRWLockAcquired(__FILE__, __LINE__, (void*)(addr), (is_w))
+#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w) \
+ AnnotateRWLockReleased(__FILE__, __LINE__, (void*)(addr), (is_w))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateHappensBefore(const char* file, int line, const volatile void* cv);
+void AnnotateHappensAfter(const char* file, int line, const volatile void* cv);
+void AnnotateRWLockCreate(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+ const volatile void* lock, long is_w);
+void AnnotateRWLockReleased(const char* file, int line,
+ const volatile void* lock, long is_w);
+#ifdef __cplusplus
+}
+#endif
+
+#else /* GRPC_TSAN_ENABLED */
+
+#define TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
+#define TSAN_ANNOTATE_HAPPENS_AFTER(addr)
+#define TSAN_ANNOTATE_RWLOCK_CREATE(addr)
+#define TSAN_ANNOTATE_RWLOCK_DESTROY(addr)
+#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w)
+#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w)
+
+#endif /* GRPC_TSAN_ENABLED */
+
+#endif /* GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H */
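A minimal usage sketch of the new annotation macros, assuming a TSAN build of the tree; AnnotatedSpinLock is an illustrative class, not part of the patch. It reports lock edges to TSAN the same way TsanLock in call_combiner.h does, and compiles to a plain spinlock when GRPC_TSAN_ENABLED is off because the macros then expand to nothing.

#include <atomic>
#include "src/core/lib/iomgr/dynamic_annotations.h"

class AnnotatedSpinLock {
 public:
  AnnotatedSpinLock() { TSAN_ANNOTATE_RWLOCK_CREATE(&taken_); }
  ~AnnotatedSpinLock() { TSAN_ANNOTATE_RWLOCK_DESTROY(&taken_); }

  void Lock() {
    bool expected = false;
    // Spin until we flip taken_ from false to true, then tell TSAN we hold
    // the "lock" in write mode.
    while (!taken_.compare_exchange_weak(expected, true)) expected = false;
    TSAN_ANNOTATE_RWLOCK_ACQUIRED(&taken_, /*is_w=*/1);
  }

  void Unlock() {
    // Announce the release before actually dropping the flag.
    TSAN_ANNOTATE_RWLOCK_RELEASED(&taken_, /*is_w=*/1);
    taken_.store(false);
  }

 private:
  std::atomic<bool> taken_{false};
};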
diff --git a/src/core/lib/iomgr/endpoint.cc b/src/core/lib/iomgr/endpoint.cc
index 44fb47e19d..06316c6031 100644
--- a/src/core/lib/iomgr/endpoint.cc
+++ b/src/core/lib/iomgr/endpoint.cc
@@ -61,3 +61,7 @@ int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
return ep->vtable->get_resource_user(ep);
}
+
+bool grpc_endpoint_can_track_err(grpc_endpoint* ep) {
+ return ep->vtable->can_track_err(ep);
+}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 1f590a80ca..79c8ece263 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -47,6 +47,7 @@ struct grpc_endpoint_vtable {
grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep);
char* (*get_peer)(grpc_endpoint* ep);
int (*get_fd)(grpc_endpoint* ep);
+ bool (*can_track_err)(grpc_endpoint* ep);
};
/* When data is available on the connection, calls the callback with slices.
@@ -95,6 +96,8 @@ void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* endpoint);
+bool grpc_endpoint_can_track_err(grpc_endpoint* ep);
+
struct grpc_endpoint {
const grpc_endpoint_vtable* vtable;
};
diff --git a/src/core/lib/iomgr/endpoint_cfstream.cc b/src/core/lib/iomgr/endpoint_cfstream.cc
index df2cf508c8..7c4bc1ace2 100644
--- a/src/core/lib/iomgr/endpoint_cfstream.cc
+++ b/src/core/lib/iomgr/endpoint_cfstream.cc
@@ -315,6 +315,8 @@ char* CFStreamGetPeer(grpc_endpoint* ep) {
int CFStreamGetFD(grpc_endpoint* ep) { return 0; }
+bool CFStreamCanTrackErr(grpc_endpoint* ep) { return false; }
+
void CFStreamAddToPollset(grpc_endpoint* ep, grpc_pollset* pollset) {}
void CFStreamAddToPollsetSet(grpc_endpoint* ep, grpc_pollset_set* pollset) {}
void CFStreamDeleteFromPollsetSet(grpc_endpoint* ep,
@@ -329,7 +331,8 @@ static const grpc_endpoint_vtable vtable = {CFStreamRead,
CFStreamDestroy,
CFStreamGetResourceUser,
CFStreamGetPeer,
- CFStreamGetFD};
+ CFStreamGetFD,
+ CFStreamCanTrackErr};
grpc_endpoint* grpc_cfstream_endpoint_create(
CFReadStreamRef read_stream, CFWriteStreamRef write_stream,
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.cc b/src/core/lib/iomgr/endpoint_pair_posix.cc
index 3afbfd7254..5c5c246f99 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.cc
+++ b/src/core/lib/iomgr/endpoint_pair_posix.cc
@@ -59,11 +59,11 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
grpc_core::ExecCtx exec_ctx;
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name, true), args,
+ p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name, false), args,
"socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name, true), args,
+ p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name, false), args,
"socketpair-client");
gpr_free(final_name);
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 38571b1957..4b8c891e9b 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -1242,6 +1242,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
* Event engine binding
*/
+static void shutdown_background_closure(void) {}
+
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
@@ -1255,6 +1257,7 @@ static void shutdown_engine(void) {
static const grpc_event_engine_vtable vtable = {
sizeof(grpc_pollset),
true,
+ false,
fd_create,
fd_wrapped_fd,
@@ -1284,6 +1287,7 @@ static const grpc_event_engine_vtable vtable = {
pollset_set_add_fd,
pollset_set_del_fd,
+ shutdown_background_closure,
shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 06a382c556..7a4870db78 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -1604,6 +1604,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
* Event engine binding
*/
+static void shutdown_background_closure(void) {}
+
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
@@ -1612,6 +1614,7 @@ static void shutdown_engine(void) {
static const grpc_event_engine_vtable vtable = {
sizeof(grpc_pollset),
true,
+ false,
fd_create,
fd_wrapped_fd,
@@ -1641,6 +1644,7 @@ static const grpc_event_engine_vtable vtable = {
pollset_set_add_fd,
pollset_set_del_fd,
+ shutdown_background_closure,
shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 16562538a6..67cbfbbd02 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -1782,6 +1782,8 @@ static void global_cv_fd_table_shutdown() {
* event engine binding
*/
+static void shutdown_background_closure(void) {}
+
static void shutdown_engine(void) {
pollset_global_shutdown();
if (grpc_cv_wakeup_fds_enabled()) {
@@ -1796,6 +1798,7 @@ static void shutdown_engine(void) {
static const grpc_event_engine_vtable vtable = {
sizeof(grpc_pollset),
false,
+ false,
fd_create,
fd_wrapped_fd,
@@ -1825,6 +1828,7 @@ static const grpc_event_engine_vtable vtable = {
pollset_set_add_fd,
pollset_set_del_fd,
+ shutdown_background_closure,
shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 8a7dc7b004..32d1b6c43e 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
+#include "src/core/lib/iomgr/internal_errqueue.h"
grpc_core::TraceFlag grpc_polling_trace(false,
"polling"); /* Disabled by default */
@@ -236,19 +237,22 @@ void grpc_event_engine_shutdown(void) {
}
bool grpc_event_engine_can_track_errors(void) {
-/* Only track errors if platform supports errqueue. */
-#ifdef GRPC_LINUX_ERRQUEUE
- return g_event_engine->can_track_err;
-#else
+ /* Only track errors if platform supports errqueue. */
+ if (grpc_core::kernel_supports_errqueue()) {
+ return g_event_engine->can_track_err;
+ }
return false;
-#endif /* GRPC_LINUX_ERRQUEUE */
+}
+
+bool grpc_event_engine_run_in_background(void) {
+ return g_event_engine->run_in_background;
}
grpc_fd* grpc_fd_create(int fd, const char* name, bool track_err) {
GRPC_POLLING_API_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
GRPC_FD_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
- return g_event_engine->fd_create(fd, name,
- track_err && g_event_engine->can_track_err);
+ return g_event_engine->fd_create(
+ fd, name, track_err && grpc_event_engine_can_track_errors());
}
int grpc_fd_wrapped_fd(grpc_fd* fd) {
@@ -395,4 +399,8 @@ void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
g_event_engine->pollset_set_del_fd(pollset_set, fd);
}
+void grpc_shutdown_background_closure(void) {
+ g_event_engine->shutdown_background_closure();
+}
+
#endif // GRPC_POSIX_SOCKET_EV
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index b8fb8f534b..812c7a0f0f 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -42,6 +42,7 @@ typedef struct grpc_fd grpc_fd;
typedef struct grpc_event_engine_vtable {
size_t pollset_size;
bool can_track_err;
+ bool run_in_background;
grpc_fd* (*fd_create)(int fd, const char* name, bool track_err);
int (*fd_wrapped_fd)(grpc_fd* fd);
@@ -79,6 +80,7 @@ typedef struct grpc_event_engine_vtable {
void (*pollset_set_add_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
void (*pollset_set_del_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
+ void (*shutdown_background_closure)(void);
void (*shutdown_engine)(void);
} grpc_event_engine_vtable;
@@ -101,6 +103,11 @@ const char* grpc_get_poll_strategy_name();
*/
bool grpc_event_engine_can_track_errors();
+/* Returns true if the polling engine runs in the background, false otherwise.
+ * Currently only 'epollbg' runs in the background.
+ */
+bool grpc_event_engine_run_in_background();
+
/* Create a wrapped file descriptor.
Requires fd is a non-blocking file descriptor.
\a track_err if true means that error events would be tracked separately
@@ -174,6 +181,9 @@ void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
+/* Shut down all the closures registered in the background poller. */
+void grpc_shutdown_background_closure();
+
/* override to allow tests to hook poll() usage */
typedef int (*grpc_poll_function_type)(struct pollfd*, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;
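A hedged sketch of how a caller might consume the new run_in_background query (the tcp_posix.cc hunk further down does exactly this for its backup poller); maybe_start_backup_poller is an illustrative name, not a function in the tree.

#include "src/core/lib/iomgr/ev_posix.h"

static void maybe_start_backup_poller() {
  if (grpc_event_engine_run_in_background()) {
    // A poller thread is already driving the fds in the background
    // (currently only 'epollbg'); no backup poller is needed.
    return;
  }
  // Otherwise the caller must provide its own polling coverage, e.g. the
  // backup poller that tcp_posix.cc arms via cover_self().
}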
diff --git a/src/core/lib/iomgr/fork_posix.cc b/src/core/lib/iomgr/fork_posix.cc
index e957bad73d..05ecd2a49b 100644
--- a/src/core/lib/iomgr/fork_posix.cc
+++ b/src/core/lib/iomgr/fork_posix.cc
@@ -60,7 +60,7 @@ void grpc_prefork() {
}
if (strcmp(grpc_get_poll_strategy_name(), "epoll1") != 0 &&
strcmp(grpc_get_poll_strategy_name(), "poll") != 0) {
- gpr_log(GPR_ERROR,
+ gpr_log(GPR_INFO,
"Fork support is only compatible with the epoll1 and poll polling "
"strategies");
}
diff --git a/src/core/lib/iomgr/internal_errqueue.cc b/src/core/lib/iomgr/internal_errqueue.cc
index 99c22e9055..982d709f09 100644
--- a/src/core/lib/iomgr/internal_errqueue.cc
+++ b/src/core/lib/iomgr/internal_errqueue.cc
@@ -20,17 +20,50 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/impl/codegen/log.h>
#include "src/core/lib/iomgr/internal_errqueue.h"
#ifdef GRPC_POSIX_SOCKET_TCP
-bool kernel_supports_errqueue() {
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/utsname.h>
+
+namespace grpc_core {
+static bool errqueue_supported = false;
+
+bool kernel_supports_errqueue() { return errqueue_supported; }
+
+void grpc_errqueue_init() {
+/* Both the compile-time and run-time Linux kernel versions should be at least
+ * 4.0.0. */
#ifdef LINUX_VERSION_CODE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
- return true;
+ struct utsname buffer;
+ if (uname(&buffer) != 0) {
+ gpr_log(GPR_ERROR, "uname: %s", strerror(errno));
+ return;
+ }
+ char* release = buffer.release;
+ if (release == nullptr) {
+ return;
+ }
+
+ if (strtol(release, nullptr, 10) >= 4) {
+ errqueue_supported = true;
+ } else {
+ gpr_log(GPR_DEBUG, "ERRQUEUE support not enabled");
+ }
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(4, 0, 0) */
#endif /* LINUX_VERSION_CODE */
- return false;
}
+} /* namespace grpc_core */
+
+#else
+
+namespace grpc_core {
+void grpc_errqueue_init() {}
+} /* namespace grpc_core */
#endif /* GRPC_POSIX_SOCKET_TCP */
diff --git a/src/core/lib/iomgr/internal_errqueue.h b/src/core/lib/iomgr/internal_errqueue.h
index 9d122808f9..f8644c2536 100644
--- a/src/core/lib/iomgr/internal_errqueue.h
+++ b/src/core/lib/iomgr/internal_errqueue.h
@@ -76,8 +76,14 @@ constexpr uint32_t kTimestampingRecordingOptions =
* Currently allowing only linux kernels above 4.0.0
*/
bool kernel_supports_errqueue();
-} // namespace grpc_core
+
+} /* namespace grpc_core */
#endif /* GRPC_POSIX_SOCKET_TCP */
+namespace grpc_core {
+/* Initializes errqueue support */
+void grpc_errqueue_init();
+} /* namespace grpc_core */
+
#endif /* GRPC_CORE_LIB_IOMGR_INTERNAL_ERRQUEUE_H */
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index 46afda1774..eb29973514 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -33,8 +33,10 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/internal_errqueue.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/timer.h"
@@ -57,6 +59,7 @@ void grpc_iomgr_init() {
g_root_object.name = (char*)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
+ grpc_core::grpc_errqueue_init();
}
void grpc_iomgr_start() { grpc_timer_manager_init(); }
@@ -154,6 +157,10 @@ void grpc_iomgr_shutdown() {
gpr_cv_destroy(&g_rcv);
}
+void grpc_iomgr_shutdown_background_closure() {
+ grpc_iomgr_platform_shutdown_background_closure();
+}
+
void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name) {
obj->name = gpr_strdup(name);
gpr_mu_lock(&g_mu);
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 537ef8a6ff..8ea9289e06 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -35,6 +35,10 @@ void grpc_iomgr_start();
* exec_ctx. */
void grpc_iomgr_shutdown();
+/** Signals the intention to shut down all the closures registered in the
+ * background poller. */
+void grpc_iomgr_shutdown_background_closure();
+
/* Exposed only for testing */
size_t grpc_iomgr_count_objects_for_testing();
diff --git a/src/core/lib/iomgr/iomgr_custom.cc b/src/core/lib/iomgr/iomgr_custom.cc
index d34c8e7cd1..4b112c9097 100644
--- a/src/core/lib/iomgr/iomgr_custom.cc
+++ b/src/core/lib/iomgr/iomgr_custom.cc
@@ -40,9 +40,11 @@ static void iomgr_platform_init(void) {
}
static void iomgr_platform_flush(void) {}
static void iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
+static void iomgr_platform_shutdown_background_closure(void) {}
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_custom_iomgr_init(grpc_socket_vtable* socket,
grpc_custom_resolver_vtable* resolver,
diff --git a/src/core/lib/iomgr/iomgr_internal.cc b/src/core/lib/iomgr/iomgr_internal.cc
index 32dbabb79d..b6c9211865 100644
--- a/src/core/lib/iomgr/iomgr_internal.cc
+++ b/src/core/lib/iomgr/iomgr_internal.cc
@@ -41,3 +41,7 @@ void grpc_iomgr_platform_init() { iomgr_platform_vtable->init(); }
void grpc_iomgr_platform_flush() { iomgr_platform_vtable->flush(); }
void grpc_iomgr_platform_shutdown() { iomgr_platform_vtable->shutdown(); }
+
+void grpc_iomgr_platform_shutdown_background_closure() {
+ iomgr_platform_vtable->shutdown_background_closure();
+}
diff --git a/src/core/lib/iomgr/iomgr_internal.h b/src/core/lib/iomgr/iomgr_internal.h
index b011d9c7b1..bca7409907 100644
--- a/src/core/lib/iomgr/iomgr_internal.h
+++ b/src/core/lib/iomgr/iomgr_internal.h
@@ -35,6 +35,7 @@ typedef struct grpc_iomgr_platform_vtable {
void (*init)(void);
void (*flush)(void);
void (*shutdown)(void);
+ void (*shutdown_background_closure)(void);
} grpc_iomgr_platform_vtable;
void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name);
@@ -52,6 +53,9 @@ void grpc_iomgr_platform_flush(void);
/** tear down all platform specific global iomgr structures */
void grpc_iomgr_platform_shutdown(void);
+/** shut down all the closures registered in the background poller */
+void grpc_iomgr_platform_shutdown_background_closure(void);
+
bool grpc_iomgr_abort_on_leaks(void);
#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_INTERNAL_H */
diff --git a/src/core/lib/iomgr/iomgr_posix.cc b/src/core/lib/iomgr/iomgr_posix.cc
index ca7334c9a4..9386adf060 100644
--- a/src/core/lib/iomgr/iomgr_posix.cc
+++ b/src/core/lib/iomgr/iomgr_posix.cc
@@ -51,8 +51,13 @@ static void iomgr_platform_shutdown(void) {
grpc_wakeup_fd_global_destroy();
}
+static void iomgr_platform_shutdown_background_closure(void) {
+ grpc_shutdown_background_closure();
+}
+
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_set_default_iomgr_platform() {
grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable);
diff --git a/src/core/lib/iomgr/iomgr_posix_cfstream.cc b/src/core/lib/iomgr/iomgr_posix_cfstream.cc
index 235a9e0712..552ef4309c 100644
--- a/src/core/lib/iomgr/iomgr_posix_cfstream.cc
+++ b/src/core/lib/iomgr/iomgr_posix_cfstream.cc
@@ -54,8 +54,13 @@ static void iomgr_platform_shutdown(void) {
grpc_wakeup_fd_global_destroy();
}
+static void iomgr_platform_shutdown_background_closure(void) {
+ grpc_shutdown_background_closure();
+}
+
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_set_default_iomgr_platform() {
char* enable_cfstream = getenv(grpc_cfstream_env_var);
diff --git a/src/core/lib/iomgr/iomgr_windows.cc b/src/core/lib/iomgr/iomgr_windows.cc
index cdef89cbf0..24ef0dba7b 100644
--- a/src/core/lib/iomgr/iomgr_windows.cc
+++ b/src/core/lib/iomgr/iomgr_windows.cc
@@ -71,8 +71,11 @@ static void iomgr_platform_shutdown(void) {
winsock_shutdown();
}
+static void iomgr_platform_shutdown_background_closure(void) {}
+
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_set_default_iomgr_platform() {
grpc_set_tcp_client_impl(&grpc_windows_tcp_client_vtable);
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index a95e08524c..6f4c5bdd66 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -34,13 +34,13 @@ typedef enum grpc_pollset_tag {
* functions that accept a pollset XOR a pollset_set to do so through an
* abstract interface. No ownership is taken. */
-typedef struct grpc_polling_entity {
+struct grpc_polling_entity {
union {
- grpc_pollset* pollset;
+ grpc_pollset* pollset = nullptr;
grpc_pollset_set* pollset_set;
} pollent;
- grpc_pollset_tag tag;
-} grpc_polling_entity;
+ grpc_pollset_tag tag = GRPC_POLLS_NONE;
+};
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set* pollset_set);
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index bf56a7298d..c8046b21dc 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -62,8 +62,7 @@
#define GRPC_HAVE_UNIX_SOCKET 1
#ifdef LINUX_VERSION_CODE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
-/* TODO(yashykt): Re-enable once Fathom changes are commited.
-#define GRPC_LINUX_ERRQUEUE 1 */
+#define GRPC_LINUX_ERRQUEUE 1
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
#endif /* LINUX_VERSION_CODE */
#define GRPC_LINUX_MULTIPOLL_WITH_EPOLL 1
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 6afe94a7a9..7016ffc31a 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -65,7 +65,7 @@ void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable);
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
-/* TODO(ctiller): add a timeout here */
+/* TODO(apolcyn): add a timeout here */
void grpc_resolve_address(const char* addr, const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index b6fc7579f7..7e4b3c9b2f 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -90,7 +90,8 @@ struct grpc_resource_user {
grpc_closure_list on_allocated;
/* True if we are currently trying to allocate from the quota, false if not */
bool allocating;
- /* How many bytes of allocations are outstanding */
+ /* The amount of memory (in bytes) that has been requested from this user
+ * asynchronously but hasn't been granted yet. */
int64_t outstanding_allocations;
/* True if we are currently trying to add ourselves to the non-free quota
list, false otherwise */
@@ -135,6 +136,9 @@ struct grpc_resource_quota {
int64_t size;
/* Amount of free memory in the resource quota */
int64_t free_pool;
+  /* Amount of memory currently in use under this resource quota. Updated as
+   * soon as resource users allocate or free memory. */
+ gpr_atm used;
gpr_atm last_size;
@@ -371,6 +375,7 @@ static bool rq_reclaim_from_per_user_free_pool(
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
gpr_mu_lock(&resource_user->mu);
+ resource_user->added_to_free_pool = false;
if (resource_user->free_pool > 0) {
int64_t amt = resource_user->free_pool;
resource_user->free_pool = 0;
@@ -386,6 +391,13 @@ static bool rq_reclaim_from_per_user_free_pool(
gpr_mu_unlock(&resource_user->mu);
return true;
} else {
+ if (grpc_resource_quota_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "RQ %s %s: failed to reclaim_from_per_user_free_pool; "
+ "free_pool = %" PRId64 "; rq_free_pool = %" PRId64,
+ resource_quota->name, resource_user->name,
+ resource_user->free_pool, resource_quota->free_pool);
+ }
gpr_mu_unlock(&resource_user->mu);
}
}
@@ -622,6 +634,7 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
+ resource_quota->used = 0;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
gpr_mu_init(&resource_quota->thread_count_mu);
resource_quota->max_threads = INT_MAX;
@@ -712,7 +725,7 @@ size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
*/
grpc_resource_quota* grpc_resource_quota_from_channel_args(
- const grpc_channel_args* channel_args) {
+ const grpc_channel_args* channel_args, bool create) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
@@ -724,7 +737,7 @@ grpc_resource_quota* grpc_resource_quota_from_channel_args(
}
}
}
- return grpc_resource_quota_create(nullptr);
+ return create ? grpc_resource_quota_create(nullptr) : nullptr;
}
static void* rq_copy(void* rq) {
@@ -863,33 +876,68 @@ void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
}
-void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
- grpc_closure* optional_on_done) {
- gpr_mu_lock(&resource_user->mu);
+static void resource_user_alloc_locked(grpc_resource_user* resource_user,
+ size_t size,
+ grpc_closure* optional_on_done) {
ru_ref_by(resource_user, static_cast<gpr_atm>(size));
resource_user->free_pool -= static_cast<int64_t>(size);
- resource_user->outstanding_allocations += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
}
if (resource_user->free_pool < 0) {
- grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
- GRPC_ERROR_NONE);
+ if (optional_on_done != nullptr) {
+ resource_user->outstanding_allocations += static_cast<int64_t>(size);
+ grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
+ GRPC_ERROR_NONE);
+ }
if (!resource_user->allocating) {
resource_user->allocating = true;
GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE);
}
} else {
- resource_user->outstanding_allocations -= static_cast<int64_t>(size);
GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE);
}
+}
+
+bool grpc_resource_user_safe_alloc(grpc_resource_user* resource_user,
+ size_t size) {
+ if (gpr_atm_no_barrier_load(&resource_user->shutdown)) return false;
+ gpr_mu_lock(&resource_user->mu);
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ bool cas_success;
+ do {
+ gpr_atm used = gpr_atm_no_barrier_load(&resource_quota->used);
+ gpr_atm new_used = used + size;
+ if (static_cast<size_t>(new_used) >
+ grpc_resource_quota_peek_size(resource_quota)) {
+ gpr_mu_unlock(&resource_user->mu);
+ return false;
+ }
+ cas_success = gpr_atm_full_cas(&resource_quota->used, used, new_used);
+ } while (!cas_success);
+ resource_user_alloc_locked(resource_user, size, nullptr);
+ gpr_mu_unlock(&resource_user->mu);
+ return true;
+}
+
+void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
+ grpc_closure* optional_on_done) {
+ // TODO(juanlishen): Maybe return immediately if shutting down. Deferring this
+ // because some tests become flaky after the change.
+ gpr_mu_lock(&resource_user->mu);
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ gpr_atm_no_barrier_fetch_add(&resource_quota->used, size);
+ resource_user_alloc_locked(resource_user, size, optional_on_done);
gpr_mu_unlock(&resource_user->mu);
}
void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ gpr_atm prior = gpr_atm_no_barrier_fetch_add(&resource_quota->used, -size);
+ GPR_ASSERT(prior >= static_cast<long>(size));
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
@@ -940,6 +988,12 @@ void grpc_resource_user_slice_allocator_init(
void grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest) {
+ if (gpr_atm_no_barrier_load(&slice_allocator->resource_user->shutdown)) {
+ GRPC_CLOSURE_SCHED(
+ &slice_allocator->on_allocated,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resource user shutdown"));
+ return;
+ }
slice_allocator->length = length;
slice_allocator->count = count;
slice_allocator->dest = dest;
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 7b0ed7417a..1c79b52e3f 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -65,11 +65,16 @@
extern grpc_core::TraceFlag grpc_resource_quota_trace;
+// TODO(juanlishen): This is a hack. We need to do real accounting instead of
+// hard coding.
+constexpr size_t GRPC_RESOURCE_QUOTA_CALL_SIZE = 15 * 1024;
+constexpr size_t GRPC_RESOURCE_QUOTA_CHANNEL_SIZE = 50 * 1024;
+
grpc_resource_quota* grpc_resource_quota_ref_internal(
grpc_resource_quota* resource_quota);
void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota);
grpc_resource_quota* grpc_resource_quota_from_channel_args(
- const grpc_channel_args* channel_args);
+ const grpc_channel_args* channel_args, bool create = true);
/* Return a number indicating current memory pressure:
0.0 ==> no memory usage
@@ -109,11 +114,21 @@ bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
int thread_count);
-/* Allocate from the resource user (and its quota).
- If optional_on_done is NULL, then allocate immediately. This may push the
- quota over-limit, at which point reclamation will kick in.
- If optional_on_done is non-NULL, it will be scheduled when the allocation has
- been granted by the quota. */
+/* Allocates 'size' bytes from the resource user if doing so will not push the
+ * resource quota over its total size. Returns whether the allocation
+ * succeeded. On success, the caller is responsible for eventually freeing the
+ * memory. */
+bool grpc_resource_user_safe_alloc(grpc_resource_user* resource_user,
+ size_t size);
+/* Allocates 'size' bytes from the resource user.
+ * If optional_on_done is NULL, the allocation happens immediately. This may
+ * push the quota over-limit, at which point reclamation kicks in. The caller
+ * is always responsible for eventually freeing the memory.
+ * If optional_on_done is non-NULL, it is scheduled without error once the
+ * quota has granted the allocation, and the caller is responsible for
+ * eventually freeing the memory. If it is scheduled with an error, the
+ * allocation failed and the caller must not free the memory.
+ */
void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done);
/* Release memory back to the quota */
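A usage sketch of the two allocation paths documented above; allocate_examples and on_alloc_done are illustrative names, and in real code the closure would live inside a longer-lived object rather than a file-scope static.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resource_quota.h"

static grpc_closure g_on_alloc_done;

static void on_alloc_done(void* arg, grpc_error* error) {
  if (error == GRPC_ERROR_NONE) {
    // Granted: the caller now owns the memory and must eventually return the
    // same number of bytes via grpc_resource_user_free().
  } else {
    // Not granted: nothing was allocated, so nothing must be freed.
  }
}

static void allocate_examples(grpc_resource_user* ru) {
  // Non-blocking path: refuses the request instead of pushing the quota over
  // its configured size.
  if (grpc_resource_user_safe_alloc(ru, GRPC_RESOURCE_QUOTA_CALL_SIZE)) {
    /* ... use the reservation ... */
    grpc_resource_user_free(ru, GRPC_RESOURCE_QUOTA_CALL_SIZE);
  }
  // Asynchronous path: may go over-limit and trigger reclamation; the closure
  // runs once the quota grants (or rejects) the request.
  GRPC_CLOSURE_INIT(&g_on_alloc_done, on_alloc_done, nullptr,
                    grpc_schedule_on_exec_ctx);
  grpc_resource_user_alloc(ru, 64 * 1024, &g_on_alloc_done);
}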
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.cc b/src/core/lib/iomgr/socket_utils_common_posix.cc
index bdfc1d70c3..4c337a0521 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_common_posix.cc
@@ -296,14 +296,17 @@ grpc_error* grpc_set_socket_tcp_user_timeout(
socklen_t len = sizeof(newval);
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout,
sizeof(timeout))) {
- return GRPC_OS_ERROR(errno, "setsockopt(TCP_USER_TIMEOUT)");
+ gpr_log(GPR_ERROR, "setsockopt(TCP_USER_TIMEOUT) %s", strerror(errno));
+ return GRPC_ERROR_NONE;
}
if (0 != getsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &newval, &len)) {
- return GRPC_OS_ERROR(errno, "getsockopt(TCP_USER_TIMEOUT)");
+ gpr_log(GPR_ERROR, "getsockopt(TCP_USER_TIMEOUT) %s", strerror(errno));
+ return GRPC_ERROR_NONE;
}
if (newval != timeout) {
- return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to set TCP_USER_TIMEOUT");
+      /* For now, do not fail if TCP_USER_TIMEOUT could not be set. */
+ gpr_log(GPR_ERROR, "Failed to set TCP_USER_TIMEOUT");
+ return GRPC_ERROR_NONE;
}
}
#else
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 8553ed0db4..0bff74e88b 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -76,6 +76,8 @@ static grpc_error* prepare_socket(const grpc_resolved_address* addr, int fd,
if (!grpc_is_unix_socket(addr)) {
err = grpc_set_socket_low_latency(fd, 1);
if (err != GRPC_ERROR_NONE) goto error;
+ err = grpc_set_socket_reuse_addr(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
err = grpc_set_socket_tcp_user_timeout(fd, channel_args,
true /* is_client */);
if (err != GRPC_ERROR_NONE) goto error;
diff --git a/src/core/lib/iomgr/tcp_custom.cc b/src/core/lib/iomgr/tcp_custom.cc
index e02a1898f2..f7a5f36cdc 100644
--- a/src/core/lib/iomgr/tcp_custom.cc
+++ b/src/core/lib/iomgr/tcp_custom.cc
@@ -326,6 +326,8 @@ static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
static int endpoint_get_fd(grpc_endpoint* ep) { return -1; }
+static bool endpoint_can_track_err(grpc_endpoint* ep) { return false; }
+
static grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_add_to_pollset,
@@ -335,7 +337,8 @@ static grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
- endpoint_get_fd};
+ endpoint_get_fd,
+ endpoint_can_track_err};
grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_resource_quota* resource_quota,
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index aa2704ce26..cfcb190d60 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -260,10 +260,17 @@ static void notify_on_write(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
}
- cover_self(tcp);
- GRPC_CLOSURE_INIT(&tcp->write_done_closure,
- tcp_drop_uncovered_then_handle_write, tcp,
- grpc_schedule_on_exec_ctx);
+ if (grpc_event_engine_run_in_background()) {
+ // If there is a polling engine always running in the background, there is
+ // no need to run the backup poller.
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ } else {
+ cover_self(tcp);
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure,
+ tcp_drop_uncovered_then_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ }
grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
}
@@ -384,6 +391,12 @@ static void tcp_destroy(grpc_endpoint* ep) {
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
if (grpc_event_engine_can_track_errors()) {
+ gpr_mu_lock(&tcp->tb_mu);
+ grpc_core::TracedBuffer::Shutdown(
+ &tcp->tb_head, tcp->outgoing_buffer_arg,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+ gpr_mu_unlock(&tcp->tb_mu);
+ tcp->outgoing_buffer_arg = nullptr;
gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
grpc_fd_set_error(tcp->em_fd);
}
@@ -621,7 +634,7 @@ static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
if (sending_length == static_cast<size_t>(length)) {
gpr_mu_lock(&tcp->tb_mu);
grpc_core::TracedBuffer::AddNewEntry(
- &tcp->tb_head, static_cast<int>(tcp->bytes_counter + length),
+ &tcp->tb_head, static_cast<uint32_t>(tcp->bytes_counter + length),
tcp->outgoing_buffer_arg);
gpr_mu_unlock(&tcp->tb_mu);
tcp->outgoing_buffer_arg = nullptr;
@@ -673,11 +686,9 @@ struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
}
/** For linux platforms, reads the socket's error queue and processes error
- * messages from the queue. Returns true if all the errors processed were
- * timestamps. Returns false if any of the errors were not timestamps. For
- * non-linux platforms, error processing is not used/enabled currently.
+ * messages from the queue.
*/
-static bool process_errors(grpc_tcp* tcp) {
+static void process_errors(grpc_tcp* tcp) {
while (true) {
struct iovec iov;
iov.iov_base = nullptr;
@@ -706,10 +717,10 @@ static bool process_errors(grpc_tcp* tcp) {
} while (r < 0 && saved_errno == EINTR);
if (r == -1 && saved_errno == EAGAIN) {
- return true; /* No more errors to process */
+ return; /* No more errors to process */
}
if (r == -1) {
- return false;
+ return;
}
if (grpc_tcp_trace.enabled()) {
if ((msg.msg_flags & MSG_CTRUNC) == 1) {
@@ -719,8 +730,9 @@ static bool process_errors(grpc_tcp* tcp) {
if (msg.msg_controllen == 0) {
/* There was no control message found. It was probably spurious. */
- return true;
+ return;
}
+ bool seen = false;
for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_level != SOL_SOCKET ||
@@ -732,9 +744,13 @@ static bool process_errors(grpc_tcp* tcp) {
"unknown control message cmsg_level:%d cmsg_type:%d",
cmsg->cmsg_level, cmsg->cmsg_type);
}
- return false;
+ return;
}
- process_timestamp(tcp, &msg, cmsg);
+ cmsg = process_timestamp(tcp, &msg, cmsg);
+ seen = true;
+ }
+ if (!seen) {
+ return;
}
}
}
@@ -749,20 +765,17 @@ static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
/* We aren't going to register to hear on error anymore, so it is safe to
* unref. */
- grpc_core::TracedBuffer::Shutdown(&tcp->tb_head, GRPC_ERROR_REF(error));
TCP_UNREF(tcp, "error-tracking");
return;
}
/* We are still interested in collecting timestamps, so let's try reading
* them. */
- if (!process_errors(tcp)) {
- /* This was not a timestamps error. This was an actual error. Set the
- * read and write closures to be ready.
- */
- grpc_fd_set_readable(tcp->em_fd);
- grpc_fd_set_writable(tcp->em_fd);
- }
+ process_errors(tcp);
+  /* This might not be a timestamps error. Set the read and write closures to
+   * be ready. */
+ grpc_fd_set_readable(tcp->em_fd);
+ grpc_fd_set_writable(tcp->em_fd);
GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
@@ -784,6 +797,19 @@ static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
}
#endif /* GRPC_LINUX_ERRQUEUE */
+/* If outgoing_buffer_arg is set, shuts down the traced-buffer list early so
+ * that any release operations needed on the arg can be performed. */
+void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
+ if (tcp->outgoing_buffer_arg) {
+ gpr_mu_lock(&tcp->tb_mu);
+ grpc_core::TracedBuffer::Shutdown(
+ &tcp->tb_head, tcp->outgoing_buffer_arg,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+ gpr_mu_unlock(&tcp->tb_mu);
+ tcp->outgoing_buffer_arg = nullptr;
+ }
+}
+
/* returns true if done, false if pending; if returning true, *error is set */
#if defined(IOV_MAX) && IOV_MAX < 1000
#define MAX_WRITE_IOVEC IOV_MAX
@@ -831,8 +857,10 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
msg.msg_flags = 0;
if (tcp->outgoing_buffer_arg != nullptr) {
if (!tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
- error))
+ error)) {
+ tcp_shutdown_buffer_list(tcp);
return true; /* something went wrong with timestamps */
+ }
} else {
msg.msg_control = nullptr;
msg.msg_controllen = 0;
@@ -856,10 +884,12 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
} else if (errno == EPIPE) {
*error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ tcp_shutdown_buffer_list(tcp);
return true;
} else {
*error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ tcp_shutdown_buffer_list(tcp);
return true;
}
}
@@ -936,17 +966,18 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
GPR_ASSERT(tcp->write_cb == nullptr);
+ tcp->outgoing_buffer_arg = arg;
if (buf->length == 0) {
GRPC_CLOSURE_SCHED(
cb, grpc_fd_is_shutdown(tcp->em_fd)
? tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp)
: GRPC_ERROR_NONE);
+ tcp_shutdown_buffer_list(tcp);
return;
}
tcp->outgoing_buffer = buf;
tcp->outgoing_byte_idx = 0;
- tcp->outgoing_buffer_arg = arg;
if (arg) {
GPR_ASSERT(grpc_event_engine_can_track_errors());
}
@@ -999,6 +1030,22 @@ static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
return tcp->resource_user;
}
+static bool tcp_can_track_err(grpc_endpoint* ep) {
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
+ if (!grpc_event_engine_can_track_errors()) {
+ return false;
+ }
+ struct sockaddr addr;
+ socklen_t len = sizeof(addr);
+ if (getsockname(tcp->fd, &addr, &len) < 0) {
+ return false;
+ }
+ if (addr.sa_family == AF_INET || addr.sa_family == AF_INET6) {
+ return true;
+ }
+ return false;
+}
+
static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_write,
tcp_add_to_pollset,
@@ -1008,7 +1055,8 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_destroy,
tcp_get_resource_user,
tcp_get_peer,
- tcp_get_fd};
+ tcp_get_fd,
+ tcp_can_track_err};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
@@ -1069,6 +1117,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp->is_first_read = true;
tcp->bytes_counter = -1;
tcp->socket_ts_enabled = false;
+ tcp->outgoing_buffer_arg = nullptr;
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
@@ -1113,6 +1162,12 @@ void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
if (grpc_event_engine_can_track_errors()) {
/* Stop errors notification. */
+ gpr_mu_lock(&tcp->tb_mu);
+ grpc_core::TracedBuffer::Shutdown(
+ &tcp->tb_head, tcp->outgoing_buffer_arg,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+ gpr_mu_unlock(&tcp->tb_mu);
+ tcp->outgoing_buffer_arg = nullptr;
gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
grpc_fd_set_error(tcp->em_fd);
}
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index 64c4a56ae9..86ee1010cf 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -42,6 +42,7 @@
#include "src/core/lib/iomgr/tcp_windows.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#if defined(__MSYS__) && defined(GPR_ARCH_64)
/* Nasty workaround for nasty bug when using the 64 bits msys compiler
@@ -112,7 +113,10 @@ typedef struct grpc_tcp {
grpc_closure* read_cb;
grpc_closure* write_cb;
- grpc_slice read_slice;
+
+  /* unfilled slices left over from the last read (reused by the next read) */
+ grpc_slice_buffer last_read_buffer;
+
grpc_slice_buffer* write_slices;
grpc_slice_buffer* read_slices;
@@ -131,6 +135,7 @@ static void tcp_free(grpc_tcp* tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
gpr_free(tcp->peer_string);
+ grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
grpc_resource_user_unref(tcp->resource_user);
if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
gpr_free(tcp);
@@ -179,9 +184,12 @@ static void on_read(void* tcpp, grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)tcpp;
grpc_closure* cb = tcp->read_cb;
grpc_winsocket* socket = tcp->socket;
- grpc_slice sub;
grpc_winsocket_callback_info* info = &socket->read_info;
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
+ }
+
GRPC_ERROR_REF(error);
if (error == GRPC_ERROR_NONE) {
@@ -189,13 +197,35 @@ static void on_read(void* tcpp, grpc_error* error) {
char* utf8_message = gpr_format_message(info->wsa_error);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
gpr_free(utf8_message);
- grpc_slice_unref_internal(tcp->read_slice);
+ grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
} else {
if (info->bytes_transfered != 0 && !tcp->shutting_down) {
- sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
- grpc_slice_buffer_add(tcp->read_slices, sub);
+ GPR_ASSERT((size_t)info->bytes_transfered <= tcp->read_slices->length);
+ if (static_cast<size_t>(info->bytes_transfered) !=
+ tcp->read_slices->length) {
+ grpc_slice_buffer_trim_end(
+ tcp->read_slices,
+ tcp->read_slices->length -
+ static_cast<size_t>(info->bytes_transfered),
+ &tcp->last_read_buffer);
+ }
+ GPR_ASSERT((size_t)info->bytes_transfered == tcp->read_slices->length);
+
+ if (grpc_tcp_trace.enabled()) {
+ size_t i;
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
+ dump);
+ gpr_free(dump);
+ }
+ }
} else {
- grpc_slice_unref_internal(tcp->read_slice);
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p unref read_slice", tcp);
+ }
+ grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
error = tcp->shutting_down
? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP stream shutting down", &tcp->shutdown_error, 1)
@@ -209,6 +239,8 @@ static void on_read(void* tcpp, grpc_error* error) {
GRPC_CLOSURE_SCHED(cb, error);
}
+#define DEFAULT_TARGET_READ_SIZE 8192
+#define MAX_WSABUF_COUNT 16
static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
@@ -217,7 +249,12 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
int status;
DWORD bytes_read = 0;
DWORD flags = 0;
- WSABUF buffer;
+ WSABUF buffers[MAX_WSABUF_COUNT];
+ size_t i;
+
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p win_read", tcp);
+ }
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(
@@ -229,18 +266,27 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
tcp->read_cb = cb;
tcp->read_slices = read_slices;
grpc_slice_buffer_reset_and_unref_internal(read_slices);
+ grpc_slice_buffer_swap(read_slices, &tcp->last_read_buffer);
- tcp->read_slice = GRPC_SLICE_MALLOC(8192);
+ if (tcp->read_slices->length < DEFAULT_TARGET_READ_SIZE / 2 &&
+ tcp->read_slices->count < MAX_WSABUF_COUNT) {
+ // TODO(jtattermusch): slice should be allocated using resource quota
+ grpc_slice_buffer_add(tcp->read_slices,
+ GRPC_SLICE_MALLOC(DEFAULT_TARGET_READ_SIZE));
+ }
- buffer.len = (ULONG)GRPC_SLICE_LENGTH(
- tcp->read_slice); // we know slice size fits in 32bit.
- buffer.buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slice);
+ GPR_ASSERT(tcp->read_slices->count <= MAX_WSABUF_COUNT);
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ buffers[i].len = (ULONG)GRPC_SLICE_LENGTH(
+ tcp->read_slices->slices[i]); // we know slice size fits in 32bit.
+ buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[i]);
+ }
TCP_REF(tcp, "read");
/* First let's try a synchronous, non-blocking read. */
- status =
- WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
+ status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+ &bytes_read, &flags, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
/* Did we get data immediately ? Yay. */
@@ -252,8 +298,8 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
/* Otherwise, let's retry, by queuing a read. */
memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
- status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
- &info->overlapped, NULL);
+ status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+ &bytes_read, &flags, &info->overlapped, NULL);
if (status != 0) {
int wsa_error = WSAGetLastError();
@@ -275,6 +321,10 @@ static void on_write(void* tcpp, grpc_error* error) {
grpc_winsocket_callback_info* info = &handle->write_info;
grpc_closure* cb;
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
+ }
+
GRPC_ERROR_REF(error);
gpr_mu_lock(&tcp->mu);
@@ -303,11 +353,21 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
unsigned i;
DWORD bytes_sent;
int status;
- WSABUF local_buffers[16];
+ WSABUF local_buffers[MAX_WSABUF_COUNT];
WSABUF* allocated = NULL;
WSABUF* buffers = local_buffers;
size_t len;
+ if (grpc_tcp_trace.enabled()) {
+ size_t i;
+ for (i = 0; i < slices->count; i++) {
+ char* data =
+ grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+ gpr_free(data);
+ }
+ }
+
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(
cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -412,6 +472,7 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
static void win_destroy(grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
TCP_UNREF(tcp, "destroy");
}
@@ -427,6 +488,8 @@ static grpc_resource_user* win_get_resource_user(grpc_endpoint* ep) {
static int win_get_fd(grpc_endpoint* ep) { return -1; }
+static bool win_can_track_err(grpc_endpoint* ep) { return false; }
+
static grpc_endpoint_vtable vtable = {win_read,
win_write,
win_add_to_pollset,
@@ -436,7 +499,8 @@ static grpc_endpoint_vtable vtable = {win_read,
win_destroy,
win_get_resource_user,
win_get_peer,
- win_get_fd};
+ win_get_fd,
+ win_can_track_err};
grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
grpc_channel_args* channel_args,
@@ -460,6 +524,7 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
tcp->peer_string = gpr_strdup(peer_string);
+ grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
diff --git a/src/core/lib/security/context/security_context.cc b/src/core/lib/security/context/security_context.cc
index 94c9c69fcd..16f40b4f55 100644
--- a/src/core/lib/security/context/security_context.cc
+++ b/src/core/lib/security/context/security_context.cc
@@ -81,38 +81,45 @@ void grpc_auth_context_release(grpc_auth_context* context) {
}
/* --- grpc_client_security_context --- */
+grpc_client_security_context::~grpc_client_security_context() {
+ grpc_call_credentials_unref(creds);
+ GRPC_AUTH_CONTEXT_UNREF(auth_context, "client_security_context");
+ if (extension.instance != nullptr && extension.destroy != nullptr) {
+ extension.destroy(extension.instance);
+ }
+}
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena) {
- return static_cast<grpc_client_security_context*>(
- gpr_arena_alloc(arena, sizeof(grpc_client_security_context)));
+ return new (gpr_arena_alloc(arena, sizeof(grpc_client_security_context)))
+ grpc_client_security_context();
}
void grpc_client_security_context_destroy(void* ctx) {
grpc_core::ExecCtx exec_ctx;
grpc_client_security_context* c =
static_cast<grpc_client_security_context*>(ctx);
- grpc_call_credentials_unref(c->creds);
- GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
- if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
- c->extension.destroy(c->extension.instance);
- }
+ c->~grpc_client_security_context();
}
/* --- grpc_server_security_context --- */
+grpc_server_security_context::~grpc_server_security_context() {
+ GRPC_AUTH_CONTEXT_UNREF(auth_context, "server_security_context");
+ if (extension.instance != nullptr && extension.destroy != nullptr) {
+ extension.destroy(extension.instance);
+ }
+}
+
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena) {
- return static_cast<grpc_server_security_context*>(
- gpr_arena_alloc(arena, sizeof(grpc_server_security_context)));
+ return new (gpr_arena_alloc(arena, sizeof(grpc_server_security_context)))
+ grpc_server_security_context();
}
void grpc_server_security_context_destroy(void* ctx) {
grpc_server_security_context* c =
static_cast<grpc_server_security_context*>(ctx);
- GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
- if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
- c->extension.destroy(c->extension.instance);
- }
+ c->~grpc_server_security_context();
}
/* --- grpc_auth_context --- */
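The two *_create/*_destroy changes above switch the arena-backed contexts from zero-initialized allocation to placement new plus an explicit destructor call, since gpr_arena only hands back raw memory and never runs destructors. A generic sketch of that pattern under the same assumption; ArenaThing and its helpers are illustrative, not code from the tree.

#include <new>
#include "src/core/lib/gpr/arena.h"

struct ArenaThing {
  ArenaThing() = default;
  ~ArenaThing() { /* release refs / extensions here */ }
  int value = 0;  // default member initializers now run, unlike with zalloc
};

ArenaThing* arena_thing_create(gpr_arena* arena) {
  // Placement new constructs the object in arena-owned memory.
  return new (gpr_arena_alloc(arena, sizeof(ArenaThing))) ArenaThing();
}

void arena_thing_destroy(void* p) {
  // Run the destructor explicitly; the arena reclaims the memory later.
  static_cast<ArenaThing*>(p)->~ArenaThing();
}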
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index a8e1c3fd64..e45415f63b 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -34,18 +34,20 @@ struct gpr_arena;
/* Property names are always NULL terminated. */
-typedef struct {
- grpc_auth_property* array;
- size_t count;
- size_t capacity;
-} grpc_auth_property_array;
+struct grpc_auth_property_array {
+ grpc_auth_property* array = nullptr;
+ size_t count = 0;
+ size_t capacity = 0;
+};
struct grpc_auth_context {
- struct grpc_auth_context* chained;
+ grpc_auth_context() { gpr_ref_init(&refcount, 0); }
+
+ struct grpc_auth_context* chained = nullptr;
grpc_auth_property_array properties;
gpr_refcount refcount;
- const char* peer_identity_property_name;
- grpc_pollset* pollset;
+ const char* peer_identity_property_name = nullptr;
+ grpc_pollset* pollset = nullptr;
};
/* Creation. */
@@ -76,20 +78,23 @@ void grpc_auth_property_reset(grpc_auth_property* property);
Extension to the security context that may be set in a filter and accessed
later by a higher level method on a grpc_call object. */
-typedef struct {
- void* instance;
- void (*destroy)(void*);
-} grpc_security_context_extension;
+struct grpc_security_context_extension {
+ void* instance = nullptr;
+ void (*destroy)(void*) = nullptr;
+};
/* --- grpc_client_security_context ---
Internal client-side security context. */
-typedef struct {
- grpc_call_credentials* creds;
- grpc_auth_context* auth_context;
+struct grpc_client_security_context {
+ grpc_client_security_context() = default;
+ ~grpc_client_security_context();
+
+ grpc_call_credentials* creds = nullptr;
+ grpc_auth_context* auth_context = nullptr;
grpc_security_context_extension extension;
-} grpc_client_security_context;
+};
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena);
@@ -99,10 +104,13 @@ void grpc_client_security_context_destroy(void* ctx);
Internal server-side security context. */
-typedef struct {
- grpc_auth_context* auth_context;
+struct grpc_server_security_context {
+ grpc_server_security_context() = default;
+ ~grpc_server_security_context();
+
+ grpc_auth_context* auth_context = nullptr;
grpc_security_context_extension extension;
-} grpc_server_security_context;
+};
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena);
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index b486d25ab2..3878958b38 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -142,8 +142,8 @@ grpc_channel_credentials* grpc_channel_credentials_find_in_args(
/* --- grpc_credentials_mdelem_array. --- */
typedef struct {
- grpc_mdelem* md;
- size_t size;
+ grpc_mdelem* md = nullptr;
+ size_t size = 0;
} grpc_credentials_mdelem_array;
/// Takes a new ref to \a md.
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
index c456ffaf5d..0674540d01 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
@@ -49,9 +49,16 @@
/* -- Default credentials. -- */
-static grpc_channel_credentials* g_default_credentials = nullptr;
-static int g_compute_engine_detection_done = 0;
+/* A sticky bit that is set only if metadata server detection returns a
+ * positive result. A negative result is not recorded, because that detection
+ * relies on an unreliable network test whose result should not be reused by
+ * subsequent calls. */
+static int g_metadata_server_available = 0;
+static int g_is_on_gce = 0;
static gpr_mu g_state_mu;
+/* Protects a metadata_server_detector instance that can be modified by more
+ * than one gRPC thread. */
+static gpr_mu* g_polling_mu;
static gpr_once g_once = GPR_ONCE_INIT;
static grpc_core::internal::grpc_gce_tenancy_checker g_gce_tenancy_checker =
grpc_alts_is_running_on_gcp;
@@ -63,7 +70,7 @@ typedef struct {
int is_done;
int success;
grpc_http_response response;
-} compute_engine_detector;
+} metadata_server_detector;
static void google_default_credentials_destruct(
grpc_channel_credentials* creds) {
@@ -89,15 +96,21 @@ static grpc_security_status google_default_create_security_connector(
bool use_alts =
is_grpclb_load_balancer || is_backend_from_grpclb_load_balancer;
grpc_security_status status = GRPC_SECURITY_ERROR;
+ /* Return failure if ALTS is selected but not running on GCE. */
+ if (use_alts && !g_is_on_gce) {
+ gpr_log(GPR_ERROR, "ALTS is selected, but not running on GCE.");
+ goto end;
+ }
status = use_alts ? c->alts_creds->vtable->create_security_connector(
c->alts_creds, call_creds, target, args, sc, new_args)
: c->ssl_creds->vtable->create_security_connector(
c->ssl_creds, call_creds, target, args, sc, new_args);
- /* grpclb-specific channel args are removed from the channel args set
- * to ensure backends and fallback adresses will have the same set of channel
- * args. By doing that, it guarantees the connections to backends will not be
- * torn down and re-connected when switching in and out of fallback mode.
- */
+/* grpclb-specific channel args are removed from the channel args set
+ * to ensure backends and fallback addresses will have the same set of channel
+ * args. By doing that, it guarantees the connections to backends will not be
+ * torn down and re-connected when switching in and out of fallback mode.
+ */
+end:
if (use_alts) {
static const char* args_to_remove[] = {
GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER,
@@ -113,6 +126,93 @@ static grpc_channel_credentials_vtable google_default_credentials_vtable = {
google_default_credentials_destruct,
google_default_create_security_connector, nullptr};
+static void on_metadata_server_detection_http_response(void* user_data,
+ grpc_error* error) {
+ metadata_server_detector* detector =
+ static_cast<metadata_server_detector*>(user_data);
+ if (error == GRPC_ERROR_NONE && detector->response.status == 200 &&
+ detector->response.hdr_count > 0) {
+ /* Internet providers can return a generic response to all requests, so
+ it is necessary to also check that the metadata header is present. */
+ size_t i;
+ for (i = 0; i < detector->response.hdr_count; i++) {
+ grpc_http_header* header = &detector->response.hdrs[i];
+ if (strcmp(header->key, "Metadata-Flavor") == 0 &&
+ strcmp(header->value, "Google") == 0) {
+ detector->success = 1;
+ break;
+ }
+ }
+ }
+ gpr_mu_lock(g_polling_mu);
+ detector->is_done = 1;
+ GRPC_LOG_IF_ERROR(
+ "Pollset kick",
+ grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent),
+ nullptr));
+ gpr_mu_unlock(g_polling_mu);
+}
+
+static void destroy_pollset(void* p, grpc_error* e) {
+ grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
+}
+
+static int is_metadata_server_reachable() {
+ metadata_server_detector detector;
+ grpc_httpcli_request request;
+ grpc_httpcli_context context;
+ grpc_closure destroy_closure;
+ /* The http call is local. If it takes more than one second, we are almost
+ certainly not on compute engine. */
+ grpc_millis max_detection_delay = GPR_MS_PER_SEC;
+ grpc_pollset* pollset =
+ static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+ grpc_pollset_init(pollset, &g_polling_mu);
+ detector.pollent = grpc_polling_entity_create_from_pollset(pollset);
+ detector.is_done = 0;
+ detector.success = 0;
+ memset(&detector.response, 0, sizeof(detector.response));
+ memset(&request, 0, sizeof(grpc_httpcli_request));
+ request.host = (char*)GRPC_COMPUTE_ENGINE_DETECTION_HOST;
+ request.http.path = (char*)"/";
+ grpc_httpcli_context_init(&context);
+ grpc_resource_quota* resource_quota =
+ grpc_resource_quota_create("google_default_credentials");
+ grpc_httpcli_get(
+ &context, &detector.pollent, resource_quota, &request,
+ grpc_core::ExecCtx::Get()->Now() + max_detection_delay,
+ GRPC_CLOSURE_CREATE(on_metadata_server_detection_http_response, &detector,
+ grpc_schedule_on_exec_ctx),
+ &detector.response);
+ grpc_resource_quota_unref_internal(resource_quota);
+ grpc_core::ExecCtx::Get()->Flush();
+ /* Block until we get the response. This is not ideal, but it should only
+ happen once per process lifetime, when the default credentials are created. */
+ gpr_mu_lock(g_polling_mu);
+ while (!detector.is_done) {
+ grpc_pollset_worker* worker = nullptr;
+ if (!GRPC_LOG_IF_ERROR(
+ "pollset_work",
+ grpc_pollset_work(grpc_polling_entity_pollset(&detector.pollent),
+ &worker, GRPC_MILLIS_INF_FUTURE))) {
+ detector.is_done = 1;
+ detector.success = 0;
+ }
+ }
+ gpr_mu_unlock(g_polling_mu);
+ grpc_httpcli_context_destroy(&context);
+ GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset,
+ grpc_polling_entity_pollset(&detector.pollent),
+ grpc_schedule_on_exec_ctx);
+ grpc_pollset_shutdown(grpc_polling_entity_pollset(&detector.pollent),
+ &destroy_closure);
+ g_polling_mu = nullptr;
+ grpc_core::ExecCtx::Get()->Flush();
+ gpr_free(grpc_polling_entity_pollset(&detector.pollent));
+ grpc_http_response_destroy(&detector.response);
+ return detector.success;
+}
+
/* Takes ownership of creds_path if not NULL. */
static grpc_error* create_default_creds_from_path(
char* creds_path, grpc_call_credentials** creds) {
@@ -188,13 +288,6 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
gpr_once_init(&g_once, init_default_credentials);
- gpr_mu_lock(&g_state_mu);
-
- if (g_default_credentials != nullptr) {
- result = grpc_channel_credentials_ref(g_default_credentials);
- goto end;
- }
-
/* First, try the environment variable. */
err = create_default_creds_from_path(
gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds);
@@ -207,55 +300,55 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
if (err == GRPC_ERROR_NONE) goto end;
error = grpc_error_add_child(error, err);
- /* At last try to see if we're on compute engine (do the detection only once
- since it requires a network test). */
- if (!g_compute_engine_detection_done) {
- int need_compute_engine_creds = g_gce_tenancy_checker();
- g_compute_engine_detection_done = 1;
- if (need_compute_engine_creds) {
- call_creds = grpc_google_compute_engine_credentials_create(nullptr);
- if (call_creds == nullptr) {
- error = grpc_error_add_child(
- error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to get credentials from network"));
- }
- }
+ gpr_mu_lock(&g_state_mu);
+
+ /* Try a platform-provided hint for GCE. */
+ if (!g_metadata_server_available) {
+ g_is_on_gce = g_gce_tenancy_checker();
+ g_metadata_server_available = g_is_on_gce;
}
+ /* TODO: Add a platform-provided hint for GAE. */
-end:
- if (result == nullptr) {
- if (call_creds != nullptr) {
- /* Create google default credentials. */
- auto creds = static_cast<grpc_google_default_channel_credentials*>(
- gpr_zalloc(sizeof(grpc_google_default_channel_credentials)));
- creds->base.vtable = &google_default_credentials_vtable;
- creds->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_GOOGLE_DEFAULT;
- gpr_ref_init(&creds->base.refcount, 1);
- creds->ssl_creds =
- grpc_ssl_credentials_create(nullptr, nullptr, nullptr, nullptr);
- GPR_ASSERT(creds->ssl_creds != nullptr);
- grpc_alts_credentials_options* options =
- grpc_alts_credentials_client_options_create();
- creds->alts_creds = grpc_alts_credentials_create(options);
- grpc_alts_credentials_options_destroy(options);
- /* Add a global reference so that it can be cached and re-served. */
- g_default_credentials = grpc_composite_channel_credentials_create(
- &creds->base, call_creds, nullptr);
- GPR_ASSERT(g_default_credentials != nullptr);
- grpc_channel_credentials_unref(&creds->base);
- grpc_call_credentials_unref(call_creds);
- result = grpc_channel_credentials_ref(g_default_credentials);
- } else {
- gpr_log(GPR_ERROR, "Could not create google default credentials.");
- }
+ /* Do a network test for the metadata server. */
+ if (!g_metadata_server_available) {
+ g_metadata_server_available = is_metadata_server_reachable();
}
gpr_mu_unlock(&g_state_mu);
- if (result == nullptr) {
- GRPC_LOG_IF_ERROR("grpc_google_default_credentials_create", error);
- } else {
- GRPC_ERROR_UNREF(error);
+
+ if (g_metadata_server_available) {
+ call_creds = grpc_google_compute_engine_credentials_create(nullptr);
+ if (call_creds == nullptr) {
+ error = grpc_error_add_child(
+ error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to get credentials from network"));
+ }
}
+end:
+ if (call_creds != nullptr) {
+ /* Create google default credentials. */
+ auto creds = static_cast<grpc_google_default_channel_credentials*>(
+ gpr_zalloc(sizeof(grpc_google_default_channel_credentials)));
+ creds->base.vtable = &google_default_credentials_vtable;
+ creds->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_GOOGLE_DEFAULT;
+ gpr_ref_init(&creds->base.refcount, 1);
+ creds->ssl_creds =
+ grpc_ssl_credentials_create(nullptr, nullptr, nullptr, nullptr);
+ GPR_ASSERT(creds->ssl_creds != nullptr);
+ grpc_alts_credentials_options* options =
+ grpc_alts_credentials_client_options_create();
+ creds->alts_creds = grpc_alts_credentials_create(options);
+ grpc_alts_credentials_options_destroy(options);
+ result = grpc_composite_channel_credentials_create(&creds->base, call_creds,
+ nullptr);
+ GPR_ASSERT(result != nullptr);
+ grpc_channel_credentials_unref(&creds->base);
+ grpc_call_credentials_unref(call_creds);
+ } else {
+ gpr_log(GPR_ERROR, "Could not create google default credentials: %s",
+ grpc_error_string(error));
+ }
+ GRPC_ERROR_UNREF(error);
return result;
}
@@ -266,21 +359,17 @@ void set_gce_tenancy_checker_for_testing(grpc_gce_tenancy_checker checker) {
g_gce_tenancy_checker = checker;
}
-} // namespace internal
-} // namespace grpc_core
-
void grpc_flush_cached_google_default_credentials(void) {
grpc_core::ExecCtx exec_ctx;
gpr_once_init(&g_once, init_default_credentials);
gpr_mu_lock(&g_state_mu);
- if (g_default_credentials != nullptr) {
- grpc_channel_credentials_unref(g_default_credentials);
- g_default_credentials = nullptr;
- }
- g_compute_engine_detection_done = 0;
+ g_metadata_server_available = 0;
gpr_mu_unlock(&g_state_mu);
}
+} // namespace internal
+} // namespace grpc_core
+
/* -- Well known credentials path. -- */
static grpc_well_known_credentials_path_getter creds_path_getter = nullptr;
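
The detection logic above caches only positive results and runs both probes behind g_state_mu. A trimmed-down sketch of that sticky-bit pattern, assuming hypothetical stand-ins for the tenancy hint and the network probe (std::mutex replaces gpr_mu, and the stub probes below simply return false so the sketch compiles):

    #include <mutex>

    static std::mutex g_mu;
    static bool g_metadata_server_available = false;  // sticky: set only on success

    // Hypothetical stand-ins for g_gce_tenancy_checker() and
    // is_metadata_server_reachable().
    static bool CheapPlatformHintSaysGce() { return false; }
    static bool NetworkProbeReachesMetadataServer() { return false; }

    static bool MetadataServerAvailable() {
      std::lock_guard<std::mutex> lock(g_mu);
      if (!g_metadata_server_available) {
        // Try the cheap platform hint first, then the unreliable network probe.
        // A negative result is deliberately not cached, so later calls get
        // another chance to detect the metadata server.
        g_metadata_server_available =
            CheapPlatformHintSaysGce() || NetworkProbeReachesMetadataServer();
      }
      return g_metadata_server_available;
    }

    int main() {
      (void)MetadataServerAvailable();
      return 0;
    }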
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index a7dd0ea8ae..b9e2efb04f 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -45,8 +45,6 @@ typedef struct {
grpc_channel_credentials* ssl_creds;
} grpc_google_default_channel_credentials;
-void grpc_flush_cached_google_default_credentials(void);
-
namespace grpc_core {
namespace internal {
@@ -54,6 +52,9 @@ typedef bool (*grpc_gce_tenancy_checker)(void);
void set_gce_tenancy_checker_for_testing(grpc_gce_tenancy_checker checker);
+// TEST-ONLY. Reset the internal global state.
+void grpc_flush_cached_google_default_credentials(void);
+
} // namespace internal
} // namespace grpc_core
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index e34eacc8d7..6955e8698e 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -43,20 +43,39 @@
namespace {
/* We can have a per-call credentials. */
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : arena(args.arena),
+ owning_call(args.call_stack),
+ call_combiner(args.call_combiner) {}
+
+ // This method is effectively the dtor of this class. However, since
+ // `get_request_metadata_cancel_closure` can run in parallel with
+ // `destroy_call_elem`, we cannot invoke the dtor from either of them;
+ // otherwise fields would be accessed after the dtor has run, and MSAN
+ // correctly complains that the memory is not initialized.
+ void destroy() {
+ grpc_credentials_mdelem_array_destroy(&md_array);
+ grpc_call_credentials_unref(creds);
+ grpc_slice_unref_internal(host);
+ grpc_slice_unref_internal(method);
+ grpc_auth_metadata_context_reset(&auth_md_context);
+ }
+
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
- grpc_call_credentials* creds;
- grpc_slice host;
- grpc_slice method;
+ grpc_call_credentials* creds = nullptr;
+ grpc_slice host = grpc_empty_slice();
+ grpc_slice method = grpc_empty_slice();
/* pollset{_set} bound to this call; if we need to make external
network requests, they should be done under a pollset added to this
pollset_set so that work can progress when this call wants work to progress
*/
- grpc_polling_entity* pollent;
+ grpc_polling_entity* pollent = nullptr;
grpc_credentials_mdelem_array md_array;
- grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
- grpc_auth_metadata_context auth_md_context;
+ grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT] = {};
+ grpc_auth_metadata_context auth_md_context =
+ grpc_auth_metadata_context(); // Zero-initialize the C struct.
grpc_closure async_result_closure;
grpc_closure check_call_host_cancel_closure;
grpc_closure get_request_metadata_cancel_closure;
@@ -334,12 +353,7 @@ static void auth_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->arena = args->arena;
- calld->owning_call = args->call_stack;
- calld->call_combiner = args->call_combiner;
- calld->host = grpc_empty_slice();
- calld->method = grpc_empty_slice();
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -354,11 +368,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- grpc_credentials_mdelem_array_destroy(&calld->md_array);
- grpc_call_credentials_unref(calld->creds);
- grpc_slice_unref_internal(calld->host);
- grpc_slice_unref_internal(calld->method);
- grpc_auth_metadata_context_reset(&calld->auth_md_context);
+ calld->destroy();
}
/* Constructor for channel_data */
diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc
index f40f969bb7..14fb55884f 100644
--- a/src/core/lib/security/transport/secure_endpoint.cc
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -22,6 +22,8 @@
headers. Therefore, sockaddr.h must always be included first */
#include <grpc/support/port_platform.h>
+#include <new>
+
#include "src/core/lib/iomgr/sockaddr.h"
#include <grpc/slice.h>
@@ -31,6 +33,7 @@
#include <grpc/support/sync.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
@@ -40,44 +43,68 @@
#define STAGING_BUFFER_SIZE 8192
-typedef struct {
+static void on_read(void* user_data, grpc_error* error);
+
+namespace {
+struct secure_endpoint {
+ secure_endpoint(const grpc_endpoint_vtable* vtable,
+ tsi_frame_protector* protector,
+ tsi_zero_copy_grpc_protector* zero_copy_protector,
+ grpc_endpoint* transport, grpc_slice* leftover_slices,
+ size_t leftover_nslices)
+ : wrapped_ep(transport),
+ protector(protector),
+ zero_copy_protector(zero_copy_protector) {
+ base.vtable = vtable;
+ gpr_mu_init(&protector_mu);
+ GRPC_CLOSURE_INIT(&on_read, ::on_read, this, grpc_schedule_on_exec_ctx);
+ grpc_slice_buffer_init(&source_buffer);
+ grpc_slice_buffer_init(&leftover_bytes);
+ for (size_t i = 0; i < leftover_nslices; i++) {
+ grpc_slice_buffer_add(&leftover_bytes,
+ grpc_slice_ref_internal(leftover_slices[i]));
+ }
+ grpc_slice_buffer_init(&output_buffer);
+ gpr_ref_init(&ref, 1);
+ }
+
+ ~secure_endpoint() {
+ grpc_endpoint_destroy(wrapped_ep);
+ tsi_frame_protector_destroy(protector);
+ tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
+ grpc_slice_buffer_destroy_internal(&source_buffer);
+ grpc_slice_buffer_destroy_internal(&leftover_bytes);
+ grpc_slice_unref_internal(read_staging_buffer);
+ grpc_slice_unref_internal(write_staging_buffer);
+ grpc_slice_buffer_destroy_internal(&output_buffer);
+ gpr_mu_destroy(&protector_mu);
+ }
+
grpc_endpoint base;
grpc_endpoint* wrapped_ep;
struct tsi_frame_protector* protector;
struct tsi_zero_copy_grpc_protector* zero_copy_protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
- grpc_closure* read_cb;
- grpc_closure* write_cb;
+ grpc_closure* read_cb = nullptr;
+ grpc_closure* write_cb = nullptr;
grpc_closure on_read;
- grpc_slice_buffer* read_buffer;
+ grpc_slice_buffer* read_buffer = nullptr;
grpc_slice_buffer source_buffer;
/* saved handshaker leftover data to unprotect. */
grpc_slice_buffer leftover_bytes;
/* buffers for read and write */
- grpc_slice read_staging_buffer;
-
- grpc_slice write_staging_buffer;
+ grpc_slice read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
+ grpc_slice write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
grpc_slice_buffer output_buffer;
gpr_refcount ref;
-} secure_endpoint;
+};
+} // namespace
grpc_core::TraceFlag grpc_trace_secure_endpoint(false, "secure_endpoint");
-static void destroy(secure_endpoint* secure_ep) {
- secure_endpoint* ep = secure_ep;
- grpc_endpoint_destroy(ep->wrapped_ep);
- tsi_frame_protector_destroy(ep->protector);
- tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector);
- grpc_slice_buffer_destroy_internal(&ep->leftover_bytes);
- grpc_slice_unref_internal(ep->read_staging_buffer);
- grpc_slice_unref_internal(ep->write_staging_buffer);
- grpc_slice_buffer_destroy_internal(&ep->output_buffer);
- grpc_slice_buffer_destroy_internal(&ep->source_buffer);
- gpr_mu_destroy(&ep->protector_mu);
- gpr_free(ep);
-}
+static void destroy(secure_endpoint* ep) { grpc_core::Delete(ep); }
#ifndef NDEBUG
#define SECURE_ENDPOINT_UNREF(ep, reason) \
@@ -389,6 +416,11 @@ static grpc_resource_user* endpoint_get_resource_user(
return grpc_endpoint_get_resource_user(ep->wrapped_ep);
}
+static bool endpoint_can_track_err(grpc_endpoint* secure_ep) {
+ secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
+ return grpc_endpoint_can_track_err(ep->wrapped_ep);
+}
+
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_add_to_pollset,
@@ -398,32 +430,16 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
- endpoint_get_fd};
+ endpoint_get_fd,
+ endpoint_can_track_err};
grpc_endpoint* grpc_secure_endpoint_create(
struct tsi_frame_protector* protector,
struct tsi_zero_copy_grpc_protector* zero_copy_protector,
grpc_endpoint* transport, grpc_slice* leftover_slices,
size_t leftover_nslices) {
- size_t i;
- secure_endpoint* ep =
- static_cast<secure_endpoint*>(gpr_malloc(sizeof(secure_endpoint)));
- ep->base.vtable = &vtable;
- ep->wrapped_ep = transport;
- ep->protector = protector;
- ep->zero_copy_protector = zero_copy_protector;
- grpc_slice_buffer_init(&ep->leftover_bytes);
- for (i = 0; i < leftover_nslices; i++) {
- grpc_slice_buffer_add(&ep->leftover_bytes,
- grpc_slice_ref_internal(leftover_slices[i]));
- }
- ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
- ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
- grpc_slice_buffer_init(&ep->output_buffer);
- grpc_slice_buffer_init(&ep->source_buffer);
- ep->read_buffer = nullptr;
- GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
- gpr_mu_init(&ep->protector_mu);
- gpr_ref_init(&ep->ref, 1);
+ secure_endpoint* ep = grpc_core::New<secure_endpoint>(
+ &vtable, protector, zero_copy_protector, transport, leftover_slices,
+ leftover_nslices);
return &ep->base;
}
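
secure_endpoint now owns its wrapped endpoint, protectors, and buffers through a constructor/destructor pair allocated via grpc_core::New/Delete, instead of gpr_malloc plus hand-written init and destroy code. A simplified sketch of the same ownership shape, using standard C++ types as stand-ins (FrameProtector and SecureEndpoint below are hypothetical, not the real TSI or endpoint types):

    #include <memory>
    #include <utility>
    #include <vector>

    struct FrameProtector {};  // stand-in for tsi_frame_protector

    class SecureEndpoint {
     public:
      // The constructor takes ownership of the wrapped pieces and copies any
      // leftover handshake bytes, mirroring the secure_endpoint ctor above.
      SecureEndpoint(std::unique_ptr<FrameProtector> protector,
                     std::vector<unsigned char> leftover)
          : protector_(std::move(protector)), leftover_(std::move(leftover)) {}
      // The implicit destructor releases everything; no separate destroy()
      // function needs to enumerate the fields by hand.

     private:
      std::unique_ptr<FrameProtector> protector_;
      std::vector<unsigned char> leftover_;
    };

    int main() {
      auto ep = std::make_unique<SecureEndpoint>(
          std::make_unique<FrameProtector>(), std::vector<unsigned char>{0x16});
      ep.reset();  // protector and leftover bytes are torn down here
      return 0;
    }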
diff --git a/src/core/lib/security/transport/security_handshaker.cc b/src/core/lib/security/transport/security_handshaker.cc
index 4d6b133809..854a1c4af9 100644
--- a/src/core/lib/security/transport/security_handshaker.cc
+++ b/src/core/lib/security/transport/security_handshaker.cc
@@ -275,9 +275,6 @@ static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void* user_data, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result) {
security_handshaker* h = static_cast<security_handshaker*>(user_data);
- // This callback will be invoked by TSI in a non-grpc thread, so it's
- // safe to create our own exec_ctx here.
- grpc_core::ExecCtx exec_ctx;
gpr_mu_lock(&h->mu);
grpc_error* error = on_handshake_next_done_locked(
h, result, bytes_to_send, bytes_to_send_size, handshaker_result);
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index b99fc5e178..362f49a584 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -28,6 +28,9 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
+static void recv_initial_metadata_ready(void* arg, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+
namespace {
enum async_state {
STATE_INIT = 0,
@@ -35,28 +38,55 @@ enum async_state {
STATE_CANCELLED,
};
+struct channel_data {
+ grpc_auth_context* auth_context;
+ grpc_server_credentials* creds;
+};
+
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner), owning_call(args.call_stack) {
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+ ::recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ // Create server security context. Set its auth context from channel
+ // data and save it in the call context.
+ grpc_server_security_context* server_ctx =
+ grpc_server_security_context_create(args.arena);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ server_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "server_auth_filter");
+ if (args.context[GRPC_CONTEXT_SECURITY].value != nullptr) {
+ args.context[GRPC_CONTEXT_SECURITY].destroy(
+ args.context[GRPC_CONTEXT_SECURITY].value);
+ }
+ args.context[GRPC_CONTEXT_SECURITY].value = server_ctx;
+ args.context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_server_security_context_destroy;
+ }
+
+ ~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
+
grpc_call_combiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
- grpc_error* recv_initial_metadata_error;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_error* recv_trailing_metadata_error;
- bool seen_recv_trailing_metadata_ready;
+ bool seen_recv_trailing_metadata_ready = false;
grpc_metadata_array md;
const grpc_metadata* consumed_md;
size_t num_consumed_md;
grpc_closure cancel_closure;
- gpr_atm state; // async_state
+ gpr_atm state = STATE_INIT; // async_state
};
-struct channel_data {
- grpc_auth_context* auth_context;
- grpc_server_credentials* creds;
-};
} // namespace
static grpc_metadata_array metadata_batch_to_md_array(
@@ -244,29 +274,7 @@ static void auth_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- calld->call_combiner = args->call_combiner;
- calld->owning_call = args->call_stack;
- GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
- recv_initial_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- // Create server security context. Set its auth context from channel
- // data and save it in the call context.
- grpc_server_security_context* server_ctx =
- grpc_server_security_context_create(args->arena);
- server_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "server_auth_filter");
- if (args->context[GRPC_CONTEXT_SECURITY].value != nullptr) {
- args->context[GRPC_CONTEXT_SECURITY].destroy(
- args->context[GRPC_CONTEXT_SECURITY].value);
- }
- args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
- args->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_server_security_context_destroy;
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -275,7 +283,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+ calld->~call_data();
}
/* Constructor for channel_data */
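
The server-side filter now installs its security context from the call_data constructor: the call context slot stores an opaque value together with a destroy function, and any previously installed value is destroyed before being replaced. A small sketch of that slot pattern (ContextElement and SetContext are hypothetical stand-ins for grpc_call_context_element and the inline logic above):

    #include <cstdio>

    struct ContextElement {
      void* value = nullptr;
      void (*destroy)(void*) = nullptr;
    };

    static void SetContext(ContextElement* slot, void* value,
                           void (*destroy)(void*)) {
      if (slot->value != nullptr && slot->destroy != nullptr) {
        slot->destroy(slot->value);  // release whatever was installed before
      }
      slot->value = value;
      slot->destroy = destroy;
    }

    int main() {
      ContextElement security_slot;
      static int dummy_ctx = 0;
      SetContext(&security_slot, &dummy_ctx,
                 [](void*) { std::puts("previous context destroyed"); });
      SetContext(&security_slot, nullptr, nullptr);  // destroys the old value
      return 0;
    }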
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index a9349afa68..89b3f77822 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -72,8 +72,11 @@
// Used to create arena for the first call.
#define ESTIMATED_MDELEM_COUNT 16
-typedef struct batch_control {
- grpc_call* call;
+struct batch_control {
+ batch_control() { gpr_ref_init(&steps_to_complete, 0); }
+
+ grpc_call* call = nullptr;
+ grpc_transport_stream_op_batch op;
/* Share memory for cq_completion and notify_tag as they are never needed
simultaneously. Each byte used in this data structure count as six bytes
per call, so any savings we can make are worthwhile,
@@ -96,84 +99,110 @@ typedef struct batch_control {
grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
- gpr_atm batch_error;
- grpc_transport_stream_op_batch op;
-} batch_control;
+ gpr_atm batch_error = reinterpret_cast<gpr_atm>(GRPC_ERROR_NONE);
+};
+
+struct parent_call {
+ parent_call() { gpr_mu_init(&child_list_mu); }
+ ~parent_call() { gpr_mu_destroy(&child_list_mu); }
-typedef struct {
gpr_mu child_list_mu;
- grpc_call* first_child;
-} parent_call;
+ grpc_call* first_child = nullptr;
+};
-typedef struct {
+struct child_call {
+ child_call(grpc_call* parent) : parent(parent) {}
grpc_call* parent;
/** siblings: children of the same parent form a list, and this list is
protected under
parent->mu */
- grpc_call* sibling_next;
- grpc_call* sibling_prev;
-} child_call;
+ grpc_call* sibling_next = nullptr;
+ grpc_call* sibling_prev = nullptr;
+};
#define RECV_NONE ((gpr_atm)0)
#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
struct grpc_call {
+ grpc_call(gpr_arena* arena, const grpc_call_create_args& args)
+ : arena(arena),
+ cq(args.cq),
+ channel(args.channel),
+ is_client(args.server_transport_data == nullptr),
+ stream_op_payload(context) {
+ gpr_ref_init(&ext_ref, 1);
+ grpc_call_combiner_init(&call_combiner);
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 2; j++) {
+ metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
+ }
+ }
+ }
+
+ ~grpc_call() {
+ gpr_free(static_cast<void*>(const_cast<char*>(final_info.error_string)));
+ grpc_call_combiner_destroy(&call_combiner);
+ }
+
gpr_refcount ext_ref;
gpr_arena* arena;
grpc_call_combiner call_combiner;
grpc_completion_queue* cq;
grpc_polling_entity pollent;
grpc_channel* channel;
- gpr_timespec start_time;
- /* parent_call* */ gpr_atm parent_call_atm;
- child_call* child;
+ gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+ /* parent_call* */ gpr_atm parent_call_atm = 0;
+ child_call* child = nullptr;
/* client or server call */
bool is_client;
/** has grpc_call_unref been called */
- bool destroy_called;
+ bool destroy_called = false;
/** flag indicating that cancellation is inherited */
- bool cancellation_is_inherited;
+ bool cancellation_is_inherited = false;
/** which ops are in-flight */
- bool sent_initial_metadata;
- bool sending_message;
- bool sent_final_op;
- bool received_initial_metadata;
- bool receiving_message;
- bool requested_final_op;
- gpr_atm any_ops_sent_atm;
- gpr_atm received_final_op_atm;
-
- batch_control* active_batches[MAX_CONCURRENT_BATCHES];
+ bool sent_initial_metadata = false;
+ bool sending_message = false;
+ bool sent_final_op = false;
+ bool received_initial_metadata = false;
+ bool receiving_message = false;
+ bool requested_final_op = false;
+ gpr_atm any_ops_sent_atm = 0;
+ gpr_atm received_final_op_atm = 0;
+
+ batch_control* active_batches[MAX_CONCURRENT_BATCHES] = {};
grpc_transport_stream_op_batch_payload stream_op_payload;
/* first idx: is_receiving, second idx: is_trailing */
- grpc_metadata_batch metadata_batch[2][2];
+ grpc_metadata_batch metadata_batch[2][2] = {};
/* Buffered read metadata waiting to be returned to the application.
Element 0 is initial metadata, element 1 is trailing metadata. */
- grpc_metadata_array* buffered_metadata[2];
+ grpc_metadata_array* buffered_metadata[2] = {};
grpc_metadata compression_md;
// A char* indicating the peer name.
- gpr_atm peer_string;
+ gpr_atm peer_string = 0;
/* Call data useful used for reporting. Only valid after the call has
* completed */
grpc_call_final_info final_info;
/* Compression algorithm for *incoming* data */
- grpc_message_compression_algorithm incoming_message_compression_algorithm;
+ grpc_message_compression_algorithm incoming_message_compression_algorithm =
+ GRPC_MESSAGE_COMPRESS_NONE;
/* Stream compression algorithm for *incoming* data */
- grpc_stream_compression_algorithm incoming_stream_compression_algorithm;
- /* Supported encodings (compression algorithms), a bitset */
- uint32_t encodings_accepted_by_peer;
+ grpc_stream_compression_algorithm incoming_stream_compression_algorithm =
+ GRPC_STREAM_COMPRESS_NONE;
+ /* Supported encodings (compression algorithms), a bitset.
+ * Always support no compression. */
+ uint32_t encodings_accepted_by_peer = 1 << GRPC_MESSAGE_COMPRESS_NONE;
/* Supported stream encodings (stream compression algorithms), a bitset */
- uint32_t stream_encodings_accepted_by_peer;
+ uint32_t stream_encodings_accepted_by_peer = 0;
/* Contexts for various subsystems (security, tracing, ...). */
- grpc_call_context_element context[GRPC_CONTEXT_COUNT];
+ grpc_call_context_element context[GRPC_CONTEXT_COUNT] = {};
/* for the client, extra metadata is initial metadata; for the
server, it's trailing metadata */
@@ -184,14 +213,14 @@ struct grpc_call {
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> sending_stream;
grpc_core::OrphanablePtr<grpc_core::ByteStream> receiving_stream;
- grpc_byte_buffer** receiving_buffer;
- grpc_slice receiving_slice;
+ grpc_byte_buffer** receiving_buffer = nullptr;
+ grpc_slice receiving_slice = grpc_empty_slice();
grpc_closure receiving_slice_ready;
grpc_closure receiving_stream_ready;
grpc_closure receiving_initial_metadata_ready;
grpc_closure receiving_trailing_metadata_ready;
- uint32_t test_only_last_message_flags;
- gpr_atm cancelled;
+ uint32_t test_only_last_message_flags = 0;
+ gpr_atm cancelled = 0;
grpc_closure release_call;
@@ -207,7 +236,7 @@ struct grpc_call {
grpc_server* server;
} server;
} final_op;
- gpr_atm status_error;
+ gpr_atm status_error = 0;
/* recv_state can contain one of the following values:
RECV_NONE : : no initial metadata and messages received
@@ -225,7 +254,7 @@ struct grpc_call {
For 1, 4: See receiving_initial_metadata_ready() function
For 2, 3: See receiving_stream_ready() function */
- gpr_atm recv_state;
+ gpr_atm recv_state = 0;
};
grpc_core::TraceFlag grpc_call_error_trace(false, "call_error");
@@ -269,11 +298,10 @@ void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
static parent_call* get_or_create_parent_call(grpc_call* call) {
parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
if (p == nullptr) {
- p = static_cast<parent_call*>(gpr_arena_alloc(call->arena, sizeof(*p)));
- gpr_mu_init(&p->child_list_mu);
+ p = new (gpr_arena_alloc(call->arena, sizeof(*p))) parent_call();
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
(gpr_atm)p)) {
- gpr_mu_destroy(&p->child_list_mu);
+ p->~parent_call();
p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
}
}
@@ -292,7 +320,9 @@ size_t grpc_call_get_initial_size_estimate() {
grpc_error* grpc_call_create(const grpc_call_create_args* args,
grpc_call** out_call) {
GPR_TIMER_SCOPE("grpc_call_create", 0);
- size_t i, j;
+
+ GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
+
grpc_error* error = GRPC_ERROR_NONE;
grpc_channel_stack* channel_stack =
grpc_channel_get_channel_stack(args->channel);
@@ -300,27 +330,19 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
gpr_arena* arena = gpr_arena_create(initial_size);
- call = static_cast<grpc_call*>(
- gpr_arena_alloc(arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
- channel_stack->call_stack_size));
- gpr_ref_init(&call->ext_ref, 1);
- gpr_atm_no_barrier_store(&call->cancelled, 0);
- call->arena = arena;
- grpc_call_combiner_init(&call->call_combiner);
+ call = new (gpr_arena_alloc(
+ arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
+ channel_stack->call_stack_size)) grpc_call(arena, *args);
*out_call = call;
- call->channel = args->channel;
- call->cq = args->cq;
- call->start_time = gpr_now(GPR_CLOCK_MONOTONIC);
- /* Always support no compression */
- GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_MESSAGE_COMPRESS_NONE);
- call->is_client = args->server_transport_data == nullptr;
- call->stream_op_payload.context = call->context;
grpc_slice path = grpc_empty_slice();
if (call->is_client) {
+ call->final_op.client.status_details = nullptr;
+ call->final_op.client.status = nullptr;
+ call->final_op.client.error_string = nullptr;
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
GPR_ASSERT(args->add_initial_metadata_count <
MAX_SEND_EXTRA_METADATA_COUNT);
- for (i = 0; i < args->add_initial_metadata_count; i++) {
+ for (size_t i = 0; i < args->add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = args->add_initial_metadata[i];
if (grpc_slice_eq(GRPC_MDKEY(args->add_initial_metadata[i]),
GRPC_MDSTR_PATH)) {
@@ -332,23 +354,18 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
static_cast<int>(args->add_initial_metadata_count);
} else {
GRPC_STATS_INC_SERVER_CALLS_CREATED();
+ call->final_op.server.cancelled = nullptr;
call->final_op.server.server = args->server;
GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 2; j++) {
- call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
- }
- }
- grpc_millis send_deadline = args->send_deadline;
+ grpc_millis send_deadline = args->send_deadline;
bool immediately_cancel = false;
if (args->parent != nullptr) {
- call->child =
- static_cast<child_call*>(gpr_arena_alloc(arena, sizeof(child_call)));
- call->child->parent = args->parent;
+ call->child = new (gpr_arena_alloc(arena, sizeof(child_call)))
+ child_call(args->parent);
GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
@@ -382,10 +399,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
}
}
}
-
call->send_deadline = send_deadline;
-
- GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_unref */
grpc_call_element_args call_args = {CALL_STACK_FROM_CALL(call),
args->server_transport_data,
@@ -413,6 +427,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
}
gpr_mu_unlock(&pc->child_list_mu);
}
+
if (error != GRPC_ERROR_NONE) {
cancel_with_error(call, GRPC_ERROR_REF(error));
}
@@ -487,9 +502,9 @@ void grpc_call_internal_unref(grpc_call* c REF_ARG) {
static void release_call(void* call, grpc_error* error) {
grpc_call* c = static_cast<grpc_call*>(call);
grpc_channel* channel = c->channel;
- gpr_free(static_cast<void*>(const_cast<char*>(c->final_info.error_string)));
- grpc_call_combiner_destroy(&c->call_combiner);
- grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
+ gpr_arena* arena = c->arena;
+ c->~grpc_call();
+ grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(arena));
GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
}
@@ -505,7 +520,7 @@ static void destroy_call(void* call, grpc_error* error) {
c->receiving_stream.reset();
parent_call* pc = get_parent_call(c);
if (pc != nullptr) {
- gpr_mu_destroy(&pc->child_list_mu);
+ pc->~parent_call();
}
for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
@@ -679,6 +694,10 @@ static void cancel_with_error(grpc_call* c, grpc_error* error) {
execute_batch(c, op, &state->start_batch);
}
+void grpc_call_cancel_internal(grpc_call* call) {
+ cancel_with_error(call, GRPC_ERROR_CANCELLED);
+}
+
static grpc_error* error_from_status(grpc_status_code status,
const char* description) {
// copying 'description' is needed to ensure the grpc_call_cancel_with_status
@@ -1100,10 +1119,11 @@ static batch_control* reuse_or_allocate_batch_control(grpc_call* call,
if (bctl->call != nullptr) {
return nullptr;
}
- memset(bctl, 0, sizeof(*bctl));
+ bctl->~batch_control();
+ bctl->op = {};
} else {
- bctl = static_cast<batch_control*>(
- gpr_arena_alloc(call->arena, sizeof(batch_control)));
+ bctl = new (gpr_arena_alloc(call->arena, sizeof(batch_control)))
+ batch_control();
*pslot = bctl;
}
bctl->call = call;
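
release_call() above has to copy the arena handle out of the call before running ~grpc_call(), because the call object itself lives inside the arena it is about to destroy. A compact sketch of that ordering, with a toy bump-pointer arena standing in for gpr_arena (Arena and Call are hypothetical types for illustration only):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Toy bump-pointer arena: hands out bytes from one block, freed all at once.
    struct Arena {
      explicit Arena(size_t size) : buf(static_cast<char*>(std::malloc(size))) {}
      void* Alloc(size_t size) {
        void* p = buf + used;
        used += size;
        return p;
      }
      char* buf;
      size_t used = 0;
    };

    static size_t ArenaDestroy(Arena* a) {
      size_t used = a->used;  // analogous to gpr_arena_destroy() reporting usage
      std::free(a->buf);
      delete a;
      return used;
    }

    struct Call {
      explicit Call(Arena* arena) : arena(arena) {}
      ~Call() { /* free per-call state */ }
      Arena* arena;
    };

    int main() {
      Arena* arena = new Arena(1024);
      Call* c = new (arena->Alloc(sizeof(Call))) Call(arena);  // lives in the arena
      Arena* a = c->arena;  // copy out before the destructor runs
      c->~Call();           // the Call's own memory belongs to the arena
      ArenaDestroy(a);      // only now can the backing block be freed
      return 0;
    }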
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index b34260505a..bd7295fe11 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -81,6 +81,10 @@ grpc_call_error grpc_call_start_batch_and_execute(grpc_call* call,
size_t nops,
grpc_closure* closure);
+/* gRPC core internal version of grpc_call_cancel that does not create an
+ * exec_ctx. */
+void grpc_call_cancel_internal(grpc_call* call);
+
/* Given the top call_element, get the call object. */
grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element);
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc
index d7095c24d4..e47cb4360e 100644
--- a/src/core/lib/surface/channel.cc
+++ b/src/core/lib/surface/channel.cc
@@ -39,6 +39,7 @@
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -63,6 +64,7 @@ struct grpc_channel {
grpc_compression_options compression_options;
gpr_atm call_size_estimate;
+ grpc_resource_user* resource_user;
gpr_mu registered_call_mu;
registered_call* registered_calls;
@@ -82,6 +84,8 @@ grpc_channel* grpc_channel_create_with_builder(
char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
grpc_channel_args* args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
+ grpc_resource_user* resource_user =
+ grpc_channel_stack_builder_get_resource_user(builder);
grpc_channel* channel;
if (channel_stack_type == GRPC_SERVER_CHANNEL) {
GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
@@ -101,9 +105,11 @@ grpc_channel* grpc_channel_create_with_builder(
}
channel->target = target;
+ channel->resource_user = resource_user;
channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
bool channelz_enabled = GRPC_ENABLE_CHANNELZ_DEFAULT;
- size_t channel_tracer_max_memory = 0; // default to off
+ size_t channel_tracer_max_memory =
+ GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT;
bool internal_channel = false;
// this creates the default ChannelNode. Different types of channels may
// override this to ensure a correct ChannelNode is created.
@@ -142,7 +148,6 @@ grpc_channel* grpc_channel_create_with_builder(
0x1; /* always support no compression */
} else if (0 == strcmp(args->args[i].key,
GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE)) {
- GPR_ASSERT(channel_tracer_max_memory == 0);
const grpc_integer_options options = {
GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX};
channel_tracer_max_memory =
@@ -217,7 +222,8 @@ grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
grpc_channel* grpc_channel_create(const char* target,
const grpc_channel_args* input_args,
grpc_channel_stack_type channel_stack_type,
- grpc_transport* optional_transport) {
+ grpc_transport* optional_transport,
+ grpc_resource_user* resource_user) {
grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
const grpc_core::UniquePtr<char> default_authority =
get_default_authority(input_args);
@@ -227,11 +233,17 @@ grpc_channel* grpc_channel_create(const char* target,
grpc_channel_args_destroy(args);
grpc_channel_stack_builder_set_target(builder, target);
grpc_channel_stack_builder_set_transport(builder, optional_transport);
+ grpc_channel_stack_builder_set_resource_user(builder, resource_user);
if (!grpc_channel_init_create_stack(builder, channel_stack_type)) {
grpc_channel_stack_builder_destroy(builder);
+ if (resource_user != nullptr) {
+ grpc_resource_user_free(resource_user, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
+ }
return nullptr;
}
- return grpc_channel_create_with_builder(builder, channel_stack_type);
+ grpc_channel* channel =
+ grpc_channel_create_with_builder(builder, channel_stack_type);
+ return channel;
}
size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) {
@@ -310,8 +322,8 @@ static grpc_call* grpc_channel_create_call_internal(
}
grpc_call_create_args args;
- memset(&args, 0, sizeof(args));
args.channel = channel;
+ args.server = nullptr;
args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
@@ -441,6 +453,10 @@ static void destroy_channel(void* arg, grpc_error* error) {
GRPC_MDELEM_UNREF(rc->authority);
gpr_free(rc);
}
+ if (channel->resource_user != nullptr) {
+ grpc_resource_user_free(channel->resource_user,
+ GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
+ }
gpr_mu_destroy(&channel->registered_call_mu);
gpr_free(channel->target);
gpr_free(channel);
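
The channel now owns an optional per-channel resource reservation, and that reservation must be returned on every exit path: the early failure branch in grpc_channel_create() as well as destroy_channel(). A minimal sketch of that bookkeeping, assuming simplified stand-ins (ResourceUser, kChannelSize, ChannelCreate, and ChannelDestroy are hypothetical names for illustration):

    #include <cstdio>

    struct ResourceUser { int reserved_bytes = 0; };

    static void ResourceUserFree(ResourceUser* ru, int bytes) {
      ru->reserved_bytes -= bytes;
      std::printf("returned %d bytes\n", bytes);
    }

    struct Channel { ResourceUser* resource_user = nullptr; };
    constexpr int kChannelSize = 64 * 1024;  // stand-in for the per-channel quota size

    static Channel* ChannelCreate(ResourceUser* ru, bool stack_init_ok) {
      if (!stack_init_ok) {
        // Failure path: nobody else will ever see the reservation, so give it
        // back here, as the builder-failure branch above does.
        if (ru != nullptr) ResourceUserFree(ru, kChannelSize);
        return nullptr;
      }
      return new Channel{ru};  // ownership passes to the channel
    }

    static void ChannelDestroy(Channel* ch) {
      if (ch->resource_user != nullptr) {
        ResourceUserFree(ch->resource_user, kChannelSize);  // normal path
      }
      delete ch;
    }

    int main() {
      ResourceUser ru{kChannelSize};
      if (Channel* ch = ChannelCreate(&ru, /*stack_init_ok=*/true)) ChannelDestroy(ch);
      return 0;
    }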
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 4ac76b8a29..ab00b8e94f 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -29,7 +29,8 @@
grpc_channel* grpc_channel_create(const char* target,
const grpc_channel_args* args,
grpc_channel_stack_type channel_stack_type,
- grpc_transport* optional_transport);
+ grpc_transport* optional_transport,
+ grpc_resource_user* resource_user = nullptr);
grpc_channel* grpc_channel_create_with_builder(
grpc_channel_stack_builder* builder,
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index b81ae73b4d..661022ec5f 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -859,8 +859,8 @@ static void cq_end_op_for_callback(
gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
- cq_finish_shutdown_callback(cq);
gpr_mu_unlock(cq->mu);
+ cq_finish_shutdown_callback(cq);
} else {
gpr_mu_unlock(cq->mu);
}
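
The completion-queue change above moves cq_finish_shutdown_callback() outside the mutex: callbacks that may re-enter the queue should never run while the internal lock is held. A stripped-down sketch of that pattern with std::mutex standing in for gpr_mu (Queue is a hypothetical type, not the real completion queue):

    #include <functional>
    #include <mutex>
    #include <utility>

    class Queue {
     public:
      void SetShutdownCallback(std::function<void()> cb) {
        shutdown_cb_ = std::move(cb);
      }
      void FinishOp() {
        bool notify = false;
        {
          std::lock_guard<std::mutex> lock(mu_);
          if (--pending_ == 0) notify = true;  // decide under the lock
        }
        // Invoke after unlocking: the callback may call back into this Queue
        // (or block) without risking deadlock on mu_.
        if (notify && shutdown_cb_) shutdown_cb_();
      }

     private:
      std::mutex mu_;
      int pending_ = 1;
      std::function<void()> shutdown_cb_;
    };

    int main() {
      Queue q;
      q.SetShutdownCallback([] { /* shutdown work */ });
      q.FinishOp();
      return 0;
    }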
diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc
index 0ad82fed99..67cf5d89bf 100644
--- a/src/core/lib/surface/init.cc
+++ b/src/core/lib/surface/init.cc
@@ -123,6 +123,7 @@ void grpc_init(void) {
grpc_core::Fork::GlobalInit();
grpc_fork_handlers_auto_register();
gpr_time_init();
+ gpr_arena_init();
grpc_stats_init();
grpc_slice_intern_init();
grpc_mdctx_global_init();
@@ -160,6 +161,7 @@ void grpc_shutdown(void) {
if (--g_initializations == 0) {
{
grpc_core::ExecCtx exec_ctx(0);
+ grpc_iomgr_shutdown_background_closure();
{
grpc_timer_manager_set_threading(
false); // shutdown timer_manager thread
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index 35ab2c3bce..67b38e6f0c 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -28,6 +28,8 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <utility>
+
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
@@ -47,6 +49,10 @@
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
+static void server_on_recv_initial_metadata(void* ptr, grpc_error* error);
+static void server_recv_trailing_metadata_ready(void* user_data,
+ grpc_error* error);
+
namespace {
struct listener {
void* arg;
@@ -105,7 +111,7 @@ struct channel_data {
uint32_t registered_method_max_probes;
grpc_closure finish_destroy_channel_closure;
grpc_closure channel_connectivity_changed;
- intptr_t socket_uuid;
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node;
};
typedef struct shutdown_tag {
@@ -128,46 +134,73 @@ typedef enum {
typedef struct request_matcher request_matcher;
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call(grpc_call_from_top_element(elem)),
+ call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&server_on_recv_initial_metadata,
+ ::server_on_recv_initial_metadata, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ server_recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ }
+ ~call_data() {
+ GPR_ASSERT(state != PENDING);
+ GRPC_ERROR_UNREF(recv_initial_metadata_error);
+ if (host_set) {
+ grpc_slice_unref_internal(host);
+ }
+ if (path_set) {
+ grpc_slice_unref_internal(path);
+ }
+ grpc_metadata_array_destroy(&initial_metadata);
+ grpc_byte_buffer_destroy(payload);
+ }
+
grpc_call* call;
- gpr_atm state;
+ gpr_atm state = NOT_STARTED;
- bool path_set;
- bool host_set;
+ bool path_set = false;
+ bool host_set = false;
grpc_slice path;
grpc_slice host;
- grpc_millis deadline;
+ grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
- grpc_completion_queue* cq_new;
+ grpc_completion_queue* cq_new = nullptr;
- grpc_metadata_batch* recv_initial_metadata;
- uint32_t recv_initial_metadata_flags;
- grpc_metadata_array initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata = nullptr;
+ uint32_t recv_initial_metadata_flags = 0;
+ grpc_metadata_array initial_metadata =
+ grpc_metadata_array(); // Zero-initialize the C struct.
- request_matcher* matcher;
- grpc_byte_buffer* payload;
+ request_matcher* matcher = nullptr;
+ grpc_byte_buffer* payload = nullptr;
grpc_closure got_initial_metadata;
grpc_closure server_on_recv_initial_metadata;
grpc_closure kill_zombie_closure;
grpc_closure* on_done_recv_initial_metadata;
grpc_closure recv_trailing_metadata_ready;
- grpc_error* recv_initial_metadata_error;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
grpc_closure* original_recv_trailing_metadata_ready;
- grpc_error* recv_trailing_metadata_error;
- bool seen_recv_trailing_metadata_ready;
+ grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
+ bool seen_recv_trailing_metadata_ready = false;
grpc_closure publish;
- call_data* pending_next;
+ call_data* pending_next = nullptr;
grpc_call_combiner* call_combiner;
};
struct request_matcher {
+ request_matcher(grpc_server* server);
+ ~request_matcher();
+
grpc_server* server;
- call_data* pending_head;
- call_data* pending_tail;
- gpr_locked_mpscq* requests_per_cq;
+ std::atomic<call_data*> pending_head{nullptr};
+ call_data* pending_tail = nullptr;
+ gpr_locked_mpscq* requests_per_cq = nullptr;
};
struct registered_method {
@@ -189,6 +222,8 @@ typedef struct {
struct grpc_server {
grpc_channel_args* channel_args;
+ grpc_resource_user* default_resource_user;
+
grpc_completion_queue** cqs;
grpc_pollset** pollsets;
size_t cq_count;
@@ -314,22 +349,30 @@ static void channel_broadcaster_shutdown(channel_broadcaster* cb,
* request_matcher
*/
-static void request_matcher_init(request_matcher* rm, grpc_server* server) {
- memset(rm, 0, sizeof(*rm));
- rm->server = server;
- rm->requests_per_cq = static_cast<gpr_locked_mpscq*>(
- gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count));
+namespace {
+request_matcher::request_matcher(grpc_server* server) : server(server) {
+ requests_per_cq = static_cast<gpr_locked_mpscq*>(
+ gpr_malloc(sizeof(*requests_per_cq) * server->cq_count));
for (size_t i = 0; i < server->cq_count; i++) {
- gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
+ gpr_locked_mpscq_init(&requests_per_cq[i]);
}
}
-static void request_matcher_destroy(request_matcher* rm) {
- for (size_t i = 0; i < rm->server->cq_count; i++) {
- GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == nullptr);
- gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
+request_matcher::~request_matcher() {
+ for (size_t i = 0; i < server->cq_count; i++) {
+ GPR_ASSERT(gpr_locked_mpscq_pop(&requests_per_cq[i]) == nullptr);
+ gpr_locked_mpscq_destroy(&requests_per_cq[i]);
}
- gpr_free(rm->requests_per_cq);
+ gpr_free(requests_per_cq);
+}
+} // namespace
+
+static void request_matcher_init(request_matcher* rm, grpc_server* server) {
+ new (rm) request_matcher(server);
+}
+
+static void request_matcher_destroy(request_matcher* rm) {
+ rm->~request_matcher();
}
static void kill_zombie(void* elem, grpc_error* error) {
@@ -338,9 +381,10 @@ static void kill_zombie(void* elem, grpc_error* error) {
}
static void request_matcher_zombify_all_pending_calls(request_matcher* rm) {
- while (rm->pending_head) {
- call_data* calld = rm->pending_head;
- rm->pending_head = calld->pending_next;
+ call_data* calld;
+ while ((calld = rm->pending_head.load(std::memory_order_relaxed)) !=
+ nullptr) {
+ rm->pending_head.store(calld->pending_next, std::memory_order_relaxed);
gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
@@ -538,8 +582,9 @@ static void publish_new_rpc(void* arg, grpc_error* error) {
}
gpr_atm_no_barrier_store(&calld->state, PENDING);
- if (rm->pending_head == nullptr) {
- rm->pending_tail = rm->pending_head = calld;
+ if (rm->pending_head.load(std::memory_order_relaxed) == nullptr) {
+ rm->pending_head.store(calld, std::memory_order_relaxed);
+ rm->pending_tail = calld;
} else {
rm->pending_tail->pending_next = calld;
rm->pending_tail = calld;
@@ -822,11 +867,16 @@ static void accept_stream(void* cd, grpc_transport* transport,
channel_data* chand = static_cast<channel_data*>(cd);
/* create a call */
grpc_call_create_args args;
- memset(&args, 0, sizeof(args));
args.channel = chand->channel;
+ args.server = chand->server;
+ args.parent = nullptr;
+ args.propagation_mask = 0;
+ args.cq = nullptr;
+ args.pollset_set_alternative = nullptr;
args.server_transport_data = transport_server_data;
+ args.add_initial_metadata = nullptr;
+ args.add_initial_metadata_count = 0;
args.send_deadline = GRPC_MILLIS_INF_FUTURE;
- args.server = chand->server;
grpc_call* call;
grpc_error* error = grpc_call_create(&args, &call);
grpc_call_element* elem =
@@ -838,8 +888,9 @@ static void accept_stream(void* cd, grpc_transport* transport,
}
call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_op op;
- memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
+ op.flags = 0;
+ op.reserved = nullptr;
op.data.recv_initial_metadata.recv_initial_metadata =
&calld->initial_metadata;
GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
@@ -867,40 +918,18 @@ static void channel_connectivity_changed(void* cd, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- memset(calld, 0, sizeof(call_data));
- calld->deadline = GRPC_MILLIS_INF_FUTURE;
- calld->call = grpc_call_from_top_element(elem);
- calld->call_combiner = args->call_combiner;
-
- GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
- server_on_recv_initial_metadata, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- server_recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
server_ref(chand->server);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
-
- GPR_ASSERT(calld->state != PENDING);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
- if (calld->host_set) {
- grpc_slice_unref_internal(calld->host);
- }
- if (calld->path_set) {
- grpc_slice_unref_internal(calld->path);
- }
- grpc_metadata_array_destroy(&calld->initial_metadata);
- grpc_byte_buffer_destroy(calld->payload);
-
+ calld->~call_data();
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
server_unref(chand->server);
}
@@ -923,6 +952,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
static void destroy_channel_elem(grpc_channel_element* elem) {
size_t i;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ chand->socket_node.reset();
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
grpc_slice_unref_internal(chand->registered_methods[i].method);
@@ -1024,6 +1054,15 @@ grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
grpc_slice_from_static_string("Server created"));
}
+ if (args != nullptr) {
+ grpc_resource_quota* resource_quota =
+ grpc_resource_quota_from_channel_args(args, false /* create */);
+ if (resource_quota != nullptr) {
+ server->default_resource_user =
+ grpc_resource_user_create(resource_quota, "default");
+ }
+ }
+
return server;
}
@@ -1119,10 +1158,11 @@ void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
*pollsets = server->pollsets;
}
-void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
- grpc_pollset* accepting_pollset,
- const grpc_channel_args* args,
- intptr_t socket_uuid) {
+void grpc_server_setup_transport(
+ grpc_server* s, grpc_transport* transport, grpc_pollset* accepting_pollset,
+ const grpc_channel_args* args,
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node,
+ grpc_resource_user* resource_user) {
size_t num_registered_methods;
size_t alloc;
registered_method* rm;
@@ -1135,14 +1175,15 @@ void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
uint32_t max_probes = 0;
grpc_transport_op* op = nullptr;
- channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport);
+ channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport,
+ resource_user);
chand = static_cast<channel_data*>(
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0)
->channel_data);
chand->server = s;
server_ref(s);
chand->channel = channel;
- chand->socket_uuid = socket_uuid;
+ chand->socket_node = std::move(socket_node);
size_t cq_idx;
for (cq_idx = 0; cq_idx < s->cq_count; cq_idx++) {
@@ -1218,14 +1259,13 @@ void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
}
void grpc_server_populate_server_sockets(
- grpc_server* s, grpc_core::channelz::ChildRefsList* server_sockets,
+ grpc_server* s, grpc_core::channelz::ChildSocketsList* server_sockets,
intptr_t start_idx) {
gpr_mu_lock(&s->mu_global);
channel_data* c = nullptr;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
- intptr_t socket_uuid = c->socket_uuid;
- if (socket_uuid >= start_idx) {
- server_sockets->push_back(socket_uuid);
+ if (c->socket_node != nullptr && c->socket_node->uuid() >= start_idx) {
+ server_sockets->push_back(c->socket_node.get());
}
}
gpr_mu_unlock(&s->mu_global);
@@ -1330,6 +1370,13 @@ void grpc_server_shutdown_and_notify(grpc_server* server,
channel_broadcaster_shutdown(&broadcaster, true /* send_goaway */,
GRPC_ERROR_NONE);
+
+ if (server->default_resource_user != nullptr) {
+ grpc_resource_quota_unref(
+ grpc_resource_user_quota(server->default_resource_user));
+ grpc_resource_user_shutdown(server->default_resource_user);
+ grpc_resource_user_unref(server->default_resource_user);
+ }
}
void grpc_server_cancel_all_calls(grpc_server* server) {
@@ -1401,30 +1448,39 @@ static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
rm = &rc->data.registered.method->matcher;
break;
}
- if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
- /* this was the first queued request: we need to lock and start
- matching calls */
- gpr_mu_lock(&server->mu_call);
- while ((calld = rm->pending_head) != nullptr) {
- rc = reinterpret_cast<requested_call*>(
- gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
- if (rc == nullptr) break;
- rm->pending_head = calld->pending_next;
- gpr_mu_unlock(&server->mu_call);
- if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
- // Zombied Call
- GRPC_CLOSURE_INIT(
- &calld->kill_zombie_closure, kill_zombie,
- grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
- } else {
- publish_call(server, calld, cq_idx, rc);
- }
- gpr_mu_lock(&server->mu_call);
- }
+
+ // Fast path: if there is no pending request to be processed, immediately
+ // return.
+ if (!gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link) ||
+ // Note: We are reading the pending_head without holding the server's call
+ // mutex. Even if we read a non-null value here due to reordering,
+ // we will check it below again after grabbing the lock.
+ rm->pending_head.load(std::memory_order_relaxed) == nullptr) {
+ return GRPC_CALL_OK;
+ }
+ // Slow path: this was the first queued request and there are pending calls,
+ // so we need to take the lock and start matching calls.
+ gpr_mu_lock(&server->mu_call);
+ while ((calld = rm->pending_head.load(std::memory_order_relaxed)) !=
+ nullptr) {
+ rc = reinterpret_cast<requested_call*>(
+ gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
+ if (rc == nullptr) break;
+ rm->pending_head.store(calld->pending_next, std::memory_order_relaxed);
gpr_mu_unlock(&server->mu_call);
+ if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
+ // Zombied Call
+ GRPC_CLOSURE_INIT(
+ &calld->kill_zombie_closure, kill_zombie,
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
+ } else {
+ publish_call(server, calld, cq_idx, rc);
+ }
+ gpr_mu_lock(&server->mu_call);
}
+ gpr_mu_unlock(&server->mu_call);
return GRPC_CALL_OK;
}
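The rewritten queue_call_request above splits matching into a lock-free fast path (the MPSC push reports whether this was the first queued request, and a relaxed load of pending_head gates everything else) and a slow path that re-checks state under mu_call. A minimal standalone sketch of that shape, with std::mutex and a plain deque standing in for gpr_mu and gpr_locked_mpscq (all names here are illustrative, not the gRPC API):

```cpp
#include <atomic>
#include <deque>
#include <mutex>

// Hypothetical sketch of the fast-path/slow-path request matching above.
struct RequestMatcher {
  std::mutex mu_queue;          // protects `requests` only (stand-in for the MPSC queue)
  std::deque<int> requests;     // queued requested_calls
  std::atomic<int> pending{0};  // pending incoming calls
  std::mutex mu_call;           // the expensive matching lock (mu_call in server.cc)

  // Returns true if this was the first queued request.
  bool Push(int rc) {
    std::lock_guard<std::mutex> lk(mu_queue);
    requests.push_back(rc);
    return requests.size() == 1;
  }

  void QueueCallRequest(int rc) {
    // Fast path: not the first queued request, or nothing pending -> done.
    if (!Push(rc) || pending.load(std::memory_order_relaxed) == 0) return;
    // Slow path: match pending calls against queued requests under mu_call.
    std::lock_guard<std::mutex> lk(mu_call);
    while (pending.load(std::memory_order_relaxed) > 0) {
      std::lock_guard<std::mutex> qlk(mu_queue);
      if (requests.empty()) break;
      requests.pop_front();                             // take one request...
      pending.fetch_sub(1, std::memory_order_relaxed);  // ...and pair it with a call
      // publish_call(...) would run here in the real code.
    }
  }
};
```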
@@ -1546,6 +1602,10 @@ const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) {
return server->channel_args;
}
+grpc_resource_user* grpc_server_get_default_resource_user(grpc_server* server) {
+ return server->default_resource_user;
+}
+
int grpc_server_has_open_connections(grpc_server* server) {
int r;
gpr_mu_lock(&server->mu_global);
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index 33c205417e..393bb24214 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -44,14 +44,15 @@ void grpc_server_add_listener(grpc_server* server, void* listener,
/* Set up a transport - creates a channel stack, binds the transport to the
server */
-void grpc_server_setup_transport(grpc_server* server, grpc_transport* transport,
- grpc_pollset* accepting_pollset,
- const grpc_channel_args* args,
- intptr_t socket_uuid);
+void grpc_server_setup_transport(
+ grpc_server* server, grpc_transport* transport,
+ grpc_pollset* accepting_pollset, const grpc_channel_args* args,
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node,
+ grpc_resource_user* resource_user = nullptr);
/* fills in the channelz SocketNode refs of all sockets used for connections on this server */
void grpc_server_populate_server_sockets(
- grpc_server* server, grpc_core::channelz::ChildRefsList* server_sockets,
+ grpc_server* server, grpc_core::channelz::ChildSocketsList* server_sockets,
intptr_t start_idx);
/* fills in the uuids of all listen sockets on this server */
@@ -63,6 +64,8 @@ grpc_core::channelz::ServerNode* grpc_server_get_channelz_node(
const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server);
+grpc_resource_user* grpc_server_get_default_resource_user(grpc_server* server);
+
int grpc_server_has_open_connections(grpc_server* server);
/* Do not call this before grpc_server_start. Returns the pollsets and the
diff --git a/src/core/lib/surface/version.cc b/src/core/lib/surface/version.cc
index 66890ce65a..4829cc80a5 100644
--- a/src/core/lib/surface/version.cc
+++ b/src/core/lib/surface/version.cc
@@ -25,4 +25,4 @@
const char* grpc_version_string(void) { return "7.0.0-dev"; }
-const char* grpc_g_stands_for(void) { return "gizmo"; }
+const char* grpc_g_stands_for(void) { return "goose"; }
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 0bcbb32d1f..f6e8bbf205 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -31,9 +31,11 @@
#include "src/core/lib/transport/static_metadata.h"
typedef struct grpc_linked_mdelem {
+ grpc_linked_mdelem() {}
+
grpc_mdelem md;
- struct grpc_linked_mdelem* next;
- struct grpc_linked_mdelem* prev;
+ struct grpc_linked_mdelem* next = nullptr;
+ struct grpc_linked_mdelem* prev = nullptr;
void* reserved;
} grpc_linked_mdelem;
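grpc_linked_mdelem now gets a defaulted constructor plus `= nullptr` initializers for its link pointers, so a freshly constructed element is a well-defined, unlinked node rather than carrying indeterminate pointers. A reduced illustration of the idiom (not gRPC code):

```cpp
#include <cassert>

// Contrast of the two forms, mirroring the grpc_linked_mdelem change above.
struct LinkOld {
  LinkOld* next;            // indeterminate until explicitly set
  LinkOld* prev;
};

struct LinkNew {
  LinkNew() {}
  LinkNew* next = nullptr;  // always starts as a well-defined, unlinked node
  LinkNew* prev = nullptr;
};

int main() {
  LinkNew l;                // default-constructed: pointers are null
  assert(l.next == nullptr && l.prev == nullptr);
  return 0;
}
```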
diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc
index 4ebe73f82a..3dfaaaad5c 100644
--- a/src/core/lib/transport/static_metadata.cc
+++ b/src/core/lib/transport/static_metadata.cc
@@ -65,51 +65,56 @@ static uint8_t g_bytes[] = {
97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111,
97, 100, 47, 103, 114, 112, 99, 46, 104, 101, 97, 108, 116, 104, 46,
118, 49, 46, 72, 101, 97, 108, 116, 104, 47, 87, 97, 116, 99, 104,
- 100, 101, 102, 108, 97, 116, 101, 103, 122, 105, 112, 115, 116, 114, 101,
- 97, 109, 47, 103, 122, 105, 112, 71, 69, 84, 80, 79, 83, 84, 47,
- 47, 105, 110, 100, 101, 120, 46, 104, 116, 109, 108, 104, 116, 116, 112,
- 104, 116, 116, 112, 115, 50, 48, 48, 50, 48, 52, 50, 48, 54, 51,
- 48, 52, 52, 48, 48, 52, 48, 52, 53, 48, 48, 97, 99, 99, 101,
- 112, 116, 45, 99, 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44,
- 32, 100, 101, 102, 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45,
- 108, 97, 110, 103, 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45,
- 114, 97, 110, 103, 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99,
- 101, 115, 115, 45, 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108,
- 111, 119, 45, 111, 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108,
- 111, 119, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110,
- 99, 97, 99, 104, 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111,
- 110, 116, 101, 110, 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105,
- 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117,
- 97, 103, 101, 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103,
- 116, 104, 99, 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116,
- 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103,
- 101, 99, 111, 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103,
- 101, 120, 112, 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114,
- 111, 109, 105, 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111,
- 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45,
- 110, 111, 110, 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97,
- 110, 103, 101, 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101,
- 100, 45, 115, 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100,
- 105, 102, 105, 101, 100, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105,
- 111, 110, 109, 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112,
- 114, 111, 120, 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97,
- 116, 101, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105,
- 122, 97, 116, 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101,
- 114, 101, 114, 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121,
- 45, 97, 102, 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116,
- 45, 99, 111, 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116,
- 114, 97, 110, 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105,
- 116, 121, 116, 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111,
- 100, 105, 110, 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45,
- 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 48, 105, 100,
- 101, 110, 116, 105, 116, 121, 116, 114, 97, 105, 108, 101, 114, 115, 97,
- 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99,
- 103, 114, 112, 99, 80, 85, 84, 108, 98, 45, 99, 111, 115, 116, 45,
- 98, 105, 110, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102,
- 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103, 122,
- 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112, 105,
- 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101,
- 44, 103, 122, 105, 112};
+ 47, 101, 110, 118, 111, 121, 46, 115, 101, 114, 118, 105, 99, 101, 46,
+ 100, 105, 115, 99, 111, 118, 101, 114, 121, 46, 118, 50, 46, 65, 103,
+ 103, 114, 101, 103, 97, 116, 101, 100, 68, 105, 115, 99, 111, 118, 101,
+ 114, 121, 83, 101, 114, 118, 105, 99, 101, 47, 83, 116, 114, 101, 97,
+ 109, 65, 103, 103, 114, 101, 103, 97, 116, 101, 100, 82, 101, 115, 111,
+ 117, 114, 99, 101, 115, 100, 101, 102, 108, 97, 116, 101, 103, 122, 105,
+ 112, 115, 116, 114, 101, 97, 109, 47, 103, 122, 105, 112, 71, 69, 84,
+ 80, 79, 83, 84, 47, 47, 105, 110, 100, 101, 120, 46, 104, 116, 109,
+ 108, 104, 116, 116, 112, 104, 116, 116, 112, 115, 50, 48, 48, 50, 48,
+ 52, 50, 48, 54, 51, 48, 52, 52, 48, 48, 52, 48, 52, 53, 48,
+ 48, 97, 99, 99, 101, 112, 116, 45, 99, 104, 97, 114, 115, 101, 116,
+ 103, 122, 105, 112, 44, 32, 100, 101, 102, 108, 97, 116, 101, 97, 99,
+ 99, 101, 112, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 97, 99,
+ 99, 101, 112, 116, 45, 114, 97, 110, 103, 101, 115, 97, 99, 99, 101,
+ 112, 116, 97, 99, 99, 101, 115, 115, 45, 99, 111, 110, 116, 114, 111,
+ 108, 45, 97, 108, 108, 111, 119, 45, 111, 114, 105, 103, 105, 110, 97,
+ 103, 101, 97, 108, 108, 111, 119, 97, 117, 116, 104, 111, 114, 105, 122,
+ 97, 116, 105, 111, 110, 99, 97, 99, 104, 101, 45, 99, 111, 110, 116,
+ 114, 111, 108, 99, 111, 110, 116, 101, 110, 116, 45, 100, 105, 115, 112,
+ 111, 115, 105, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45,
+ 108, 97, 110, 103, 117, 97, 103, 101, 99, 111, 110, 116, 101, 110, 116,
+ 45, 108, 101, 110, 103, 116, 104, 99, 111, 110, 116, 101, 110, 116, 45,
+ 108, 111, 99, 97, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116,
+ 45, 114, 97, 110, 103, 101, 99, 111, 111, 107, 105, 101, 100, 97, 116,
+ 101, 101, 116, 97, 103, 101, 120, 112, 101, 99, 116, 101, 120, 112, 105,
+ 114, 101, 115, 102, 114, 111, 109, 105, 102, 45, 109, 97, 116, 99, 104,
+ 105, 102, 45, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110,
+ 99, 101, 105, 102, 45, 110, 111, 110, 101, 45, 109, 97, 116, 99, 104,
+ 105, 102, 45, 114, 97, 110, 103, 101, 105, 102, 45, 117, 110, 109, 111,
+ 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 108, 97, 115,
+ 116, 45, 109, 111, 100, 105, 102, 105, 101, 100, 108, 105, 110, 107, 108,
+ 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111, 114, 119,
+ 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 101,
+ 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45, 97, 117,
+ 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97, 110, 103,
+ 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101, 115, 104,
+ 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101, 114, 118,
+ 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115, 116, 114,
+ 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116, 45, 115,
+ 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102, 101, 114,
+ 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121, 118, 105,
+ 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97,
+ 116, 101, 48, 105, 100, 101, 110, 116, 105, 116, 121, 116, 114, 97, 105,
+ 108, 101, 114, 115, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110,
+ 47, 103, 114, 112, 99, 103, 114, 112, 99, 80, 85, 84, 108, 98, 45,
+ 99, 111, 115, 116, 45, 98, 105, 110, 105, 100, 101, 110, 116, 105, 116,
+ 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105,
+ 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44,
+ 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101,
+ 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
static void static_ref(void* unused) {}
static void static_unref(void* unused) {}
@@ -227,6 +232,7 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
@@ -266,76 +272,77 @@ const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_refcounts[33], {{g_bytes + 415, 31}}},
{&grpc_static_metadata_refcounts[34], {{g_bytes + 446, 36}}},
{&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 28}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 7}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}},
- {&grpc_static_metadata_refcounts[38], {{g_bytes + 521, 11}}},
- {&grpc_static_metadata_refcounts[39], {{g_bytes + 532, 3}}},
- {&grpc_static_metadata_refcounts[40], {{g_bytes + 535, 4}}},
- {&grpc_static_metadata_refcounts[41], {{g_bytes + 539, 1}}},
- {&grpc_static_metadata_refcounts[42], {{g_bytes + 540, 11}}},
- {&grpc_static_metadata_refcounts[43], {{g_bytes + 551, 4}}},
- {&grpc_static_metadata_refcounts[44], {{g_bytes + 555, 5}}},
- {&grpc_static_metadata_refcounts[45], {{g_bytes + 560, 3}}},
- {&grpc_static_metadata_refcounts[46], {{g_bytes + 563, 3}}},
- {&grpc_static_metadata_refcounts[47], {{g_bytes + 566, 3}}},
- {&grpc_static_metadata_refcounts[48], {{g_bytes + 569, 3}}},
- {&grpc_static_metadata_refcounts[49], {{g_bytes + 572, 3}}},
- {&grpc_static_metadata_refcounts[50], {{g_bytes + 575, 3}}},
- {&grpc_static_metadata_refcounts[51], {{g_bytes + 578, 3}}},
- {&grpc_static_metadata_refcounts[52], {{g_bytes + 581, 14}}},
- {&grpc_static_metadata_refcounts[53], {{g_bytes + 595, 13}}},
- {&grpc_static_metadata_refcounts[54], {{g_bytes + 608, 15}}},
- {&grpc_static_metadata_refcounts[55], {{g_bytes + 623, 13}}},
- {&grpc_static_metadata_refcounts[56], {{g_bytes + 636, 6}}},
- {&grpc_static_metadata_refcounts[57], {{g_bytes + 642, 27}}},
- {&grpc_static_metadata_refcounts[58], {{g_bytes + 669, 3}}},
- {&grpc_static_metadata_refcounts[59], {{g_bytes + 672, 5}}},
- {&grpc_static_metadata_refcounts[60], {{g_bytes + 677, 13}}},
- {&grpc_static_metadata_refcounts[61], {{g_bytes + 690, 13}}},
- {&grpc_static_metadata_refcounts[62], {{g_bytes + 703, 19}}},
- {&grpc_static_metadata_refcounts[63], {{g_bytes + 722, 16}}},
- {&grpc_static_metadata_refcounts[64], {{g_bytes + 738, 14}}},
- {&grpc_static_metadata_refcounts[65], {{g_bytes + 752, 16}}},
- {&grpc_static_metadata_refcounts[66], {{g_bytes + 768, 13}}},
- {&grpc_static_metadata_refcounts[67], {{g_bytes + 781, 6}}},
- {&grpc_static_metadata_refcounts[68], {{g_bytes + 787, 4}}},
- {&grpc_static_metadata_refcounts[69], {{g_bytes + 791, 4}}},
- {&grpc_static_metadata_refcounts[70], {{g_bytes + 795, 6}}},
- {&grpc_static_metadata_refcounts[71], {{g_bytes + 801, 7}}},
- {&grpc_static_metadata_refcounts[72], {{g_bytes + 808, 4}}},
- {&grpc_static_metadata_refcounts[73], {{g_bytes + 812, 8}}},
- {&grpc_static_metadata_refcounts[74], {{g_bytes + 820, 17}}},
- {&grpc_static_metadata_refcounts[75], {{g_bytes + 837, 13}}},
- {&grpc_static_metadata_refcounts[76], {{g_bytes + 850, 8}}},
- {&grpc_static_metadata_refcounts[77], {{g_bytes + 858, 19}}},
- {&grpc_static_metadata_refcounts[78], {{g_bytes + 877, 13}}},
- {&grpc_static_metadata_refcounts[79], {{g_bytes + 890, 4}}},
- {&grpc_static_metadata_refcounts[80], {{g_bytes + 894, 8}}},
- {&grpc_static_metadata_refcounts[81], {{g_bytes + 902, 12}}},
- {&grpc_static_metadata_refcounts[82], {{g_bytes + 914, 18}}},
- {&grpc_static_metadata_refcounts[83], {{g_bytes + 932, 19}}},
- {&grpc_static_metadata_refcounts[84], {{g_bytes + 951, 5}}},
- {&grpc_static_metadata_refcounts[85], {{g_bytes + 956, 7}}},
- {&grpc_static_metadata_refcounts[86], {{g_bytes + 963, 7}}},
- {&grpc_static_metadata_refcounts[87], {{g_bytes + 970, 11}}},
- {&grpc_static_metadata_refcounts[88], {{g_bytes + 981, 6}}},
- {&grpc_static_metadata_refcounts[89], {{g_bytes + 987, 10}}},
- {&grpc_static_metadata_refcounts[90], {{g_bytes + 997, 25}}},
- {&grpc_static_metadata_refcounts[91], {{g_bytes + 1022, 17}}},
- {&grpc_static_metadata_refcounts[92], {{g_bytes + 1039, 4}}},
- {&grpc_static_metadata_refcounts[93], {{g_bytes + 1043, 3}}},
- {&grpc_static_metadata_refcounts[94], {{g_bytes + 1046, 16}}},
- {&grpc_static_metadata_refcounts[95], {{g_bytes + 1062, 1}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}},
- {&grpc_static_metadata_refcounts[97], {{g_bytes + 1071, 8}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1079, 16}}},
- {&grpc_static_metadata_refcounts[99], {{g_bytes + 1095, 4}}},
- {&grpc_static_metadata_refcounts[100], {{g_bytes + 1099, 3}}},
- {&grpc_static_metadata_refcounts[101], {{g_bytes + 1102, 11}}},
- {&grpc_static_metadata_refcounts[102], {{g_bytes + 1113, 16}}},
- {&grpc_static_metadata_refcounts[103], {{g_bytes + 1129, 13}}},
- {&grpc_static_metadata_refcounts[104], {{g_bytes + 1142, 12}}},
- {&grpc_static_metadata_refcounts[105], {{g_bytes + 1154, 21}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 80}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 601, 11}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 612, 3}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 615, 4}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 619, 1}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 620, 11}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 631, 4}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 635, 5}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 640, 3}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 643, 3}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 646, 3}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 649, 3}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 652, 3}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 655, 3}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 658, 3}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 661, 14}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 675, 13}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 688, 15}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 703, 13}}},
+ {&grpc_static_metadata_refcounts[57], {{g_bytes + 716, 6}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 722, 27}}},
+ {&grpc_static_metadata_refcounts[59], {{g_bytes + 749, 3}}},
+ {&grpc_static_metadata_refcounts[60], {{g_bytes + 752, 5}}},
+ {&grpc_static_metadata_refcounts[61], {{g_bytes + 757, 13}}},
+ {&grpc_static_metadata_refcounts[62], {{g_bytes + 770, 13}}},
+ {&grpc_static_metadata_refcounts[63], {{g_bytes + 783, 19}}},
+ {&grpc_static_metadata_refcounts[64], {{g_bytes + 802, 16}}},
+ {&grpc_static_metadata_refcounts[65], {{g_bytes + 818, 14}}},
+ {&grpc_static_metadata_refcounts[66], {{g_bytes + 832, 16}}},
+ {&grpc_static_metadata_refcounts[67], {{g_bytes + 848, 13}}},
+ {&grpc_static_metadata_refcounts[68], {{g_bytes + 861, 6}}},
+ {&grpc_static_metadata_refcounts[69], {{g_bytes + 867, 4}}},
+ {&grpc_static_metadata_refcounts[70], {{g_bytes + 871, 4}}},
+ {&grpc_static_metadata_refcounts[71], {{g_bytes + 875, 6}}},
+ {&grpc_static_metadata_refcounts[72], {{g_bytes + 881, 7}}},
+ {&grpc_static_metadata_refcounts[73], {{g_bytes + 888, 4}}},
+ {&grpc_static_metadata_refcounts[74], {{g_bytes + 892, 8}}},
+ {&grpc_static_metadata_refcounts[75], {{g_bytes + 900, 17}}},
+ {&grpc_static_metadata_refcounts[76], {{g_bytes + 917, 13}}},
+ {&grpc_static_metadata_refcounts[77], {{g_bytes + 930, 8}}},
+ {&grpc_static_metadata_refcounts[78], {{g_bytes + 938, 19}}},
+ {&grpc_static_metadata_refcounts[79], {{g_bytes + 957, 13}}},
+ {&grpc_static_metadata_refcounts[80], {{g_bytes + 970, 4}}},
+ {&grpc_static_metadata_refcounts[81], {{g_bytes + 974, 8}}},
+ {&grpc_static_metadata_refcounts[82], {{g_bytes + 982, 12}}},
+ {&grpc_static_metadata_refcounts[83], {{g_bytes + 994, 18}}},
+ {&grpc_static_metadata_refcounts[84], {{g_bytes + 1012, 19}}},
+ {&grpc_static_metadata_refcounts[85], {{g_bytes + 1031, 5}}},
+ {&grpc_static_metadata_refcounts[86], {{g_bytes + 1036, 7}}},
+ {&grpc_static_metadata_refcounts[87], {{g_bytes + 1043, 7}}},
+ {&grpc_static_metadata_refcounts[88], {{g_bytes + 1050, 11}}},
+ {&grpc_static_metadata_refcounts[89], {{g_bytes + 1061, 6}}},
+ {&grpc_static_metadata_refcounts[90], {{g_bytes + 1067, 10}}},
+ {&grpc_static_metadata_refcounts[91], {{g_bytes + 1077, 25}}},
+ {&grpc_static_metadata_refcounts[92], {{g_bytes + 1102, 17}}},
+ {&grpc_static_metadata_refcounts[93], {{g_bytes + 1119, 4}}},
+ {&grpc_static_metadata_refcounts[94], {{g_bytes + 1123, 3}}},
+ {&grpc_static_metadata_refcounts[95], {{g_bytes + 1126, 16}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1142, 1}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1151, 8}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1159, 16}}},
+ {&grpc_static_metadata_refcounts[100], {{g_bytes + 1175, 4}}},
+ {&grpc_static_metadata_refcounts[101], {{g_bytes + 1179, 3}}},
+ {&grpc_static_metadata_refcounts[102], {{g_bytes + 1182, 11}}},
+ {&grpc_static_metadata_refcounts[103], {{g_bytes + 1193, 16}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}},
+ {&grpc_static_metadata_refcounts[105], {{g_bytes + 1222, 12}}},
+ {&grpc_static_metadata_refcounts[106], {{g_bytes + 1234, 21}}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
@@ -345,17 +352,17 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
static const int8_t elems_r[] = {
- 16, 11, -8, 0, 3, -42, -81, -43, 0, 6, -8, 0, 0, 0, -7,
- -3, -10, 0, 0, 0, -1, -2, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, -63, 0, -47, -68, -69, -70, 0, 33,
- 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 20,
- 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
- 4, 4, 4, 3, 10, 9, 0, 0, 0, 0, 0, 0, -3, 0};
+ 15, 10, -8, 0, 2, -42, -81, -43, 0, 6, -8, 0, 0, 0, 2,
+ -3, -10, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, -64, 0, -67, -68, -69, -70, 0,
+ 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21,
+ 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 5, 4, 4, 8, 8, 0, 0, 0, 0, 0, 0, -5, 0};
static uint32_t elems_phash(uint32_t i) {
- i -= 41;
- uint32_t x = i % 104;
- uint32_t y = i / 104;
+ i -= 42;
+ uint32_t x = i % 105;
+ uint32_t y = i / 105;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
uint32_t delta = (uint32_t)elems_r[y];
@@ -365,29 +372,29 @@ static uint32_t elems_phash(uint32_t i) {
}
static const uint16_t elem_keys[] = {
- 257, 258, 259, 260, 261, 262, 263, 1096, 1097, 1513, 1725, 145,
- 146, 467, 468, 1619, 41, 42, 1733, 990, 991, 767, 768, 1627,
- 627, 837, 2043, 2149, 2255, 5541, 5859, 5965, 6071, 6177, 1749, 6283,
- 6389, 6495, 6601, 6707, 6813, 6919, 7025, 7131, 7237, 7343, 7449, 7555,
- 7661, 5753, 7767, 7873, 7979, 8085, 8191, 8297, 8403, 8509, 8615, 8721,
- 8827, 8933, 9039, 9145, 9251, 9357, 9463, 1156, 9569, 523, 9675, 9781,
- 206, 1162, 1163, 1164, 1165, 1792, 1582, 1050, 9887, 9993, 1686, 10735,
- 1799, 0, 0, 0, 0, 0, 347, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 260, 261, 262, 263, 264, 265, 266, 1107, 1108, 1741, 147, 148,
+ 472, 473, 1634, 42, 43, 1527, 1750, 1000, 1001, 774, 775, 1643,
+ 633, 845, 2062, 2169, 2276, 5700, 5914, 6021, 6128, 6235, 1766, 6342,
+ 6449, 6556, 6663, 6770, 6877, 6984, 7091, 7198, 7305, 7412, 7519, 7626,
+ 7733, 7840, 7947, 8054, 8161, 8268, 8375, 8482, 8589, 8696, 8803, 8910,
+ 9017, 9124, 9231, 9338, 9445, 9552, 9659, 1167, 528, 9766, 9873, 208,
+ 9980, 1173, 1174, 1175, 1176, 1809, 10087, 1060, 10194, 10943, 1702, 0,
+ 1816, 0, 0, 1597, 0, 0, 350, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0};
static const uint8_t elem_idxs[] = {
- 7, 8, 9, 10, 11, 12, 13, 77, 79, 30, 71, 1, 2, 5, 6, 25,
- 3, 4, 84, 66, 65, 62, 63, 73, 67, 61, 57, 37, 74, 14, 17, 18,
- 19, 20, 15, 21, 22, 23, 24, 26, 27, 28, 29, 31, 32, 33, 34, 35,
- 36, 16, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- 52, 53, 54, 76, 55, 69, 56, 58, 70, 78, 80, 81, 82, 83, 68, 64,
- 59, 60, 72, 75, 85, 255, 255, 255, 255, 255, 0};
+ 7, 8, 9, 10, 11, 12, 13, 77, 79, 71, 1, 2, 5, 6, 25, 3,
+ 4, 30, 84, 66, 65, 62, 63, 73, 67, 61, 57, 37, 74, 14, 16, 17,
+ 18, 19, 15, 20, 21, 22, 23, 24, 26, 27, 28, 29, 31, 32, 33, 34,
+ 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 76, 69, 55, 56, 70, 58, 78, 80, 81, 82, 83, 59, 64,
+ 60, 75, 72, 255, 85, 255, 255, 68, 255, 255, 0};
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = (uint32_t)(a * 106 + b);
+ uint32_t k = (uint32_t)(a * 107 + b);
uint32_t h = elems_phash(k);
return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
elem_idxs[h] != 255
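Adding the envoy ADS method string grows GRPC_STATIC_MDSTR_COUNT from 106 to 107, so the pair-to-key encoding in grpc_static_mdelem_for_static_strings changes to `k = a * 107 + b`; the multiplier must equal the table size for the packing to stay unambiguous. A quick standalone check of that property (ordinary C++, not the generated tables):

```cpp
#include <cassert>
#include <cstdint>

// Both indices are < kCount, so (a, b) maps to a unique k recoverable by div/mod.
constexpr uint32_t kCount = 107;  // GRPC_STATIC_MDSTR_COUNT after this change

constexpr uint32_t Encode(uint32_t a, uint32_t b) { return a * kCount + b; }

int main() {
  for (uint32_t a = 0; a < kCount; ++a) {
    for (uint32_t b = 0; b < kCount; ++b) {
      uint32_t k = Encode(a, b);
      assert(k / kCount == a && k % kCount == b);  // the encoding is invertible
    }
  }
  return 0;
}
```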
@@ -400,175 +407,175 @@ grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[39], {{g_bytes + 532, 3}}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 612, 3}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[40], {{g_bytes + 535, 4}}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 615, 4}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
- {&grpc_static_metadata_refcounts[41], {{g_bytes + 539, 1}}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 619, 1}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
- {&grpc_static_metadata_refcounts[42], {{g_bytes + 540, 11}}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 620, 11}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[43], {{g_bytes + 551, 4}}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 631, 4}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[44], {{g_bytes + 555, 5}}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 635, 5}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[45], {{g_bytes + 560, 3}}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 640, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[46], {{g_bytes + 563, 3}}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 643, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[47], {{g_bytes + 566, 3}}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 646, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[48], {{g_bytes + 569, 3}}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 649, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[49], {{g_bytes + 572, 3}}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 652, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[50], {{g_bytes + 575, 3}}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 655, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[51], {{g_bytes + 578, 3}}}},
- {{&grpc_static_metadata_refcounts[52], {{g_bytes + 581, 14}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 658, 3}}}},
+ {{&grpc_static_metadata_refcounts[53], {{g_bytes + 661, 14}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[53], {{g_bytes + 595, 13}}}},
- {{&grpc_static_metadata_refcounts[54], {{g_bytes + 608, 15}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 675, 13}}}},
+ {{&grpc_static_metadata_refcounts[55], {{g_bytes + 688, 15}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[55], {{g_bytes + 623, 13}}},
+ {{&grpc_static_metadata_refcounts[56], {{g_bytes + 703, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[56], {{g_bytes + 636, 6}}},
+ {{&grpc_static_metadata_refcounts[57], {{g_bytes + 716, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[57], {{g_bytes + 642, 27}}},
+ {{&grpc_static_metadata_refcounts[58], {{g_bytes + 722, 27}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[58], {{g_bytes + 669, 3}}},
+ {{&grpc_static_metadata_refcounts[59], {{g_bytes + 749, 3}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[59], {{g_bytes + 672, 5}}},
+ {{&grpc_static_metadata_refcounts[60], {{g_bytes + 752, 5}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[60], {{g_bytes + 677, 13}}},
+ {{&grpc_static_metadata_refcounts[61], {{g_bytes + 757, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[61], {{g_bytes + 690, 13}}},
+ {{&grpc_static_metadata_refcounts[62], {{g_bytes + 770, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[62], {{g_bytes + 703, 19}}},
+ {{&grpc_static_metadata_refcounts[63], {{g_bytes + 783, 19}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[63], {{g_bytes + 722, 16}}},
+ {{&grpc_static_metadata_refcounts[64], {{g_bytes + 802, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[64], {{g_bytes + 738, 14}}},
+ {{&grpc_static_metadata_refcounts[65], {{g_bytes + 818, 14}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[65], {{g_bytes + 752, 16}}},
+ {{&grpc_static_metadata_refcounts[66], {{g_bytes + 832, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[66], {{g_bytes + 768, 13}}},
+ {{&grpc_static_metadata_refcounts[67], {{g_bytes + 848, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[67], {{g_bytes + 781, 6}}},
+ {{&grpc_static_metadata_refcounts[68], {{g_bytes + 861, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[68], {{g_bytes + 787, 4}}},
+ {{&grpc_static_metadata_refcounts[69], {{g_bytes + 867, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[69], {{g_bytes + 791, 4}}},
+ {{&grpc_static_metadata_refcounts[70], {{g_bytes + 871, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[70], {{g_bytes + 795, 6}}},
+ {{&grpc_static_metadata_refcounts[71], {{g_bytes + 875, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[71], {{g_bytes + 801, 7}}},
+ {{&grpc_static_metadata_refcounts[72], {{g_bytes + 881, 7}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[72], {{g_bytes + 808, 4}}},
+ {{&grpc_static_metadata_refcounts[73], {{g_bytes + 888, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[73], {{g_bytes + 812, 8}}},
+ {{&grpc_static_metadata_refcounts[74], {{g_bytes + 892, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[74], {{g_bytes + 820, 17}}},
+ {{&grpc_static_metadata_refcounts[75], {{g_bytes + 900, 17}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[75], {{g_bytes + 837, 13}}},
+ {{&grpc_static_metadata_refcounts[76], {{g_bytes + 917, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[76], {{g_bytes + 850, 8}}},
+ {{&grpc_static_metadata_refcounts[77], {{g_bytes + 930, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[77], {{g_bytes + 858, 19}}},
+ {{&grpc_static_metadata_refcounts[78], {{g_bytes + 938, 19}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[78], {{g_bytes + 877, 13}}},
+ {{&grpc_static_metadata_refcounts[79], {{g_bytes + 957, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[79], {{g_bytes + 890, 4}}},
+ {{&grpc_static_metadata_refcounts[80], {{g_bytes + 970, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[80], {{g_bytes + 894, 8}}},
+ {{&grpc_static_metadata_refcounts[81], {{g_bytes + 974, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[81], {{g_bytes + 902, 12}}},
+ {{&grpc_static_metadata_refcounts[82], {{g_bytes + 982, 12}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[82], {{g_bytes + 914, 18}}},
+ {{&grpc_static_metadata_refcounts[83], {{g_bytes + 994, 18}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[83], {{g_bytes + 932, 19}}},
+ {{&grpc_static_metadata_refcounts[84], {{g_bytes + 1012, 19}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[84], {{g_bytes + 951, 5}}},
+ {{&grpc_static_metadata_refcounts[85], {{g_bytes + 1031, 5}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[85], {{g_bytes + 956, 7}}},
+ {{&grpc_static_metadata_refcounts[86], {{g_bytes + 1036, 7}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[86], {{g_bytes + 963, 7}}},
+ {{&grpc_static_metadata_refcounts[87], {{g_bytes + 1043, 7}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[87], {{g_bytes + 970, 11}}},
+ {{&grpc_static_metadata_refcounts[88], {{g_bytes + 1050, 11}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[88], {{g_bytes + 981, 6}}},
+ {{&grpc_static_metadata_refcounts[89], {{g_bytes + 1061, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[89], {{g_bytes + 987, 10}}},
+ {{&grpc_static_metadata_refcounts[90], {{g_bytes + 1067, 10}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[90], {{g_bytes + 997, 25}}},
+ {{&grpc_static_metadata_refcounts[91], {{g_bytes + 1077, 25}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[91], {{g_bytes + 1022, 17}}},
+ {{&grpc_static_metadata_refcounts[92], {{g_bytes + 1102, 17}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[92], {{g_bytes + 1039, 4}}},
+ {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1119, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1043, 3}}},
+ {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1123, 3}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1046, 16}}},
+ {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1126, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
- {&grpc_static_metadata_refcounts[95], {{g_bytes + 1062, 1}}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1142, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 7}}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}}},
{{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
- {&grpc_static_metadata_refcounts[97], {{g_bytes + 1071, 8}}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1151, 8}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1079, 16}}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1159, 16}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[99], {{g_bytes + 1095, 4}}}},
+ {&grpc_static_metadata_refcounts[100], {{g_bytes + 1175, 4}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[100], {{g_bytes + 1099, 3}}}},
+ {&grpc_static_metadata_refcounts[101], {{g_bytes + 1179, 3}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[101], {{g_bytes + 1102, 11}}},
+ {{&grpc_static_metadata_refcounts[102], {{g_bytes + 1182, 11}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 7}}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[102], {{g_bytes + 1113, 16}}}},
+ {&grpc_static_metadata_refcounts[103], {{g_bytes + 1193, 16}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[103], {{g_bytes + 1129, 13}}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[104], {{g_bytes + 1142, 12}}}},
+ {&grpc_static_metadata_refcounts[105], {{g_bytes + 1222, 12}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[105], {{g_bytes + 1154, 21}}}},
+ {&grpc_static_metadata_refcounts[106], {{g_bytes + 1234, 21}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[103], {{g_bytes + 1129, 13}}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}}},
};
const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
79, 80, 81, 82};
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 2bb9f72838..4f9670232c 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -31,7 +31,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 106
+#define GRPC_STATIC_MDSTR_COUNT 107
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
@@ -110,147 +110,151 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* "/grpc.health.v1.Health/Watch" */
#define GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH \
(grpc_static_slice_table[35])
+/* "/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources"
+ */
+#define GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_DISCOVERY_DOT_V2_DOT_AGGREGATEDDISCOVERYSERVICE_SLASH_STREAMAGGREGATEDRESOURCES \
+ (grpc_static_slice_table[36])
/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[36])
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[37])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (grpc_static_slice_table[37])
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[38])
/* "stream/gzip" */
-#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[38])
+#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[39])
/* "GET" */
-#define GRPC_MDSTR_GET (grpc_static_slice_table[39])
+#define GRPC_MDSTR_GET (grpc_static_slice_table[40])
/* "POST" */
-#define GRPC_MDSTR_POST (grpc_static_slice_table[40])
+#define GRPC_MDSTR_POST (grpc_static_slice_table[41])
/* "/" */
-#define GRPC_MDSTR_SLASH (grpc_static_slice_table[41])
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[42])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43])
/* "http" */
-#define GRPC_MDSTR_HTTP (grpc_static_slice_table[43])
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[44])
/* "https" */
-#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[44])
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[45])
/* "200" */
-#define GRPC_MDSTR_200 (grpc_static_slice_table[45])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[46])
/* "204" */
-#define GRPC_MDSTR_204 (grpc_static_slice_table[46])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[47])
/* "206" */
-#define GRPC_MDSTR_206 (grpc_static_slice_table[47])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[48])
/* "304" */
-#define GRPC_MDSTR_304 (grpc_static_slice_table[48])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[49])
/* "400" */
-#define GRPC_MDSTR_400 (grpc_static_slice_table[49])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[50])
/* "404" */
-#define GRPC_MDSTR_404 (grpc_static_slice_table[50])
+#define GRPC_MDSTR_404 (grpc_static_slice_table[51])
/* "500" */
-#define GRPC_MDSTR_500 (grpc_static_slice_table[51])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[52])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[52])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[53])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[53])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[54])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[54])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[55])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[55])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[56])
/* "accept" */
-#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[56])
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[57])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[57])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[58])
/* "age" */
-#define GRPC_MDSTR_AGE (grpc_static_slice_table[58])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[59])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[59])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[60])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[60])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[61])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[61])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[62])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[62])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[63])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[63])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[64])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[64])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[65])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[65])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[66])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[66])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[67])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[67])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[68])
/* "date" */
-#define GRPC_MDSTR_DATE (grpc_static_slice_table[68])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[69])
/* "etag" */
-#define GRPC_MDSTR_ETAG (grpc_static_slice_table[69])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[70])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[70])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[71])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[71])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[72])
/* "from" */
-#define GRPC_MDSTR_FROM (grpc_static_slice_table[72])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[73])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[73])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[74])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[74])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[75])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[75])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[76])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[76])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[77])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[77])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[78])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[78])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[79])
/* "link" */
-#define GRPC_MDSTR_LINK (grpc_static_slice_table[79])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[80])
/* "location" */
-#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[80])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[81])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[81])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[82])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[82])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[83])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[83])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[84])
/* "range" */
-#define GRPC_MDSTR_RANGE (grpc_static_slice_table[84])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[85])
/* "referer" */
-#define GRPC_MDSTR_REFERER (grpc_static_slice_table[85])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[86])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[86])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[87])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[87])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[88])
/* "server" */
-#define GRPC_MDSTR_SERVER (grpc_static_slice_table[88])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[89])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[89])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[90])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[90])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[91])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[91])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[92])
/* "vary" */
-#define GRPC_MDSTR_VARY (grpc_static_slice_table[92])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[93])
/* "via" */
-#define GRPC_MDSTR_VIA (grpc_static_slice_table[93])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[94])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[94])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[95])
/* "0" */
-#define GRPC_MDSTR_0 (grpc_static_slice_table[95])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[96])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[96])
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[97])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[97])
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[98])
/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[98])
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[99])
/* "grpc" */
-#define GRPC_MDSTR_GRPC (grpc_static_slice_table[99])
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[100])
/* "PUT" */
-#define GRPC_MDSTR_PUT (grpc_static_slice_table[100])
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[101])
/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[101])
+#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[102])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[102])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[103])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[103])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[104])
/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[104])
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[105])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (grpc_static_slice_table[105])
+ (grpc_static_slice_table[106])
extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
extern grpc_slice_refcount
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index cbdb77c844..b32f9c6ec1 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -27,6 +27,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -149,7 +150,7 @@ void grpc_transport_move_stats(grpc_transport_stream_stats* from,
}
size_t grpc_transport_stream_size(grpc_transport* transport) {
- return transport->vtable->sizeof_stream;
+ return GPR_ROUND_UP_TO_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
}
void grpc_transport_destroy(grpc_transport* transport) {
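grpc_transport_stream_size now rounds the transport's per-stream size up to the allocation alignment, so whatever the caller places immediately after the stream object in the same arena allocation stays properly aligned. A sketch of the usual power-of-two round-up (the real macro is GPR_ROUND_UP_TO_ALIGNMENT_SIZE in gpr/alloc.h; the alignment value 16 below is only illustrative):

```cpp
#include <cassert>
#include <cstddef>

// Round `size` up to a multiple of `align`, where `align` is a power of two.
constexpr size_t RoundUp(size_t size, size_t align) {
  return (size + align - 1) & ~(align - 1);
}

int main() {
  assert(RoundUp(1, 16) == 16);
  assert(RoundUp(16, 16) == 16);
  assert(RoundUp(17, 16) == 32);
  return 0;
}
```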
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 9e784635c6..5ce568834e 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -81,16 +81,16 @@ void grpc_stream_unref(grpc_stream_refcount* refcount);
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount,
void* buffer, size_t length);
-typedef struct {
- uint64_t framing_bytes;
- uint64_t data_bytes;
- uint64_t header_bytes;
-} grpc_transport_one_way_stats;
+struct grpc_transport_one_way_stats {
+ uint64_t framing_bytes = 0;
+ uint64_t data_bytes = 0;
+ uint64_t header_bytes = 0;
+};
-typedef struct grpc_transport_stream_stats {
+struct grpc_transport_stream_stats {
grpc_transport_one_way_stats incoming;
grpc_transport_one_way_stats outgoing;
-} grpc_transport_stream_stats;
+};
void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from,
grpc_transport_one_way_stats* to);
@@ -121,7 +121,17 @@ typedef struct grpc_transport_stream_op_batch_payload
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
-typedef struct grpc_transport_stream_op_batch {
+struct grpc_transport_stream_op_batch {
+ grpc_transport_stream_op_batch()
+ : send_initial_metadata(false),
+ send_trailing_metadata(false),
+ send_message(false),
+ recv_initial_metadata(false),
+ recv_message(false),
+ recv_trailing_metadata(false),
+ cancel_stream(false),
+ is_traced(false) {}
+
/** Should be scheduled when all of the non-recv operations in the batch
are complete.
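The new grpc_transport_stream_op_batch constructor exists because the op flags are 1-bit bitfields, which (before C++20) cannot take default member initializers the way the pointer members below do; they have to be zeroed in a constructor initializer list instead. Reduced illustration of that constraint:

```cpp
// Bitfields must be initialized in a constructor, while ordinary members
// can rely on default member initializers.
struct OpFlags {
  OpFlags() : send_initial_metadata(false), cancel_stream(false) {}

  void* on_complete = nullptr;     // fine as a default member initializer
  bool send_initial_metadata : 1;  // bitfield: set in the ctor instead
  bool cancel_stream : 1;
};

int main() {
  OpFlags f;  // all flags start false, pointer starts null
  return f.send_initial_metadata ? 1 : 0;
}
```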
@@ -131,10 +141,10 @@ typedef struct grpc_transport_stream_op_batch {
scheduled as soon as the non-recv ops are complete, regardless of
whether or not the recv ops are complete. If a batch contains
only recv ops, on_complete can be null. */
- grpc_closure* on_complete;
+ grpc_closure* on_complete = nullptr;
/** Values for the stream op (fields set are determined by flags above) */
- grpc_transport_stream_op_batch_payload* payload;
+ grpc_transport_stream_op_batch_payload* payload = nullptr;
/** Send initial metadata to the peer, from the provided metadata batch. */
bool send_initial_metadata : 1;
@@ -158,29 +168,41 @@ typedef struct grpc_transport_stream_op_batch {
/** Cancel this stream with the provided error */
bool cancel_stream : 1;
+ /** Is this stream traced */
+ bool is_traced : 1;
+
/***************************************************************************
* remaining fields are initialized and used at the discretion of the
* current handler of the op */
grpc_handler_private_op_data handler_private;
-} grpc_transport_stream_op_batch;
+};
struct grpc_transport_stream_op_batch_payload {
+ explicit grpc_transport_stream_op_batch_payload(
+ grpc_call_context_element* context)
+ : context(context) {}
+ ~grpc_transport_stream_op_batch_payload() {
+ // We don't really own `send_message`, so release ownership and let the
+    // owner clean up the data.
+ send_message.send_message.release();
+ }
+
struct {
- grpc_metadata_batch* send_initial_metadata;
+ grpc_metadata_batch* send_initial_metadata = nullptr;
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
- uint32_t send_initial_metadata_flags;
+ uint32_t send_initial_metadata_flags = 0;
// If non-NULL, will be set by the transport to the peer string (a char*).
// The transport retains ownership of the string.
// Note: This pointer may be used by the transport after the
// send_initial_metadata op is completed. It must remain valid
// until the call is destroyed.
- gpr_atm* peer_string;
+ gpr_atm* peer_string = nullptr;
} send_initial_metadata;
struct {
- grpc_metadata_batch* send_trailing_metadata;
+ grpc_metadata_batch* send_trailing_metadata = nullptr;
} send_trailing_metadata;
struct {
@@ -192,39 +214,39 @@ struct grpc_transport_stream_op_batch_payload {
} send_message;
struct {
- grpc_metadata_batch* recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata = nullptr;
// Flags are used only on the server side. If non-null, will be set to
// a bitfield of the GRPC_INITIAL_METADATA_xxx macros (e.g., to
// indicate if the call is idempotent).
- uint32_t* recv_flags;
+ uint32_t* recv_flags = nullptr;
/** Should be enqueued when initial metadata is ready to be processed. */
- grpc_closure* recv_initial_metadata_ready;
+ grpc_closure* recv_initial_metadata_ready = nullptr;
// If not NULL, will be set to true if trailing metadata is
// immediately available. This may be a signal that we received a
// Trailers-Only response.
- bool* trailing_metadata_available;
+ bool* trailing_metadata_available = nullptr;
// If non-NULL, will be set by the transport to the peer string (a char*).
// The transport retains ownership of the string.
// Note: This pointer may be used by the transport after the
// recv_initial_metadata op is completed. It must remain valid
// until the call is destroyed.
- gpr_atm* peer_string;
+ gpr_atm* peer_string = nullptr;
} recv_initial_metadata;
struct {
// Will be set by the transport to point to the byte stream
// containing a received message.
// Will be NULL if trailing metadata is received instead of a message.
- grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message = nullptr;
/** Should be enqueued when one message is ready to be processed. */
- grpc_closure* recv_message_ready;
+ grpc_closure* recv_message_ready = nullptr;
} recv_message;
struct {
- grpc_metadata_batch* recv_trailing_metadata;
- grpc_transport_stream_stats* collect_stats;
+ grpc_metadata_batch* recv_trailing_metadata = nullptr;
+ grpc_transport_stream_stats* collect_stats = nullptr;
/** Should be enqueued when trailing metadata is ready to be processed. */
- grpc_closure* recv_trailing_metadata_ready;
+ grpc_closure* recv_trailing_metadata_ready = nullptr;
} recv_trailing_metadata;
/** Forcefully close this stream.
@@ -240,7 +262,7 @@ struct grpc_transport_stream_op_batch_payload {
struct {
// Error contract: the transport that gets this op must cause cancel_error
// to be unref'ed after processing it
- grpc_error* cancel_error;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
} cancel_stream;
/* Indexes correspond to grpc_context_index enum values */
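With the constructor and in-class initializers added above, code that builds a batch no longer needs to zero it manually. A minimal usage sketch, not part of this diff; `my_context` and the cancellation error are hypothetical, and on_complete is omitted for brevity:

    // Payload is tied to the call's context array via the new constructor.
    grpc_call_context_element my_context[GRPC_CONTEXT_COUNT] = {};
    grpc_transport_stream_op_batch_payload payload(&my_context[0]);
    // All op flags default to false and all pointers to nullptr.
    grpc_transport_stream_op_batch batch;
    batch.payload = &payload;
    // Only the ops actually used need to be filled in, e.g. a cancellation:
    batch.cancel_stream = true;
    payload.cancel_stream.cancel_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("example cancellation");
    // A real batch would also set on_complete to a closure owned by the caller.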
diff --git a/src/core/lib/uri/uri_parser.cc b/src/core/lib/uri/uri_parser.cc
new file mode 100644
index 0000000000..f212c7d2c0
--- /dev/null
+++ b/src/core/lib/uri/uri_parser.cc
@@ -0,0 +1,314 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/uri/uri_parser.h"
+
+#include <string.h>
+
+#include <grpc/slice_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/slice/percent_encoding.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+
+/** A sentinel size_t value meaning "not set": all bits set to 1 */
+#define NOT_SET (~(size_t)0)
+
+static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
+ bool suppress_errors) {
+ char* line_prefix;
+ size_t pfx_len;
+
+ if (!suppress_errors) {
+ gpr_asprintf(&line_prefix, "bad uri.%s: '", section);
+ pfx_len = strlen(line_prefix) + pos;
+ gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
+ gpr_free(line_prefix);
+
+ line_prefix = static_cast<char*>(gpr_malloc(pfx_len + 1));
+ memset(line_prefix, ' ', pfx_len);
+ line_prefix[pfx_len] = 0;
+ gpr_log(GPR_ERROR, "%s^ here", line_prefix);
+ gpr_free(line_prefix);
+ }
+
+ return nullptr;
+}
+
+/** Returns a copy of percent decoded \a src[begin, end) */
+static char* decode_and_copy_component(const char* src, size_t begin,
+ size_t end) {
+ grpc_slice component =
+ (begin == NOT_SET || end == NOT_SET)
+ ? grpc_empty_slice()
+ : grpc_slice_from_copied_buffer(src + begin, end - begin);
+ grpc_slice decoded_component =
+ grpc_permissive_percent_decode_slice(component);
+ char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
+ grpc_slice_unref_internal(component);
+ grpc_slice_unref_internal(decoded_component);
+ return out;
+}
+
+static bool valid_hex(char c) {
+ return ((c >= 'a') && (c <= 'f')) || ((c >= 'A') && (c <= 'F')) ||
+ ((c >= '0') && (c <= '9'));
+}
+
+/** Returns how many chars to advance if \a uri_text[i] begins a valid \a pchar
+ * production. If \a uri_text[i] introduces an invalid \a pchar (such as percent
+ * sign not followed by two hex digits), NOT_SET is returned. */
+static size_t parse_pchar(const char* uri_text, size_t i) {
+ /* pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+ * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+ * pct-encoded = "%" HEXDIG HEXDIG
+ * sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+ / "*" / "+" / "," / ";" / "=" */
+ char c = uri_text[i];
+ switch (c) {
+ default:
+ if (((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) ||
+ ((c >= '0') && (c <= '9'))) {
+ return 1;
+ }
+ break;
+ case ':':
+ case '@':
+ case '-':
+ case '.':
+ case '_':
+ case '~':
+ case '!':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case ',':
+ case ';':
+ case '=':
+ return 1;
+ case '%': /* pct-encoded */
+ if (valid_hex(uri_text[i + 1]) && valid_hex(uri_text[i + 2])) {
+ return 2;
+ }
+ return NOT_SET;
+ }
+ return 0;
+}
+
+/* *( pchar / "?" / "/" ) */
+static int parse_fragment_or_query(const char* uri_text, size_t* i) {
+ char c;
+ while ((c = uri_text[*i]) != 0) {
+ const size_t advance = parse_pchar(uri_text, *i); /* pchar */
+ switch (advance) {
+ case 0: /* uri_text[i] isn't in pchar */
+ /* maybe it's ? or / */
+ if (uri_text[*i] == '?' || uri_text[*i] == '/') {
+ (*i)++;
+ break;
+ } else {
+ return 1;
+ }
+ GPR_UNREACHABLE_CODE(return 0);
+ default:
+ (*i) += advance;
+ break;
+ case NOT_SET: /* uri_text[i] introduces an invalid URI */
+ return 0;
+ }
+ }
+ /* *i is the first uri_text position past the \a query production, maybe \0 */
+ return 1;
+}
+
+static void parse_query_parts(grpc_uri* uri) {
+ static const char* QUERY_PARTS_SEPARATOR = "&";
+ static const char* QUERY_PARTS_VALUE_SEPARATOR = "=";
+ GPR_ASSERT(uri->query != nullptr);
+ if (uri->query[0] == '\0') {
+ uri->query_parts = nullptr;
+ uri->query_parts_values = nullptr;
+ uri->num_query_parts = 0;
+ return;
+ }
+
+ gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
+ &uri->num_query_parts);
+ uri->query_parts_values =
+      static_cast<char**>(gpr_malloc(uri->num_query_parts * sizeof(char*)));
+ for (size_t i = 0; i < uri->num_query_parts; i++) {
+ char** query_param_parts;
+ size_t num_query_param_parts;
+ char* full = uri->query_parts[i];
+ gpr_string_split(full, QUERY_PARTS_VALUE_SEPARATOR, &query_param_parts,
+ &num_query_param_parts);
+ GPR_ASSERT(num_query_param_parts > 0);
+ uri->query_parts[i] = query_param_parts[0];
+ if (num_query_param_parts > 1) {
+ /* TODO(dgq): only the first value after the separator is considered.
+ * Perhaps all chars after the first separator for the query part should
+ * be included, even if they include the separator. */
+ uri->query_parts_values[i] = query_param_parts[1];
+ } else {
+ uri->query_parts_values[i] = nullptr;
+ }
+ for (size_t j = 2; j < num_query_param_parts; j++) {
+ gpr_free(query_param_parts[j]);
+ }
+ gpr_free(query_param_parts);
+ gpr_free(full);
+ }
+}
+
+grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors) {
+ grpc_uri* uri;
+ size_t scheme_begin = 0;
+ size_t scheme_end = NOT_SET;
+ size_t authority_begin = NOT_SET;
+ size_t authority_end = NOT_SET;
+ size_t path_begin = NOT_SET;
+ size_t path_end = NOT_SET;
+ size_t query_begin = NOT_SET;
+ size_t query_end = NOT_SET;
+ size_t fragment_begin = NOT_SET;
+ size_t fragment_end = NOT_SET;
+ size_t i;
+
+ for (i = scheme_begin; uri_text[i] != 0; i++) {
+ if (uri_text[i] == ':') {
+ scheme_end = i;
+ break;
+ }
+ if (uri_text[i] >= 'a' && uri_text[i] <= 'z') continue;
+ if (uri_text[i] >= 'A' && uri_text[i] <= 'Z') continue;
+ if (i != scheme_begin) {
+ if (uri_text[i] >= '0' && uri_text[i] <= '9') continue;
+ if (uri_text[i] == '+') continue;
+ if (uri_text[i] == '-') continue;
+ if (uri_text[i] == '.') continue;
+ }
+ break;
+ }
+ if (scheme_end == NOT_SET) {
+ return bad_uri(uri_text, i, "scheme", suppress_errors);
+ }
+
+ if (uri_text[scheme_end + 1] == '/' && uri_text[scheme_end + 2] == '/') {
+ authority_begin = scheme_end + 3;
+ for (i = authority_begin; uri_text[i] != 0 && authority_end == NOT_SET;
+ i++) {
+ if (uri_text[i] == '/' || uri_text[i] == '?' || uri_text[i] == '#') {
+ authority_end = i;
+ }
+ }
+ if (authority_end == NOT_SET && uri_text[i] == 0) {
+ authority_end = i;
+ }
+ if (authority_end == NOT_SET) {
+ return bad_uri(uri_text, i, "authority", suppress_errors);
+ }
+ /* TODO(ctiller): parse the authority correctly */
+ path_begin = authority_end;
+ } else {
+ path_begin = scheme_end + 1;
+ }
+
+ for (i = path_begin; uri_text[i] != 0; i++) {
+ if (uri_text[i] == '?' || uri_text[i] == '#') {
+ path_end = i;
+ break;
+ }
+ }
+ if (path_end == NOT_SET && uri_text[i] == 0) {
+ path_end = i;
+ }
+ if (path_end == NOT_SET) {
+ return bad_uri(uri_text, i, "path", suppress_errors);
+ }
+
+ if (uri_text[i] == '?') {
+ query_begin = ++i;
+ if (!parse_fragment_or_query(uri_text, &i)) {
+ return bad_uri(uri_text, i, "query", suppress_errors);
+ } else if (uri_text[i] != 0 && uri_text[i] != '#') {
+ /* We must be at the end or at the beginning of a fragment */
+ return bad_uri(uri_text, i, "query", suppress_errors);
+ }
+ query_end = i;
+ }
+ if (uri_text[i] == '#') {
+ fragment_begin = ++i;
+ if (!parse_fragment_or_query(uri_text, &i)) {
+      return bad_uri(uri_text, i, "fragment", suppress_errors);
+ } else if (uri_text[i] != 0) {
+ /* We must be at the end */
+ return bad_uri(uri_text, i, "fragment", suppress_errors);
+ }
+ fragment_end = i;
+ }
+
+ uri = static_cast<grpc_uri*>(gpr_zalloc(sizeof(*uri)));
+ uri->scheme = decode_and_copy_component(uri_text, scheme_begin, scheme_end);
+ uri->authority =
+ decode_and_copy_component(uri_text, authority_begin, authority_end);
+ uri->path = decode_and_copy_component(uri_text, path_begin, path_end);
+ uri->query = decode_and_copy_component(uri_text, query_begin, query_end);
+ uri->fragment =
+ decode_and_copy_component(uri_text, fragment_begin, fragment_end);
+ parse_query_parts(uri);
+
+ return uri;
+}
+
+const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key) {
+ GPR_ASSERT(key != nullptr);
+ if (key[0] == '\0') return nullptr;
+
+ for (size_t i = 0; i < uri->num_query_parts; ++i) {
+ if (0 == strcmp(key, uri->query_parts[i])) {
+ return uri->query_parts_values[i];
+ }
+ }
+ return nullptr;
+}
+
+void grpc_uri_destroy(grpc_uri* uri) {
+ if (!uri) return;
+ gpr_free(uri->scheme);
+ gpr_free(uri->authority);
+ gpr_free(uri->path);
+ gpr_free(uri->query);
+ for (size_t i = 0; i < uri->num_query_parts; ++i) {
+ gpr_free(uri->query_parts[i]);
+ gpr_free(uri->query_parts_values[i]);
+ }
+ gpr_free(uri->query_parts);
+ gpr_free(uri->query_parts_values);
+ gpr_free(uri->fragment);
+ gpr_free(uri);
+}
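To make the query handling above concrete: given a hypothetical input (illustrative only), grpc_uri_parse and parse_query_parts would produce the following fields.

    // Input (hypothetical): "dns:///example.com?balancer=round_robin&insecure"
    // uri->scheme             == "dns"
    // uri->authority          == ""
    // uri->path               == "/example.com"
    // uri->query              == "balancer=round_robin&insecure"
    // uri->num_query_parts    == 2
    // uri->query_parts        == {"balancer", "insecure"}
    // uri->query_parts_values == {"round_robin", NULL}  // no '=' => NULL value
    // uri->fragment           == ""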
diff --git a/src/core/lib/uri/uri_parser.h b/src/core/lib/uri/uri_parser.h
new file mode 100644
index 0000000000..b6771bbde3
--- /dev/null
+++ b/src/core/lib/uri/uri_parser.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_URI_URI_PARSER_H
+#define GRPC_CORE_LIB_URI_URI_PARSER_H
+
+#include <grpc/support/port_platform.h>
+
+#include <stddef.h>
+
+typedef struct {
+ char* scheme;
+ char* authority;
+ char* path;
+ char* query;
+  /** Query part names: for each '&'-separated part, the text before any '=' */
+ char** query_parts;
+ /** Number of elements in \a query_parts and \a query_parts_values */
+ size_t num_query_parts;
+  /** Query part values: the text after '=' for each part; NULL if no '=' */
+ char** query_parts_values;
+ char* fragment;
+} grpc_uri;
+
+/** parse a uri, return NULL on failure */
+grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors);
+
+/** return the value of query parameter \a key (the part after the '=' in
+ * "?key=xxx&..."), or NULL if the key is not present or has no value */
+const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key);
+
+/** destroy a uri */
+void grpc_uri_destroy(grpc_uri* uri);
+
+#endif /* GRPC_CORE_LIB_URI_URI_PARSER_H */
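A short usage sketch of the public API declared above, using the same hypothetical URI as earlier (illustrative only; assumes the library has been initialized):

    #include <string.h>

    #include <grpc/support/log.h>

    #include "src/core/lib/uri/uri_parser.h"

    static void uri_example() {
      grpc_uri* uri = grpc_uri_parse(
          "dns:///example.com?balancer=round_robin&insecure",
          false /* suppress_errors */);
      GPR_ASSERT(uri != nullptr);
      GPR_ASSERT(strcmp(uri->scheme, "dns") == 0);
      GPR_ASSERT(strcmp(uri->path, "/example.com") == 0);
      // Key with a value:
      GPR_ASSERT(
          strcmp(grpc_uri_get_query_arg(uri, "balancer"), "round_robin") == 0);
      // Key present but without a value yields NULL:
      GPR_ASSERT(grpc_uri_get_query_arg(uri, "insecure") == nullptr);
      grpc_uri_destroy(uri);
    }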