aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar Vijay Pai <vpai@google.com>2018-03-01 11:48:29 -0800
committerGravatar Vijay Pai <vpai@google.com>2018-03-01 11:48:29 -0800
commitb357f2f548884e0f1766b5b1c3b12e1aa64642cc (patch)
tree84a5f405562ee887fe79fb36b8a7fe42773933c6 /src
parent2fe87b09055cd256cdce038c4c70d92b955c991b (diff)
parentccd1d55807bdb13b661dcf1d651468b2d98ff5af (diff)
Merge branch 'master' into 2phase_thd
Diffstat (limited to 'src')
-rw-r--r--src/compiler/csharp_generator.cc15
-rw-r--r--src/compiler/objective_c_generator.cc104
-rw-r--r--src/compiler/objective_c_generator.h10
-rw-r--r--src/compiler/objective_c_generator_helpers.h40
-rw-r--r--src/compiler/objective_c_plugin.cc145
-rw-r--r--src/core/ext/filters/client_channel/backup_poller.cc19
-rw-r--r--src/core/ext/filters/client_channel/backup_poller.h2
-rw-r--r--src/core/ext/filters/client_channel/client_channel.cc2197
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc55
-rw-r--r--src/core/ext/filters/client_channel/method_params.cc178
-rw-r--r--src/core/ext/filters/client_channel/method_params.h74
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc8
-rw-r--r--src/core/ext/filters/client_channel/retry_throttle.cc4
-rw-r--r--src/core/ext/filters/client_channel/status_util.cc100
-rw-r--r--src/core/ext/filters/client_channel/status_util.h58
-rw-r--r--src/core/ext/filters/client_channel/subchannel.cc15
-rw-r--r--src/core/ext/filters/client_channel/subchannel.h9
-rw-r--r--src/core/ext/filters/max_age/max_age_filter.cc3
-rw-r--r--src/core/ext/filters/message_size/message_size_filter.cc89
-rw-r--r--src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc39
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.cc12
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.cc3
-rw-r--r--src/core/lib/gpr/arena.cc45
-rw-r--r--src/core/lib/gprpp/orphanable.h1
-rw-r--r--src/core/lib/gprpp/ref_counted.h2
-rw-r--r--src/core/lib/gprpp/ref_counted_ptr.h1
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.cc3
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.h3
-rw-r--r--src/core/lib/security/security_connector/security_connector.cc13
-rw-r--r--src/core/lib/security/transport/lb_targets_info.cc61
-rw-r--r--src/core/lib/security/transport/target_authority_table.cc75
-rw-r--r--src/core/lib/security/transport/target_authority_table.h40
-rw-r--r--src/core/lib/slice/slice_hash_table.cc147
-rw-r--r--src/core/lib/slice/slice_hash_table.h221
-rw-r--r--src/core/lib/slice/slice_weak_hash_table.h105
-rw-r--r--src/core/lib/surface/call.cc30
-rw-r--r--src/core/lib/transport/metadata_batch.cc24
-rw-r--r--src/core/lib/transport/metadata_batch.h9
-rw-r--r--src/core/lib/transport/service_config.cc194
-rw-r--r--src/core/lib/transport/service_config.h256
-rw-r--r--src/core/lib/transport/static_metadata.cc602
-rw-r--r--src/core/lib/transport/static_metadata.h176
-rw-r--r--src/core/lib/transport/status_metadata.cc54
-rw-r--r--src/core/lib/transport/status_metadata.h (renamed from src/core/lib/security/transport/lb_targets_info.h)16
-rw-r--r--src/core/lib/transport/transport.h27
-rw-r--r--src/csharp/Grpc.Examples/MathGrpc.cs13
-rw-r--r--src/csharp/Grpc.HealthCheck/HealthGrpc.cs7
-rw-r--r--src/csharp/Grpc.IntegrationTesting/Control.cs142
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj2
-rw-r--r--src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs9
-rw-r--r--src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs31
-rw-r--r--src/csharp/Grpc.IntegrationTesting/TestGrpc.cs39
-rw-r--r--src/csharp/Grpc.Reflection/ReflectionGrpc.cs5
-rw-r--r--src/csharp/global.json2
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.m136
-rw-r--r--src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h58
-rw-r--r--src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m199
-rw-r--r--src/objective-c/GRPCClient/private/GRPCHost.m25
-rw-r--r--src/python/grpcio/grpc_core_dependencies.py6
59 files changed, 4128 insertions, 1830 deletions
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index 7c97056402..6e2730579a 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -451,8 +451,10 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor* service) {
out->Print(
"public virtual $response$ $methodname$($request$ request, "
"grpc::Metadata "
- "headers = null, DateTime? deadline = null, CancellationToken "
- "cancellationToken = default(CancellationToken))\n",
+ "headers = null, global::System.DateTime? deadline = null, "
+ "global::System.Threading.CancellationToken "
+ "cancellationToken = "
+ "default(global::System.Threading.CancellationToken))\n",
"methodname", method->name(), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
@@ -492,8 +494,10 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor* service) {
out->Print(
"public virtual $returntype$ "
"$methodname$($request_maybe$grpc::Metadata "
- "headers = null, DateTime? deadline = null, CancellationToken "
- "cancellationToken = default(CancellationToken))\n",
+ "headers = null, global::System.DateTime? deadline = null, "
+ "global::System.Threading.CancellationToken "
+ "cancellationToken = "
+ "default(global::System.Threading.CancellationToken))\n",
"methodname", method_name, "request_maybe",
GetMethodRequestParamMaybe(method), "returntype",
GetMethodReturnTypeClient(method));
@@ -675,9 +679,6 @@ grpc::string GetServices(const FileDescriptor* file, bool generate_client,
out.Print("#pragma warning disable 1591\n");
out.Print("#region Designer generated code\n");
out.Print("\n");
- out.Print("using System;\n");
- out.Print("using System.Threading;\n");
- out.Print("using System.Threading.Tasks;\n");
out.Print("using grpc = global::Grpc.Core;\n");
out.Print("\n");
diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc
index ab7d869758..ffdeb8f6b0 100644
--- a/src/compiler/objective_c_generator.cc
+++ b/src/compiler/objective_c_generator.cc
@@ -212,37 +212,49 @@ void PrintMethodImplementations(Printer* printer,
return output;
}
-::grpc::string GetHeader(const ServiceDescriptor* service) {
+::grpc::string GetProtocol(const ServiceDescriptor* service) {
::grpc::string output;
- {
- // Scope the output stream so it closes and finalizes output to the string.
- grpc::protobuf::io::StringOutputStream output_stream(&output);
- Printer printer(&output_stream, '$');
-
- map< ::grpc::string, ::grpc::string> vars = {
- {"service_class", ServiceClassName(service)}};
- printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
+ // Scope the output stream so it closes and finalizes output to the string.
+ grpc::protobuf::io::StringOutputStream output_stream(&output);
+ Printer printer(&output_stream, '$');
- for (int i = 0; i < service->method_count(); i++) {
- PrintMethodDeclarations(&printer, service->method(i));
- }
- printer.Print("@end\n\n");
+ map< ::grpc::string, ::grpc::string> vars = {
+ {"service_class", ServiceClassName(service)}};
- printer.Print(
- "/**\n"
- " * Basic service implementation, over gRPC, that only does\n"
- " * marshalling and parsing.\n"
- " */\n");
- printer.Print(vars,
- "@interface $service_class$ :"
- " GRPCProtoService<$service_class$>\n");
- printer.Print(
- "- (instancetype)initWithHost:(NSString *)host"
- " NS_DESIGNATED_INITIALIZER;\n");
- printer.Print("+ (instancetype)serviceWithHost:(NSString *)host;\n");
- printer.Print("@end\n");
+ printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
+ for (int i = 0; i < service->method_count(); i++) {
+ PrintMethodDeclarations(&printer, service->method(i));
}
+ printer.Print("@end\n\n");
+
+ return output;
+}
+
+::grpc::string GetInterface(const ServiceDescriptor* service) {
+ ::grpc::string output;
+
+ // Scope the output stream so it closes and finalizes output to the string.
+ grpc::protobuf::io::StringOutputStream output_stream(&output);
+ Printer printer(&output_stream, '$');
+
+ map< ::grpc::string, ::grpc::string> vars = {
+ {"service_class", ServiceClassName(service)}};
+
+ printer.Print(vars,
+ "/**\n"
+ " * Basic service implementation, over gRPC, that only does\n"
+ " * marshalling and parsing.\n"
+ " */\n");
+ printer.Print(vars,
+ "@interface $service_class$ :"
+ " GRPCProtoService<$service_class$>\n");
+ printer.Print(
+ "- (instancetype)initWithHost:(NSString *)host"
+ " NS_DESIGNATED_INITIALIZER;\n");
+ printer.Print("+ (instancetype)serviceWithHost:(NSString *)host;\n");
+ printer.Print("@end\n");
+
return output;
}
@@ -258,26 +270,32 @@ void PrintMethodImplementations(Printer* printer,
{"service_class", ServiceClassName(service)},
{"package", service->file()->package()}};
- printer.Print(vars, "@implementation $service_class$\n\n");
+ printer.Print(vars,
+ "@implementation $service_class$\n\n"
+ "// Designated initializer\n"
+ "- (instancetype)initWithHost:(NSString *)host {\n"
+ " self = [super initWithHost:host\n"
+ " packageName:@\"$package$\"\n"
+ " serviceName:@\"$service_name$\"];\n"
+ " return self;\n"
+ "}\n\n");
- printer.Print("// Designated initializer\n");
- printer.Print("- (instancetype)initWithHost:(NSString *)host {\n");
- printer.Print(
- vars,
- " return (self = [super initWithHost:host"
- " packageName:@\"$package$\" serviceName:@\"$service_name$\"]);\n");
- printer.Print("}\n\n");
printer.Print(
"// Override superclass initializer to disallow different"
- " package and service names.\n");
- printer.Print("- (instancetype)initWithHost:(NSString *)host\n");
- printer.Print(" packageName:(NSString *)packageName\n");
- printer.Print(" serviceName:(NSString *)serviceName {\n");
- printer.Print(" return [self initWithHost:host];\n");
- printer.Print("}\n\n");
- printer.Print("+ (instancetype)serviceWithHost:(NSString *)host {\n");
- printer.Print(" return [[self alloc] initWithHost:host];\n");
- printer.Print("}\n\n\n");
+ " package and service names.\n"
+ "- (instancetype)initWithHost:(NSString *)host\n"
+ " packageName:(NSString *)packageName\n"
+ " serviceName:(NSString *)serviceName {\n"
+ " return [self initWithHost:host];\n"
+ "}\n\n");
+
+ printer.Print(
+ "#pragma mark - Class Methods\n\n"
+ "+ (instancetype)serviceWithHost:(NSString *)host {\n"
+ " return [[self alloc] initWithHost:host];\n"
+ "}\n\n");
+
+ printer.Print("#pragma mark - Method Implementations\n\n");
for (int i = 0; i < service->method_count(); i++) {
PrintMethodImplementations(&printer, service->method(i));
diff --git a/src/compiler/objective_c_generator.h b/src/compiler/objective_c_generator.h
index d3aed76c4f..eb1c7ff005 100644
--- a/src/compiler/objective_c_generator.h
+++ b/src/compiler/objective_c_generator.h
@@ -31,9 +31,13 @@ using ::grpc::string;
// Returns forward declaration of classes in the generated header file.
string GetAllMessageClasses(const FileDescriptor* file);
-// Returns the content to be included in the "global_scope" insertion point of
-// the generated header file.
-string GetHeader(const ServiceDescriptor* service);
+// Returns the content to be included defining the @protocol segment at the
+// insertion point of the generated implementation file.
+string GetProtocol(const ServiceDescriptor* service);
+
+// Returns the content to be included defining the @interface segment at the
+// insertion point of the generated implementation file.
+string GetInterface(const ServiceDescriptor* service);
// Returns the content to be included in the "global_scope" insertion point of
// the generated implementation file.
diff --git a/src/compiler/objective_c_generator_helpers.h b/src/compiler/objective_c_generator_helpers.h
index 4004e6aef8..a284da97f4 100644
--- a/src/compiler/objective_c_generator_helpers.h
+++ b/src/compiler/objective_c_generator_helpers.h
@@ -40,5 +40,45 @@ inline string ServiceClassName(const ServiceDescriptor* service) {
string prefix = file->options().objc_class_prefix();
return prefix + service->name();
}
+
+inline ::grpc::string LocalImport(const ::grpc::string& import) {
+ return ::grpc::string("#import \"" + import + "\"\n");
+}
+
+inline ::grpc::string SystemImport(const ::grpc::string& import) {
+ return ::grpc::string("#import <" + import + ">\n");
+}
+
+inline ::grpc::string PreprocConditional(::grpc::string symbol, bool invert) {
+ return invert ? "!defined(" + symbol + ") || !" + symbol
+ : "defined(" + symbol + ") && " + symbol;
+}
+
+inline ::grpc::string PreprocIf(const ::grpc::string& symbol,
+ const ::grpc::string& if_true) {
+ return ::grpc::string("#if " + PreprocConditional(symbol, false) + "\n" +
+ if_true + "#endif\n");
+}
+
+inline ::grpc::string PreprocIfNot(const ::grpc::string& symbol,
+ const ::grpc::string& if_true) {
+ return ::grpc::string("#if " + PreprocConditional(symbol, true) + "\n" +
+ if_true + "#endif\n");
+}
+
+inline ::grpc::string PreprocIfElse(const ::grpc::string& symbol,
+ const ::grpc::string& if_true,
+ const ::grpc::string& if_false) {
+ return ::grpc::string("#if " + PreprocConditional(symbol, false) + "\n" +
+ if_true + "#else\n" + if_false + "#endif\n");
+}
+
+inline ::grpc::string PreprocIfNotElse(const ::grpc::string& symbol,
+ const ::grpc::string& if_true,
+ const ::grpc::string& if_false) {
+ return ::grpc::string("#if " + PreprocConditional(symbol, true) + "\n" +
+ if_true + "#else\n" + if_false + "#endif\n");
+}
+
} // namespace grpc_objective_c_generator
#endif // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H
diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc
index d5d488e84d..76703d79cd 100644
--- a/src/compiler/objective_c_plugin.cc
+++ b/src/compiler/objective_c_plugin.cc
@@ -29,12 +29,42 @@
using ::google::protobuf::compiler::objectivec::
IsProtobufLibraryBundledProtoFile;
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
+using ::grpc_objective_c_generator::LocalImport;
+using ::grpc_objective_c_generator::PreprocIfElse;
+using ::grpc_objective_c_generator::PreprocIfNot;
+using ::grpc_objective_c_generator::SystemImport;
+
+namespace {
+
+inline ::grpc::string ImportProtoHeaders(
+ const grpc::protobuf::FileDescriptor* dep, const char* indent) {
+ ::grpc::string header = grpc_objective_c_generator::MessageHeaderName(dep);
+
+ if (!IsProtobufLibraryBundledProtoFile(dep)) {
+ return indent + LocalImport(header);
+ }
+
+ ::grpc::string base_name = header;
+ grpc_generator::StripPrefix(&base_name, "google/protobuf/");
+ // create the import code snippet
+ ::grpc::string framework_header =
+ ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name;
+
+ static const ::grpc::string kFrameworkImportsCondition =
+ "GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS";
+ return PreprocIfElse(kFrameworkImportsCondition,
+ indent + SystemImport(framework_header),
+ indent + LocalImport(header));
+}
+
+} // namespace
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
ObjectiveCGrpcGenerator() {}
virtual ~ObjectiveCGrpcGenerator() {}
+ public:
virtual bool Generate(const grpc::protobuf::FileDescriptor* file,
const ::grpc::string& parameter,
grpc::protobuf::compiler::GeneratorContext* context,
@@ -44,97 +74,68 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
return true;
}
+ static const ::grpc::string kNonNullBegin = "NS_ASSUME_NONNULL_BEGIN\n";
+ static const ::grpc::string kNonNullEnd = "NS_ASSUME_NONNULL_END\n";
+ static const ::grpc::string kProtocolOnly = "GPB_GRPC_PROTOCOL_ONLY";
+ static const ::grpc::string kForwardDeclare =
+ "GPB_GRPC_FORWARD_DECLARE_MESSAGE_PROTO";
+
::grpc::string file_name =
google::protobuf::compiler::objectivec::FilePath(file);
- ::grpc::string prefix = file->options().objc_class_prefix();
{
// Generate .pbrpc.h
- ::grpc::string imports =
- ::grpc::string("#if !GPB_GRPC_FORWARD_DECLARE_MESSAGE_PROTO\n") +
- "#import \"" + file_name +
- ".pbobjc.h\"\n"
- "#endif\n\n"
- "#import <ProtoRPC/ProtoService.h>\n"
- "#import <ProtoRPC/ProtoRPC.h>\n"
- "#import <RxLibrary/GRXWriteable.h>\n"
- "#import <RxLibrary/GRXWriter.h>\n";
-
- ::grpc::string proto_imports;
- proto_imports += "#if GPB_GRPC_FORWARD_DECLARE_MESSAGE_PROTO\n" +
- grpc_objective_c_generator::GetAllMessageClasses(file) +
- "#else\n";
+ ::grpc::string imports = LocalImport(file_name + ".pbobjc.h");
+
+ ::grpc::string system_imports = SystemImport("ProtoRPC/ProtoService.h") +
+ SystemImport("ProtoRPC/ProtoRPC.h") +
+ SystemImport("RxLibrary/GRXWriteable.h") +
+ SystemImport("RxLibrary/GRXWriter.h");
+
+ ::grpc::string forward_declarations = "@class GRPCProtoCall;\n\n";
+
+ ::grpc::string class_declarations =
+ grpc_objective_c_generator::GetAllMessageClasses(file);
+
+ ::grpc::string class_imports;
for (int i = 0; i < file->dependency_count(); i++) {
- ::grpc::string header =
- grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
- const grpc::protobuf::FileDescriptor* dependency = file->dependency(i);
- if (IsProtobufLibraryBundledProtoFile(dependency)) {
- ::grpc::string base_name = header;
- grpc_generator::StripPrefix(&base_name, "google/protobuf/");
- // create the import code snippet
- proto_imports +=
- " #if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
- " #import <" +
- ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name +
- ">\n"
- " #else\n"
- " #import \"" +
- header +
- "\"\n"
- " #endif\n";
- } else {
- proto_imports += ::grpc::string(" #import \"") + header + "\"\n";
- }
+ class_imports += ImportProtoHeaders(file->dependency(i), " ");
}
- proto_imports += "#endif\n";
- ::grpc::string declarations;
+ ::grpc::string protocols;
for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor* service = file->service(i);
- declarations += grpc_objective_c_generator::GetHeader(service);
+ protocols += grpc_objective_c_generator::GetProtocol(service);
}
- static const ::grpc::string kNonNullBegin =
- "\nNS_ASSUME_NONNULL_BEGIN\n\n";
- static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n";
+ ::grpc::string interfaces;
+ for (int i = 0; i < file->service_count(); i++) {
+ const grpc::protobuf::ServiceDescriptor* service = file->service(i);
+ interfaces += grpc_objective_c_generator::GetInterface(service);
+ }
Write(context, file_name + ".pbrpc.h",
- imports + '\n' + proto_imports + '\n' + kNonNullBegin +
- declarations + kNonNullEnd);
+ PreprocIfNot(kForwardDeclare, imports) + "\n" +
+ PreprocIfNot(kProtocolOnly, system_imports) + "\n" +
+ PreprocIfElse(kForwardDeclare, class_declarations,
+ class_imports) +
+ "\n" + forward_declarations + "\n" + kNonNullBegin + "\n" +
+ protocols + "\n" + PreprocIfNot(kProtocolOnly, interfaces) +
+ "\n" + kNonNullEnd + "\n");
}
{
// Generate .pbrpc.m
- ::grpc::string imports = ::grpc::string("#import \"") + file_name +
- ".pbrpc.h\"\n"
- "#import \"" +
- file_name +
- ".pbobjc.h\"\n\n"
- "#import <ProtoRPC/ProtoRPC.h>\n"
- "#import <RxLibrary/GRXWriter+Immediate.h>\n";
+ ::grpc::string imports = LocalImport(file_name + ".pbrpc.h") +
+ LocalImport(file_name + ".pbobjc.h") +
+ SystemImport("ProtoRPC/ProtoRPC.h") +
+ SystemImport("RxLibrary/GRXWriter+Immediate.h");
+
+ ::grpc::string class_imports;
for (int i = 0; i < file->dependency_count(); i++) {
- ::grpc::string header =
- grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
- const grpc::protobuf::FileDescriptor* dependency = file->dependency(i);
- if (IsProtobufLibraryBundledProtoFile(dependency)) {
- ::grpc::string base_name = header;
- grpc_generator::StripPrefix(&base_name, "google/protobuf/");
- // create the import code snippet
- imports +=
- "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
- " #import <" +
- ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name +
- ">\n"
- "#else\n"
- " #import \"" +
- header +
- "\"\n"
- "#endif\n";
- } else {
- imports += ::grpc::string("#import \"") + header + "\"\n";
- }
+ class_imports += ImportProtoHeaders(file->dependency(i), "");
}
::grpc::string definitions;
@@ -143,7 +144,9 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
definitions += grpc_objective_c_generator::GetSource(service);
}
- Write(context, file_name + ".pbrpc.m", imports + '\n' + definitions);
+ Write(context, file_name + ".pbrpc.m",
+ PreprocIfNot(kProtocolOnly,
+ imports + "\n" + class_imports + "\n" + definitions));
}
return true;
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index e7d72d1fde..3e2faa57bc 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -127,13 +127,7 @@ static void run_poller(void* arg, grpc_error* error) {
&p->run_poller_closure);
}
-void grpc_client_channel_start_backup_polling(
- grpc_pollset_set* interested_parties) {
- gpr_once_init(&g_once, init_globals);
- if (g_poll_interval_ms == 0) {
- return;
- }
- gpr_mu_lock(&g_poller_mu);
+static void g_poller_init_locked() {
if (g_poller == nullptr) {
g_poller = static_cast<backup_poller*>(gpr_zalloc(sizeof(backup_poller)));
g_poller->pollset =
@@ -149,7 +143,16 @@ void grpc_client_channel_start_backup_polling(
grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
&g_poller->run_poller_closure);
}
+}
+void grpc_client_channel_start_backup_polling(
+ grpc_pollset_set* interested_parties) {
+ gpr_once_init(&g_once, init_globals);
+ if (g_poll_interval_ms == 0) {
+ return;
+ }
+ gpr_mu_lock(&g_poller_mu);
+ g_poller_init_locked();
gpr_ref(&g_poller->refs);
/* Get a reference to g_poller->pollset before releasing g_poller_mu to make
* TSAN happy. Otherwise, reading from g_poller (i.e g_poller->pollset) after
diff --git a/src/core/ext/filters/client_channel/backup_poller.h b/src/core/ext/filters/client_channel/backup_poller.h
index 45bdf10d6c..7285b9b93e 100644
--- a/src/core/ext/filters/client_channel/backup_poller.h
+++ b/src/core/ext/filters/client_channel/backup_poller.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 9a8f25b630..90b93fbe23 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -21,6 +21,7 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include <inttypes.h>
+#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@@ -33,144 +34,65 @@
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/ext/filters/client_channel/method_params.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
+#include "src/core/ext/filters/client_channel/status_util.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/status_metadata.h"
+
+using grpc_core::internal::ClientChannelMethodParams;
/* Client channel implementation */
+// By default, we buffer 256 KiB per RPC for retries.
+// TODO(roth): Do we have any data to suggest a better value?
+#define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10)
+
+// This value was picked arbitrarily. It can be changed if there is
+// any even moderately compelling reason to do so.
+#define RETRY_BACKOFF_JITTER 0.2
+
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
/*************************************************************************
- * METHOD-CONFIG TABLE
+ * CHANNEL-WIDE FUNCTIONS
*/
-typedef enum {
- /* zero so it can be default initialized */
- WAIT_FOR_READY_UNSET = 0,
- WAIT_FOR_READY_FALSE,
- WAIT_FOR_READY_TRUE
-} wait_for_ready_value;
-
-typedef struct {
- gpr_refcount refs;
- grpc_millis timeout;
- wait_for_ready_value wait_for_ready;
-} method_parameters;
-
-static method_parameters* method_parameters_ref(
- method_parameters* method_params) {
- gpr_ref(&method_params->refs);
- return method_params;
-}
-
-static void method_parameters_unref(method_parameters* method_params) {
- if (gpr_unref(&method_params->refs)) {
- gpr_free(method_params);
- }
-}
-
-// Wrappers to pass to grpc_service_config_create_method_config_table().
-static void* method_parameters_ref_wrapper(void* value) {
- return method_parameters_ref(static_cast<method_parameters*>(value));
-}
-static void method_parameters_unref_wrapper(void* value) {
- method_parameters_unref(static_cast<method_parameters*>(value));
-}
-
-static bool parse_wait_for_ready(grpc_json* field,
- wait_for_ready_value* wait_for_ready) {
- if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
- return false;
- }
- *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
- : WAIT_FOR_READY_FALSE;
- return true;
-}
-
-static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
- if (field->type != GRPC_JSON_STRING) return false;
- size_t len = strlen(field->value);
- if (field->value[len - 1] != 's') return false;
- char* buf = gpr_strdup(field->value);
- buf[len - 1] = '\0'; // Remove trailing 's'.
- char* decimal_point = strchr(buf, '.');
- int nanos = 0;
- if (decimal_point != nullptr) {
- *decimal_point = '\0';
- nanos = gpr_parse_nonnegative_int(decimal_point + 1);
- if (nanos == -1) {
- gpr_free(buf);
- return false;
- }
- int num_digits = static_cast<int>(strlen(decimal_point + 1));
- if (num_digits > 9) { // We don't accept greater precision than nanos.
- gpr_free(buf);
- return false;
- }
- for (int i = 0; i < (9 - num_digits); ++i) {
- nanos *= 10;
- }
- }
- int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf);
- gpr_free(buf);
- if (seconds == -1) return false;
- *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
- return true;
-}
-
-static void* method_parameters_create_from_json(const grpc_json* json) {
- wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
- grpc_millis timeout = 0;
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) continue;
- if (strcmp(field->key, "waitForReady") == 0) {
- if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr; // Duplicate.
- if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr;
- } else if (strcmp(field->key, "timeout") == 0) {
- if (timeout > 0) return nullptr; // Duplicate.
- if (!parse_timeout(field, &timeout)) return nullptr;
- }
- }
- method_parameters* value =
- static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
- gpr_ref_init(&value->refs, 1);
- value->timeout = timeout;
- value->wait_for_ready = wait_for_ready;
- return value;
-}
-
struct external_connectivity_watcher;
-/*************************************************************************
- * CHANNEL-WIDE FUNCTIONS
- */
+typedef grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+ MethodParamsTable;
typedef struct client_channel_channel_data {
- /** resolver for this channel */
grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
- /** have we started resolving this channel */
bool started_resolving;
- /** is deadline checking enabled? */
bool deadline_checking_enabled;
- /** client channel factory */
grpc_client_channel_factory* client_channel_factory;
+ bool enable_retries;
+ size_t per_rpc_retry_buffer_size;
/** combiner protecting all variables below in this data structure */
grpc_combiner* combiner;
@@ -179,7 +101,7 @@ typedef struct client_channel_channel_data {
/** retry throttle data */
grpc_server_retry_throttle_data* retry_throttle_data;
/** maps method names to method_parameters structs */
- grpc_slice_hash_table* method_params_table;
+ grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
@@ -200,7 +122,7 @@ typedef struct client_channel_channel_data {
gpr_mu external_connectivity_watcher_list_mu;
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
- /* the following properties are guarded by a mutex since API's require them
+ /* the following properties are guarded by a mutex since APIs require them
to be instantaneously available */
gpr_mu info_mu;
char* info_lb_policy_name;
@@ -306,9 +228,8 @@ typedef struct {
grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;
-static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
- service_config_parsing_state* parsing_state =
- static_cast<service_config_parsing_state*>(arg);
+static void parse_retry_throttle_params(
+ const grpc_json* field, service_config_parsing_state* parsing_state) {
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -388,7 +309,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
- // Extract the following fields from the resolver result, if non-NULL.
+ // Extract the following fields from the resolver result, if non-nullptr.
bool lb_policy_updated = false;
bool lb_policy_created = false;
char* lb_policy_name_dup = nullptr;
@@ -396,7 +317,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
char* service_config_json = nullptr;
grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
- grpc_slice_hash_table* method_params_table = nullptr;
+ grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
if (chand->resolver_result != nullptr) {
if (chand->resolver != nullptr) {
// Find LB policy name.
@@ -431,7 +352,6 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
-
// Check to see if we're already using the right LB policy.
// Note: It's safe to use chand->info_lb_policy_name here without
// taking a lock on chand->info_mu, because this function is the
@@ -469,39 +389,39 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
new_lb_policy->SetReresolutionClosureLocked(&args->closure);
}
}
+ // Before we clean up, save a copy of lb_policy_name, since it might
+ // be pointing to data inside chand->resolver_result.
+ // The copy will be saved in chand->lb_policy_name below.
+ lb_policy_name_dup = gpr_strdup(lb_policy_name);
// Find service config.
channel_arg = grpc_channel_args_find(chand->resolver_result,
GRPC_ARG_SERVICE_CONFIG);
service_config_json =
gpr_strdup(grpc_channel_arg_get_string(channel_arg));
if (service_config_json != nullptr) {
- grpc_service_config* service_config =
- grpc_service_config_create(service_config_json);
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
+ grpc_core::ServiceConfig::Create(service_config_json);
if (service_config != nullptr) {
- channel_arg = grpc_channel_args_find(chand->resolver_result,
- GRPC_ARG_SERVER_URI);
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
- GPR_ASSERT(server_uri != nullptr);
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
- GPR_ASSERT(uri->path[0] != '\0');
- service_config_parsing_state parsing_state;
- memset(&parsing_state, 0, sizeof(parsing_state));
- parsing_state.server_name =
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
- grpc_service_config_parse_global_params(
- service_config, parse_retry_throttle_params, &parsing_state);
- grpc_uri_destroy(uri);
- retry_throttle_data = parsing_state.retry_throttle_data;
- method_params_table = grpc_service_config_create_method_config_table(
- service_config, method_parameters_create_from_json,
- method_parameters_ref_wrapper, method_parameters_unref_wrapper);
- grpc_service_config_destroy(service_config);
+ if (chand->enable_retries) {
+ channel_arg = grpc_channel_args_find(chand->resolver_result,
+ GRPC_ARG_SERVER_URI);
+ const char* server_uri = grpc_channel_arg_get_string(channel_arg);
+ GPR_ASSERT(server_uri != nullptr);
+ grpc_uri* uri = grpc_uri_parse(server_uri, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ service_config_parsing_state parsing_state;
+ memset(&parsing_state, 0, sizeof(parsing_state));
+ parsing_state.server_name =
+ uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ service_config->ParseGlobalParams(parse_retry_throttle_params,
+ &parsing_state);
+ grpc_uri_destroy(uri);
+ retry_throttle_data = parsing_state.retry_throttle_data;
+ }
+ method_params_table = service_config->CreateMethodConfigTable(
+ ClientChannelMethodParams::CreateFromJson);
}
}
- // Before we clean up, save a copy of lb_policy_name, since it might
- // be pointing to data inside chand->resolver_result.
- // The copy will be saved in chand->lb_policy_name below.
- lb_policy_name_dup = gpr_strdup(lb_policy_name);
}
grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = nullptr;
@@ -514,7 +434,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
lb_policy_name_changed ? " (changed)" : "", service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
- // be NULL if (e.g.) the resolver failed to return results or the
+ // be nullptr if (e.g.) the resolver failed to return results or the
// results did not contain the necessary data.
//
// First, swap out the data used by cc_get_channel_info().
@@ -534,16 +454,13 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
chand->retry_throttle_data = retry_throttle_data;
// Swap out the method params table.
- if (chand->method_params_table != nullptr) {
- grpc_slice_hash_table_unref(chand->method_params_table);
- }
- chand->method_params_table = method_params_table;
+ chand->method_params_table = std::move(method_params_table);
// If we have a new LB policy or are shutting down (in which case
- // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
- // and removing its fds from chand->interested_parties. Note that we do NOT do
- // this if either (a) we updated the existing LB policy above or (b) we failed
- // to create the new LB policy (in which case we want to continue using the
- // most recent one we had).
+ // new_lb_policy will be nullptr), swap out the LB policy, unreffing the
+ // old one and removing its fds from chand->interested_parties.
+ // Note that we do NOT do this if either (a) we updated the existing
+ // LB policy above or (b) we failed to create the new LB policy (in
+ // which case we want to continue using the most recent one we had).
if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
chand->resolver == nullptr) {
if (chand->lb_policy != nullptr) {
@@ -722,9 +639,17 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
grpc_client_channel_start_backup_polling(chand->interested_parties);
+ // Record max per-RPC retry buffer size.
+ const grpc_arg* arg = grpc_channel_args_find(
+ args->channel_args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE);
+ chand->per_rpc_retry_buffer_size = (size_t)grpc_channel_arg_get_integer(
+ arg, {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX});
+ // Record enable_retries.
+ arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
+ chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
// Record client channel factory.
- const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
- GRPC_ARG_CLIENT_CHANNEL_FACTORY);
+ arg = grpc_channel_args_find(args->channel_args,
+ GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Missing client channel factory in args for client channel filter");
@@ -794,7 +719,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
}
if (chand->method_params_table != nullptr) {
- grpc_slice_hash_table_unref(chand->method_params_table);
+ chand->method_params_table.reset();
}
grpc_client_channel_stop_backup_polling(chand->interested_parties);
grpc_connectivity_state_destroy(&chand->state_tracker);
@@ -809,15 +734,122 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
*/
// Max number of batches that can be pending on a call at any given
-// time. This includes:
+// time. This includes one batch for each of the following ops:
// recv_initial_metadata
// send_initial_metadata
// recv_message
// send_message
// recv_trailing_metadata
// send_trailing_metadata
-// We also add room for a single cancel_stream batch.
-#define MAX_WAITING_BATCHES 7
+#define MAX_PENDING_BATCHES 6
+
+// Retry support:
+//
+// In order to support retries, we act as a proxy for stream op batches.
+// When we get a batch from the surface, we add it to our list of pending
+// batches, and we then use those batches to construct separate "child"
+// batches to be started on the subchannel call. When the child batches
+// return, we then decide which pending batches have been completed and
+// schedule their callbacks accordingly. If a subchannel call fails and
+// we want to retry it, we do a new pick and start again, constructing
+// new "child" batches for the new subchannel call.
+//
+// Note that retries are committed when receiving data from the server
+// (except for Trailers-Only responses). However, there may be many
+// send ops started before receiving any data, so we may have already
+// completed some number of send ops (and returned the completions up to
+// the surface) by the time we realize that we need to retry. To deal
+// with this, we cache data for send ops, so that we can replay them on a
+// different subchannel call even after we have completed the original
+// batches.
+//
+// There are two sets of data to maintain:
+// - In call_data (in the parent channel), we maintain a list of pending
+// ops and cached data for send ops.
+// - In the subchannel call, we maintain state to indicate what ops have
+// already been sent down to that call.
+//
+// When constructing the "child" batches, we compare those two sets of
+// data to see which batches need to be sent to the subchannel call.
+
+// TODO(roth): In subsequent PRs:
+// - add support for transparent retries (including initial metadata)
+// - figure out how to record stats in census for retries
+// (census filter is on top of this one)
+// - add census stats for retries
+
+// State used for starting a retryable batch on a subchannel call.
+// This provides its own grpc_transport_stream_op_batch and other data
+// structures needed to populate the ops in the batch.
+// We allocate one struct on the arena for each attempt at starting a
+// batch on a given subchannel call.
+typedef struct {
+ gpr_refcount refs;
+ grpc_call_element* elem;
+ grpc_subchannel_call* subchannel_call; // Holds a ref.
+ // The batch to use in the subchannel call.
+ // Its payload field points to subchannel_call_retry_state.batch_payload.
+ grpc_transport_stream_op_batch batch;
+ // For send_initial_metadata.
+ // Note that we need to make a copy of the initial metadata for each
+ // subchannel call instead of just referring to the copy in call_data,
+ // because filters in the subchannel stack will probably add entries,
+ // so we need to start in a pristine state for each attempt of the call.
+ grpc_linked_mdelem* send_initial_metadata_storage;
+ grpc_metadata_batch send_initial_metadata;
+ // For send_message.
+ grpc_caching_byte_stream send_message;
+ // For send_trailing_metadata.
+ grpc_linked_mdelem* send_trailing_metadata_storage;
+ grpc_metadata_batch send_trailing_metadata;
+ // For intercepting recv_initial_metadata.
+ grpc_metadata_batch recv_initial_metadata;
+ grpc_closure recv_initial_metadata_ready;
+ bool trailing_metadata_available;
+ // For intercepting recv_message.
+ grpc_closure recv_message_ready;
+ grpc_byte_stream* recv_message;
+ // For intercepting recv_trailing_metadata.
+ grpc_metadata_batch recv_trailing_metadata;
+ grpc_transport_stream_stats collect_stats;
+ // For intercepting on_complete.
+ grpc_closure on_complete;
+} subchannel_batch_data;
+
+// Retry state associated with a subchannel call.
+// Stored in the parent_data of the subchannel call object.
+typedef struct {
+ // subchannel_batch_data.batch.payload points to this.
+ grpc_transport_stream_op_batch_payload batch_payload;
+ // These fields indicate which ops have been started and completed on
+ // this subchannel call.
+ size_t started_send_message_count;
+ size_t completed_send_message_count;
+ size_t started_recv_message_count;
+ size_t completed_recv_message_count;
+ bool started_send_initial_metadata : 1;
+ bool completed_send_initial_metadata : 1;
+ bool started_send_trailing_metadata : 1;
+ bool completed_send_trailing_metadata : 1;
+ bool started_recv_initial_metadata : 1;
+ bool completed_recv_initial_metadata : 1;
+ bool started_recv_trailing_metadata : 1;
+ bool completed_recv_trailing_metadata : 1;
+ // State for callback processing.
+ bool retry_dispatched : 1;
+ bool recv_initial_metadata_ready_deferred : 1;
+ bool recv_message_ready_deferred : 1;
+ grpc_error* recv_initial_metadata_error;
+ grpc_error* recv_message_error;
+} subchannel_call_retry_state;
+
+// Pending batches stored in call data.
+typedef struct {
+ // The pending batch. If nullptr, this slot is empty.
+ grpc_transport_stream_op_batch* batch;
+ // Indicates whether payload for send ops has been cached in call data.
+ bool send_ops_cached;
+} pending_batch;
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -841,159 +873,1592 @@ typedef struct client_channel_call_data {
grpc_call_combiner* call_combiner;
grpc_server_retry_throttle_data* retry_throttle_data;
- method_parameters* method_params;
+ grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
grpc_subchannel_call* subchannel_call;
- grpc_error* error;
+
+ // Set when we get a cancel_stream op.
+ grpc_error* cancel_error;
grpc_core::LoadBalancingPolicy::PickState pick;
- grpc_closure lb_pick_closure;
- grpc_closure lb_pick_cancel_closure;
+ grpc_closure pick_closure;
+ grpc_closure pick_cancel_closure;
grpc_polling_entity* pollent;
- grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
- size_t waiting_for_pick_batches_count;
- grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
+ // Batches are added to this list when received from above.
+ // They are removed when we are done handling the batch (i.e., when
+ // either we have invoked all of the batch's callbacks or we have
+ // passed the batch down to the subchannel call and are not
+ // intercepting any of its callbacks).
+ pending_batch pending_batches[MAX_PENDING_BATCHES];
+ bool pending_send_initial_metadata : 1;
+ bool pending_send_message : 1;
+ bool pending_send_trailing_metadata : 1;
+
+ // Retry state.
+ bool enable_retries : 1;
+ bool retry_committed : 1;
+ bool last_attempt_got_server_pushback : 1;
+ int num_attempts_completed;
+ size_t bytes_buffered_for_retry;
+ grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
+ grpc_timer retry_timer;
+
+ // Cached data for retrying send ops.
+ // send_initial_metadata
+ bool seen_send_initial_metadata;
+ grpc_linked_mdelem* send_initial_metadata_storage;
+ grpc_metadata_batch send_initial_metadata;
+ uint32_t send_initial_metadata_flags;
+ gpr_atm* peer_string;
+ // send_message
+ // When we get a send_message op, we replace the original byte stream
+ // with a grpc_caching_byte_stream that caches the slices to a
+ // local buffer for use in retries.
+ // Note: We inline the cache for the first 3 send_message ops and use
+ // dynamic allocation after that. This number was essentially picked
+ // at random; it could be changed in the future to tune performance.
+ grpc_core::InlinedVector<grpc_byte_stream_cache*, 3> send_messages;
+ // send_trailing_metadata
+ bool seen_send_trailing_metadata;
+ grpc_linked_mdelem* send_trailing_metadata_storage;
+ grpc_metadata_batch send_trailing_metadata;
+} call_data;
- grpc_transport_stream_op_batch* initial_metadata_batch;
+// Forward declarations.
+static void retry_commit(grpc_call_element* elem,
+ subchannel_call_retry_state* retry_state);
+static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
+static void on_complete(void* arg, grpc_error* error);
+static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
+static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
+static void start_pick_locked(void* arg, grpc_error* ignored);
+
+//
+// send op data caching
+//
+
+// Caches data for send ops so that it can be retried later, if not
+// already cached.
+static void maybe_cache_send_ops_for_batch(call_data* calld,
+ pending_batch* pending) {
+ if (pending->send_ops_cached) return;
+ pending->send_ops_cached = true;
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ // Save a copy of metadata for send_initial_metadata ops.
+ if (batch->send_initial_metadata) {
+ calld->seen_send_initial_metadata = true;
+ GPR_ASSERT(calld->send_initial_metadata_storage == nullptr);
+ grpc_metadata_batch* send_initial_metadata =
+ batch->payload->send_initial_metadata.send_initial_metadata;
+ calld->send_initial_metadata_storage = (grpc_linked_mdelem*)gpr_arena_alloc(
+ calld->arena,
+ sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
+ grpc_metadata_batch_copy(send_initial_metadata,
+ &calld->send_initial_metadata,
+ calld->send_initial_metadata_storage);
+ calld->send_initial_metadata_flags =
+ batch->payload->send_initial_metadata.send_initial_metadata_flags;
+ calld->peer_string = batch->payload->send_initial_metadata.peer_string;
+ }
+ // Set up cache for send_message ops.
+ if (batch->send_message) {
+ grpc_byte_stream_cache* cache = (grpc_byte_stream_cache*)gpr_arena_alloc(
+ calld->arena, sizeof(grpc_byte_stream_cache));
+ grpc_byte_stream_cache_init(cache,
+ batch->payload->send_message.send_message);
+ calld->send_messages.push_back(cache);
+ }
+ // Save metadata batch for send_trailing_metadata ops.
+ if (batch->send_trailing_metadata) {
+ calld->seen_send_trailing_metadata = true;
+ GPR_ASSERT(calld->send_trailing_metadata_storage == nullptr);
+ grpc_metadata_batch* send_trailing_metadata =
+ batch->payload->send_trailing_metadata.send_trailing_metadata;
+ calld->send_trailing_metadata_storage =
+ (grpc_linked_mdelem*)gpr_arena_alloc(
+ calld->arena,
+ sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
+ grpc_metadata_batch_copy(send_trailing_metadata,
+ &calld->send_trailing_metadata,
+ calld->send_trailing_metadata_storage);
+ }
+}
- grpc_closure on_complete;
- grpc_closure* original_on_complete;
-} call_data;
+// Frees cached send ops that have already been completed after
+// committing the call.
+static void free_cached_send_op_data_after_commit(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (retry_state->completed_send_initial_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ }
+ for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
+ "]",
+ chand, calld, i);
+ }
+ grpc_byte_stream_cache_destroy(calld->send_messages[i]);
+ }
+ if (retry_state->completed_send_trailing_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ }
+}
-grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
- grpc_call_element* elem) {
+// Frees cached send ops that were completed by the completed batch in
+// batch_data. Used when batches are completed after the call is committed.
+static void free_cached_send_op_data_for_completed_batch(
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
- return calld->subchannel_call;
+ if (batch_data->batch.send_initial_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ }
+ if (batch_data->batch.send_message) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
+ "]",
+ chand, calld, retry_state->completed_send_message_count - 1);
+ }
+ grpc_byte_stream_cache_destroy(
+ calld->send_messages[retry_state->completed_send_message_count - 1]);
+ }
+ if (batch_data->batch.send_trailing_metadata) {
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ }
+}
+
+//
+// pending_batches management
+//
+
+// Returns the index into calld->pending_batches to be used for batch.
+static size_t get_batch_index(grpc_transport_stream_op_batch* batch) {
+ // Note: It is important the send_initial_metadata be the first entry
+ // here, since the code in pick_subchannel_locked() assumes it will be.
+ if (batch->send_initial_metadata) return 0;
+ if (batch->send_message) return 1;
+ if (batch->send_trailing_metadata) return 2;
+ if (batch->recv_initial_metadata) return 3;
+ if (batch->recv_message) return 4;
+ if (batch->recv_trailing_metadata) return 5;
+ GPR_UNREACHABLE_CODE(return (size_t)-1);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_add(
- call_data* calld, grpc_transport_stream_op_batch* batch) {
- if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->initial_metadata_batch == nullptr);
- calld->initial_metadata_batch = batch;
- } else {
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
- batch;
+static void pending_batches_add(grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ const size_t idx = get_batch_index(batch);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
+ calld, idx);
+ }
+ pending_batch* pending = &calld->pending_batches[idx];
+ GPR_ASSERT(pending->batch == nullptr);
+ pending->batch = batch;
+ pending->send_ops_cached = false;
+ if (calld->enable_retries) {
+ // Update state in calld about pending batches.
+ // Also check if the batch takes us over the retry buffer limit.
+ // Note: We don't check the size of trailing metadata here, because
+ // gRPC clients do not send trailing metadata.
+ if (batch->send_initial_metadata) {
+ calld->pending_send_initial_metadata = true;
+ calld->bytes_buffered_for_retry += grpc_metadata_batch_size(
+ batch->payload->send_initial_metadata.send_initial_metadata);
+ }
+ if (batch->send_message) {
+ calld->pending_send_message = true;
+ calld->bytes_buffered_for_retry +=
+ batch->payload->send_message.send_message->length;
+ }
+ if (batch->send_trailing_metadata) {
+ calld->pending_send_trailing_metadata = true;
+ }
+ if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: exceeded retry buffer size, committing",
+ chand, calld);
+ }
+ subchannel_call_retry_state* retry_state =
+ calld->subchannel_call == nullptr
+ ? nullptr
+ : static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ retry_commit(elem, retry_state);
+ // If we are not going to retry and have not yet started, pretend
+ // retries are disabled so that we don't bother with retry overhead.
+ if (calld->num_attempts_completed == 0) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: disabling retries before first attempt",
+ chand, calld);
+ }
+ calld->enable_retries = false;
+ }
+ }
+ }
+}
+
+static void pending_batch_clear(call_data* calld, pending_batch* pending) {
+ if (calld->enable_retries) {
+ if (pending->batch->send_initial_metadata) {
+ calld->pending_send_initial_metadata = false;
+ }
+ if (pending->batch->send_message) {
+ calld->pending_send_message = false;
+ }
+ if (pending->batch->send_trailing_metadata) {
+ calld->pending_send_trailing_metadata = false;
+ }
}
+ pending->batch = nullptr;
}
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
- call_data* calld = static_cast<call_data*>(arg);
- if (calld->waiting_for_pick_batches_count > 0) {
- --calld->waiting_for_pick_batches_count;
- grpc_transport_stream_op_batch_finish_with_failure(
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
- GRPC_ERROR_REF(error), calld->call_combiner);
- }
+ grpc_transport_stream_op_batch* batch =
+ static_cast<grpc_transport_stream_op_batch*>(arg);
+ call_data* calld = static_cast<call_data*>(batch->handler_private.extra_arg);
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batch, GRPC_ERROR_REF(error), calld->call_combiner);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_call_element* elem,
- grpc_error* error) {
+// If yield_call_combiner is true, assumes responsibility for yielding
+// the call combiner.
+static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
+ bool yield_call_combiner) {
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ if (calld->pending_batches[i].batch != nullptr) ++num_batches;
+ }
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
- grpc_error_string(error));
+ elem->channel_data, calld, num_batches, grpc_error_string(error));
+ }
+ grpc_transport_stream_op_batch*
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch != nullptr) {
+ batches[num_batches++] = batch;
+ pending_batch_clear(calld, pending);
+ }
}
- for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
- fail_pending_batch_in_call_combiner, calld,
+ for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
+ grpc_transport_stream_op_batch* batch = batches[i];
+ batch->handler_private.extra_arg = calld;
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+ fail_pending_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
- GRPC_CALL_COMBINER_START(
- calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
- GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
- }
- if (calld->initial_metadata_batch != nullptr) {
- grpc_transport_stream_op_batch_finish_with_failure(
- calld->initial_metadata_batch, GRPC_ERROR_REF(error),
- calld->call_combiner);
- } else {
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
- "waiting_for_pick_batches_fail");
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
+ &batch->handler_private.closure,
+ GRPC_ERROR_REF(error), "pending_batches_fail");
+ }
+ if (yield_call_combiner) {
+ if (num_batches > 0) {
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
+ } else {
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
+ }
}
GRPC_ERROR_UNREF(error);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
- call_data* calld = static_cast<call_data*>(arg);
- if (calld->waiting_for_pick_batches_count > 0) {
- --calld->waiting_for_pick_batches_count;
- grpc_subchannel_call_process_op(
- calld->subchannel_call,
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
- }
+static void resume_pending_batch_in_call_combiner(void* arg,
+ grpc_error* ignored) {
+ grpc_transport_stream_op_batch* batch =
+ static_cast<grpc_transport_stream_op_batch*>(arg);
+ grpc_subchannel_call* subchannel_call =
+ static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(subchannel_call, batch);
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
+static void pending_batches_resume(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (calld->enable_retries) {
+ start_retriable_subchannel_batches(elem, GRPC_ERROR_NONE);
+ return;
+ }
+ // Retries not enabled; send down batches as-is.
if (grpc_client_channel_trace.enabled()) {
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ if (calld->pending_batches[i].batch != nullptr) ++num_batches;
+ }
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending %" PRIuPTR
- " pending batches to subchannel_call=%p",
- chand, calld, calld->waiting_for_pick_batches_count,
- calld->subchannel_call);
- }
- for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
- run_pending_batch_in_call_combiner, calld,
+ "chand=%p calld=%p: starting %" PRIuPTR
+ " pending batches on subchannel_call=%p",
+ chand, calld, num_batches, calld->subchannel_call);
+ }
+ grpc_transport_stream_op_batch*
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_batches = 0;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch != nullptr) {
+ batches[num_batches++] = batch;
+ pending_batch_clear(calld, pending);
+ }
+ }
+ for (size_t i = 1; i < num_batches; ++i) {
+ grpc_transport_stream_op_batch* batch = batches[i];
+ batch->handler_private.extra_arg = calld->subchannel_call;
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+ resume_pending_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
- GRPC_CALL_COMBINER_START(
- calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
- GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
+ &batch->handler_private.closure, GRPC_ERROR_NONE,
+ "pending_batches_resume");
}
- GPR_ASSERT(calld->initial_metadata_batch != nullptr);
- grpc_subchannel_call_process_op(calld->subchannel_call,
- calld->initial_metadata_batch);
+ GPR_ASSERT(num_batches > 0);
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
}
-// Applies service config to the call. Must be invoked once we know
-// that the resolver has returned results to the channel.
-static void apply_service_config_to_call_locked(grpc_call_element* elem) {
+static void maybe_clear_pending_batch(grpc_call_element* elem,
+ pending_batch* pending) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ // We clear the pending batch if all of its callbacks have been
+ // scheduled and reset to nullptr.
+ if (batch->on_complete == nullptr &&
+ (!batch->recv_initial_metadata ||
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
+ nullptr) &&
+ (!batch->recv_message ||
+ batch->payload->recv_message.recv_message_ready == nullptr)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
+ calld);
+ }
+ pending_batch_clear(calld, pending);
+ }
+}
+
+// Returns true if all ops in the pending batch have been completed.
+static bool pending_batch_is_completed(
+ pending_batch* pending, call_data* calld,
+ subchannel_call_retry_state* retry_state) {
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
+ return false;
+ }
+ if (pending->batch->send_initial_metadata &&
+ !retry_state->completed_send_initial_metadata) {
+ return false;
+ }
+ if (pending->batch->send_message &&
+ retry_state->completed_send_message_count < calld->send_messages.size()) {
+ return false;
+ }
+ if (pending->batch->send_trailing_metadata &&
+ !retry_state->completed_send_trailing_metadata) {
+ return false;
+ }
+ if (pending->batch->recv_initial_metadata &&
+ !retry_state->completed_recv_initial_metadata) {
+ return false;
+ }
+ if (pending->batch->recv_message &&
+ retry_state->completed_recv_message_count <
+ retry_state->started_recv_message_count) {
+ return false;
+ }
+ if (pending->batch->recv_trailing_metadata &&
+ !retry_state->completed_recv_trailing_metadata) {
+ return false;
+ }
+ return true;
+}
+
+// Returns true if any op in the batch was not yet started.
+static bool pending_batch_is_unstarted(
+ pending_batch* pending, call_data* calld,
+ subchannel_call_retry_state* retry_state) {
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
+ return false;
+ }
+ if (pending->batch->send_initial_metadata &&
+ !retry_state->started_send_initial_metadata) {
+ return true;
+ }
+ if (pending->batch->send_message &&
+ retry_state->started_send_message_count < calld->send_messages.size()) {
+ return true;
+ }
+ if (pending->batch->send_trailing_metadata &&
+ !retry_state->started_send_trailing_metadata) {
+ return true;
+ }
+ if (pending->batch->recv_initial_metadata &&
+ !retry_state->started_recv_initial_metadata) {
+ return true;
+ }
+ if (pending->batch->recv_message &&
+ retry_state->completed_recv_message_count ==
+ retry_state->started_recv_message_count) {
+ return true;
+ }
+ if (pending->batch->recv_trailing_metadata &&
+ !retry_state->started_recv_trailing_metadata) {
+ return true;
+ }
+ return false;
+}
+
+//
+// retry code
+//
+
+// Commits the call so that no further retry attempts will be performed.
+static void retry_commit(grpc_call_element* elem,
+ subchannel_call_retry_state* retry_state) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (calld->retry_committed) return;
+ calld->retry_committed = true;
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
+ }
+ if (retry_state != nullptr) {
+ free_cached_send_op_data_after_commit(elem, retry_state);
+ }
+}
+
+// Starts a retry after appropriate back-off.
+static void do_retry(grpc_call_element* elem,
+ subchannel_call_retry_state* retry_state,
+ grpc_millis server_pushback_ms) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ GPR_ASSERT(calld->method_params != nullptr);
+ const ClientChannelMethodParams::RetryPolicy* retry_policy =
+ calld->method_params->retry_policy();
+ GPR_ASSERT(retry_policy != nullptr);
+ // Reset subchannel call and connected subchannel.
+ if (calld->subchannel_call != nullptr) {
+ GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
+ "client_channel_call_retry");
+ calld->subchannel_call = nullptr;
+ }
+ if (calld->pick.connected_subchannel != nullptr) {
+ calld->pick.connected_subchannel.reset();
+ }
+ // Compute backoff delay.
+ grpc_millis next_attempt_time;
+ if (server_pushback_ms >= 0) {
+ next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms;
+ calld->last_attempt_got_server_pushback = true;
+ } else {
+ if (calld->num_attempts_completed == 1 ||
+ calld->last_attempt_got_server_pushback) {
+ calld->retry_backoff.Init(
+ grpc_core::BackOff::Options()
+ .set_initial_backoff(retry_policy->initial_backoff)
+ .set_multiplier(retry_policy->backoff_multiplier)
+ .set_jitter(RETRY_BACKOFF_JITTER)
+ .set_max_backoff(retry_policy->max_backoff));
+ calld->last_attempt_got_server_pushback = false;
+ }
+ next_attempt_time = calld->retry_backoff->NextAttemptTime();
+ }
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
+ calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
+ }
+ // Schedule retry after computed delay.
+ GRPC_CLOSURE_INIT(&calld->pick_closure, start_pick_locked, elem,
+ grpc_combiner_scheduler(chand->combiner));
+ grpc_timer_init(&calld->retry_timer, next_attempt_time, &calld->pick_closure);
+ // Update bookkeeping.
+ if (retry_state != nullptr) retry_state->retry_dispatched = true;
+}
+
+// Returns true if the call is being retried.
+static bool maybe_retry(grpc_call_element* elem,
+ subchannel_batch_data* batch_data,
+ grpc_status_code status,
+ grpc_mdelem* server_pushback_md) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ // Get retry policy.
+ if (calld->method_params == nullptr) return false;
+ const ClientChannelMethodParams::RetryPolicy* retry_policy =
+ calld->method_params->retry_policy();
+ if (retry_policy == nullptr) return false;
+ // If we've already dispatched a retry from this call, return true.
+ // This catches the case where the batch has multiple callbacks
+ // (i.e., it includes either recv_message or recv_initial_metadata).
+ subchannel_call_retry_state* retry_state = nullptr;
+ if (batch_data != nullptr) {
+ retry_state = static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ if (retry_state->retry_dispatched) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
+ calld);
+ }
+ return true;
+ }
+ }
+ // Check status.
+ if (status == GRPC_STATUS_OK) {
+ grpc_server_retry_throttle_data_record_success(calld->retry_throttle_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
+ }
+ return false;
+ }
+ // Status is not OK. Check whether the status is retryable.
+ if (!retry_policy->retryable_status_codes.Contains(status)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: status %s not configured as retryable", chand,
+ calld, grpc_status_code_to_string(status));
+ }
+ return false;
+ }
+ // Record the failure and check whether retries are throttled.
+ // Note that it's important for this check to come after the status
+ // code check above, since we should only record failures whose statuses
+ // match the configured retryable status codes, so that we don't count
+ // things like failures due to malformed requests (INVALID_ARGUMENT).
+ // Conversely, it's important for this to come before the remaining
+ // checks, so that we don't fail to record failures due to other factors.
+ if (!grpc_server_retry_throttle_data_record_failure(
+ calld->retry_throttle_data)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
+ }
+ return false;
+ }
+ // Check whether the call is committed.
+ if (calld->retry_committed) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
+ calld);
+ }
+ return false;
+ }
+ // Check whether we have retries remaining.
+ ++calld->num_attempts_completed;
+ if (calld->num_attempts_completed >= retry_policy->max_attempts) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
+ calld, retry_policy->max_attempts);
+ }
+ return false;
+ }
+ // If the call was cancelled from the surface, don't retry.
+ if (calld->cancel_error != GRPC_ERROR_NONE) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: call cancelled from surface, not retrying",
+ chand, calld);
+ }
+ return false;
+ }
+ // Check server push-back.
+ grpc_millis server_pushback_ms = -1;
+ if (server_pushback_md != nullptr) {
+ // If the value is "-1" or any other unparseable string, we do not retry.
+ uint32_t ms;
+ if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: not retrying due to server push-back",
+ chand, calld);
+ }
+ return false;
+ } else {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: server push-back: retry in %u ms", chand,
+ calld, ms);
+ }
+ server_pushback_ms = (grpc_millis)ms;
+ }
+ }
+ do_retry(elem, retry_state, server_pushback_ms);
+ return true;
+}
+
+//
+// subchannel_batch_data
+//
+
+static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
+ int refcount) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
+ gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
+ batch_data->elem = elem;
+ batch_data->subchannel_call =
+ GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
+ batch_data->batch.payload = &retry_state->batch_payload;
+ gpr_ref_init(&batch_data->refs, refcount);
+ GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
+ grpc_schedule_on_exec_ctx);
+ batch_data->batch.on_complete = &batch_data->on_complete;
+ GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
+ return batch_data;
+}
+
+static void batch_data_unref(subchannel_batch_data* batch_data) {
+ if (gpr_unref(&batch_data->refs)) {
+ if (batch_data->send_initial_metadata_storage != nullptr) {
+ grpc_metadata_batch_destroy(&batch_data->send_initial_metadata);
+ }
+ if (batch_data->send_trailing_metadata_storage != nullptr) {
+ grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata);
+ }
+ if (batch_data->batch.recv_initial_metadata) {
+ grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata);
+ }
+ if (batch_data->batch.recv_trailing_metadata) {
+ grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+ }
+}
+
+//
+// recv_initial_metadata callback handling
+//
+
+// Invokes recv_initial_metadata_ready for a subchannel batch.
+static void invoke_recv_initial_metadata_callback(void* arg,
+ grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ channel_data* chand =
+ static_cast<channel_data*>(batch_data->elem->channel_data);
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
+ // Find pending batch.
+ pending_batch* pending = nullptr;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
+ if (batch != nullptr && batch->recv_initial_metadata &&
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
+ nullptr) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
+ "pending batch at index %" PRIuPTR,
+ chand, calld, i);
+ }
+ pending = &calld->pending_batches[i];
+ break;
+ }
+ }
+ GPR_ASSERT(pending != nullptr);
+ // Return metadata.
+ grpc_metadata_batch_move(
+ &batch_data->recv_initial_metadata,
+ pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
+ // Update bookkeeping.
+ // Note: Need to do this before invoking the callback, since invoking
+ // the callback will result in yielding the call combiner.
+ grpc_closure* recv_initial_metadata_ready =
+ pending->batch->payload->recv_initial_metadata
+ .recv_initial_metadata_ready;
+ pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ nullptr;
+ maybe_clear_pending_batch(batch_data->elem, pending);
+ batch_data_unref(batch_data);
+ // Invoke callback.
+ GRPC_CLOSURE_RUN(recv_initial_metadata_ready, GRPC_ERROR_REF(error));
+}
+
+// Intercepts recv_initial_metadata_ready callback for retries.
+// Commits the call and returns the initial metadata up the stack.
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ grpc_call_element* elem = batch_data->elem;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
+ chand, calld, grpc_error_string(error));
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ // If we got an error or a Trailers-Only response and have not yet gotten
+ // the recv_trailing_metadata on_complete callback, then defer
+ // propagating this callback back to the surface. We can evaluate whether
+ // to retry when recv_trailing_metadata comes back.
+ if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
+ !retry_state->completed_recv_trailing_metadata) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: deferring recv_initial_metadata_ready "
+ "(Trailers-Only)",
+ chand, calld);
+ }
+ retry_state->recv_initial_metadata_ready_deferred = true;
+ retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
+ if (!retry_state->started_recv_trailing_metadata) {
+ // recv_trailing_metadata not yet started by application; start it
+ // ourselves to get status.
+ start_internal_recv_trailing_metadata(elem);
+ } else {
+ GRPC_CALL_COMBINER_STOP(
+ calld->call_combiner,
+ "recv_initial_metadata_ready trailers-only or error");
+ }
+ return;
+ }
+ // Received valid initial metadata, so commit the call.
+ retry_commit(elem, retry_state);
+ // Manually invoking a callback function; it does not take ownership of error.
+ invoke_recv_initial_metadata_callback(batch_data, error);
+ GRPC_ERROR_UNREF(error);
+}
+
+//
+// recv_message callback handling
+//
+
+// Invokes recv_message_ready for a subchannel batch.
+static void invoke_recv_message_callback(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ channel_data* chand =
+ static_cast<channel_data*>(batch_data->elem->channel_data);
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
+ // Find pending op.
+ pending_batch* pending = nullptr;
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
+ if (batch != nullptr && batch->recv_message &&
+ batch->payload->recv_message.recv_message_ready != nullptr) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: invoking recv_message_ready for "
+ "pending batch at index %" PRIuPTR,
+ chand, calld, i);
+ }
+ pending = &calld->pending_batches[i];
+ break;
+ }
+ }
+ GPR_ASSERT(pending != nullptr);
+ // Return payload.
+ *pending->batch->payload->recv_message.recv_message =
+ batch_data->recv_message;
+ // Update bookkeeping.
+ // Note: Need to do this before invoking the callback, since invoking
+ // the callback will result in yielding the call combiner.
+ grpc_closure* recv_message_ready =
+ pending->batch->payload->recv_message.recv_message_ready;
+ pending->batch->payload->recv_message.recv_message_ready = nullptr;
+ maybe_clear_pending_batch(batch_data->elem, pending);
+ batch_data_unref(batch_data);
+ // Invoke callback.
+ GRPC_CLOSURE_RUN(recv_message_ready, GRPC_ERROR_REF(error));
+}
+
+// Intercepts recv_message_ready callback for retries.
+// Commits the call and returns the message up the stack.
+static void recv_message_ready(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ grpc_call_element* elem = batch_data->elem;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
+ chand, calld, grpc_error_string(error));
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ // If we got an error or the payload was nullptr and we have not yet gotten
+ // the recv_trailing_metadata on_complete callback, then defer
+ // propagating this callback back to the surface. We can evaluate whether
+ // to retry when recv_trailing_metadata comes back.
+ if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
+ !retry_state->completed_recv_trailing_metadata) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: deferring recv_message_ready (nullptr "
+ "message and recv_trailing_metadata pending)",
+ chand, calld);
+ }
+ retry_state->recv_message_ready_deferred = true;
+ retry_state->recv_message_error = GRPC_ERROR_REF(error);
+ if (!retry_state->started_recv_trailing_metadata) {
+ // recv_trailing_metadata not yet started by application; start it
+ // ourselves to get status.
+ start_internal_recv_trailing_metadata(elem);
+ } else {
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "recv_message_ready null");
+ }
+ return;
+ }
+ // Received a valid message, so commit the call.
+ retry_commit(elem, retry_state);
+ // Manually invoking a callback function; it does not take ownership of error.
+ invoke_recv_message_callback(batch_data, error);
+ GRPC_ERROR_UNREF(error);
+}
+
+//
+// on_complete callback handling
+//
+
+// Updates retry_state to reflect the ops completed in batch_data.
+static void update_retry_state_for_completed_batch(
+ subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state) {
+ if (batch_data->batch.send_initial_metadata) {
+ retry_state->completed_send_initial_metadata = true;
+ }
+ if (batch_data->batch.send_message) {
+ ++retry_state->completed_send_message_count;
+ }
+ if (batch_data->batch.send_trailing_metadata) {
+ retry_state->completed_send_trailing_metadata = true;
+ }
+ if (batch_data->batch.recv_initial_metadata) {
+ retry_state->completed_recv_initial_metadata = true;
+ }
+ if (batch_data->batch.recv_message) {
+ ++retry_state->completed_recv_message_count;
+ }
+ if (batch_data->batch.recv_trailing_metadata) {
+ retry_state->completed_recv_trailing_metadata = true;
+ }
+}
+
+// Represents a closure that needs to run as a result of a completed batch.
+typedef struct {
+ grpc_closure* closure;
+ grpc_error* error;
+ const char* reason;
+} closure_to_execute;
+
+// Adds any necessary closures for deferred recv_initial_metadata and
+// recv_message callbacks to closures, updating *num_closures as needed.
+static void add_closures_for_deferred_recv_callbacks(
+ subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
+ closure_to_execute* closures, size_t* num_closures) {
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_initial_metadata_ready_deferred) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure =
+ GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
+ invoke_recv_initial_metadata_callback, batch_data,
+ grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_initial_metadata_error;
+ closure->reason = "resuming recv_initial_metadata_ready";
+ }
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_message_ready_deferred) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
+ invoke_recv_message_callback,
+ batch_data, grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_message_error;
+ closure->reason = "resuming recv_message_ready";
+ }
+}
+
+// If there are any cached ops to replay or pending ops to start on the
+// subchannel call, adds a closure to closures to invoke
+// start_retriable_subchannel_batches(), updating *num_closures as needed.
+static void add_closures_for_replay_or_pending_send_ops(
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state, closure_to_execute* closures,
+ size_t* num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ bool have_pending_send_message_ops =
+ retry_state->started_send_message_count < calld->send_messages.size();
+ bool have_pending_send_trailing_metadata_op =
+ calld->seen_send_trailing_metadata &&
+ !retry_state->started_send_trailing_metadata;
+ if (!have_pending_send_message_ops &&
+ !have_pending_send_trailing_metadata_op) {
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ grpc_transport_stream_op_batch* batch = pending->batch;
+ if (batch == nullptr || pending->send_ops_cached) continue;
+ if (batch->send_message) have_pending_send_message_ops = true;
+ if (batch->send_trailing_metadata) {
+ have_pending_send_trailing_metadata_op = true;
+ }
+ }
+ }
+ if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: starting next batch for pending send op(s)",
+ chand, calld);
+ }
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(
+ &batch_data->batch.handler_private.closure,
+ start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx);
+ closure->error = GRPC_ERROR_NONE;
+ closure->reason = "starting next batch for send_* op(s)";
+ }
+}
+
+// For any pending batch completed in batch_data, adds the necessary
+// completion closures to closures, updating *num_closures as needed.
+static void add_closures_for_completed_pending_batches(
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
+ subchannel_call_retry_state* retry_state, grpc_error* error,
+ closure_to_execute* closures, size_t* num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ if (pending_batch_is_completed(pending, calld, retry_state)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
+ chand, calld, i);
+ }
+ // Copy the trailing metadata to return it to the surface.
+ if (batch_data->batch.recv_trailing_metadata) {
+ grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
+ pending->batch->payload->recv_trailing_metadata
+ .recv_trailing_metadata);
+ }
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = pending->batch->on_complete;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason = "on_complete for pending batch";
+ pending->batch->on_complete = nullptr;
+ maybe_clear_pending_batch(elem, pending);
+ }
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+// For any pending batch containing an op that has not yet been started,
+// adds the pending batch's completion closures to closures, updating
+// *num_closures as needed.
+static void add_closures_to_fail_unstarted_pending_batches(
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+ grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ pending_batch* pending = &calld->pending_batches[i];
+ if (pending_batch_is_unstarted(pending, calld, retry_state)) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: failing unstarted pending batch at index "
+ "%" PRIuPTR,
+ chand, calld, i);
+ }
+ if (pending->batch->recv_initial_metadata) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = pending->batch->payload->recv_initial_metadata
+ .recv_initial_metadata_ready;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason =
+ "failing recv_initial_metadata_ready for pending batch";
+ pending->batch->payload->recv_initial_metadata
+ .recv_initial_metadata_ready = nullptr;
+ }
+ if (pending->batch->recv_message) {
+ *pending->batch->payload->recv_message.recv_message = nullptr;
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure =
+ pending->batch->payload->recv_message.recv_message_ready;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason = "failing recv_message_ready for pending batch";
+ pending->batch->payload->recv_message.recv_message_ready = nullptr;
+ }
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = pending->batch->on_complete;
+ closure->error = GRPC_ERROR_REF(error);
+ closure->reason = "failing on_complete for pending batch";
+ pending->batch->on_complete = nullptr;
+ maybe_clear_pending_batch(elem, pending);
+ }
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+// Callback used to intercept on_complete from subchannel calls.
+// Called only when retries are enabled.
+static void on_complete(void* arg, grpc_error* error) {
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
+ grpc_call_element* elem = batch_data->elem;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ if (grpc_client_channel_trace.enabled()) {
+ char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
+ chand, calld, grpc_error_string(error), batch_str);
+ gpr_free(batch_str);
+ }
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ batch_data->subchannel_call));
+ // If we have previously completed recv_trailing_metadata, then the
+ // call is finished.
+ bool call_finished = retry_state->completed_recv_trailing_metadata;
+ // Update bookkeeping in retry_state.
+ update_retry_state_for_completed_batch(batch_data, retry_state);
+ if (call_finished) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
+ calld);
+ }
+ } else {
+ // Check if this batch finished the call, and if so, get its status.
+ // The call is finished if either (a) this callback was invoked with
+ // an error or (b) we receive status.
+ grpc_status_code status = GRPC_STATUS_OK;
+ grpc_mdelem* server_pushback_md = nullptr;
+ if (error != GRPC_ERROR_NONE) { // Case (a).
+ call_finished = true;
+ grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
+ nullptr);
+ } else if (batch_data->batch.recv_trailing_metadata) { // Case (b).
+ call_finished = true;
+ grpc_metadata_batch* md_batch =
+ batch_data->batch.payload->recv_trailing_metadata
+ .recv_trailing_metadata;
+ GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
+ status = grpc_get_status_code_from_metadata(
+ md_batch->idx.named.grpc_status->md);
+ if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
+ server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
+ }
+ } else if (retry_state->completed_recv_trailing_metadata) {
+ call_finished = true;
+ }
+ if (call_finished && grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
+ calld, grpc_status_code_to_string(status));
+ }
+ // If the call is finished, check if we should retry.
+ if (call_finished &&
+ maybe_retry(elem, batch_data, status, server_pushback_md)) {
+ // Unref batch_data for deferred recv_initial_metadata_ready or
+ // recv_message_ready callbacks, if any.
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_initial_metadata_ready_deferred) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+ }
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_message_ready_deferred) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_message_error);
+ }
+ batch_data_unref(batch_data);
+ return;
+ }
+ }
+ // If the call is finished or retries are committed, free cached data for
+ // send ops that we've just completed.
+ if (call_finished || calld->retry_committed) {
+ free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
+ }
+ // Call not being retried.
+ // Construct list of closures to execute.
+ // Max number of closures is number of pending batches plus one for
+ // each of:
+ // - recv_initial_metadata_ready (either deferred or unstarted)
+ // - recv_message_ready (either deferred or unstarted)
+ // - starting a new batch for pending send ops
+ closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3];
+ size_t num_closures = 0;
+ // If there are deferred recv_initial_metadata_ready or recv_message_ready
+ // callbacks, add them to closures.
+ add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures,
+ &num_closures);
+ // Find pending batches whose ops are now complete and add their
+ // on_complete callbacks to closures.
+ add_closures_for_completed_pending_batches(elem, batch_data, retry_state,
+ GRPC_ERROR_REF(error), closures,
+ &num_closures);
+ // Add closures to handle any pending batches that have not yet been started.
+ // If the call is finished, we fail these batches; otherwise, we add a
+ // callback to start_retriable_subchannel_batches() to start them on
+ // the subchannel call.
+ if (call_finished) {
+ add_closures_to_fail_unstarted_pending_batches(
+ elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
+ } else {
+ add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
+ closures, &num_closures);
+ }
+ // Don't need batch_data anymore.
+ batch_data_unref(batch_data);
+ // Schedule all of the closures identified above.
+ // Note that the call combiner will be yielded for each closure that
+ // we schedule. We're already running in the call combiner, so one of
+ // the closures can be scheduled directly, but the others will
+ // have to re-enter the call combiner.
+ if (num_closures > 0) {
+ GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
+ for (size_t i = 1; i < num_closures; ++i) {
+ GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
+ closures[i].error, closures[i].reason);
+ }
+ } else {
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+ "no closures to run for on_complete");
+ }
+}
+
+//
+// subchannel batch construction
+//
+
+// Helper function used to start a subchannel batch in the call combiner.
+// arg is the grpc_transport_stream_op_batch to start; the target
+// subchannel call was stashed in batch->handler_private.extra_arg by
+// the code that scheduled this closure.
+static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
+  grpc_transport_stream_op_batch* batch =
+      static_cast<grpc_transport_stream_op_batch*>(arg);
+  grpc_subchannel_call* subchannel_call =
+      static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+  // Note: This will release the call combiner.
+  grpc_subchannel_call_process_op(subchannel_call, batch);
+}
+
+// Adds retriable send_initial_metadata op to batch_data.
+static void add_retriable_send_initial_metadata_op(
+    call_data* calld, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  // Maps the number of retries to the corresponding metadata value slice.
+  static const grpc_slice* retry_count_strings[] = {
+      &GRPC_MDSTR_1, &GRPC_MDSTR_2, &GRPC_MDSTR_3, &GRPC_MDSTR_4};
+  // NOTE(review): the indexing below assumes num_attempts_completed <= 4;
+  // presumably the retry policy caps attempts accordingly -- confirm.
+  // We need to make a copy of the metadata batch for each attempt, since
+  // the filters in the subchannel stack may modify this batch, and we don't
+  // want those modifications to be passed forward to subsequent attempts.
+  //
+  // If we've already completed one or more attempts, add the
+  // grpc-retry-attempts header.
+  // Storage is arena-allocated with room for one extra element when the
+  // retry-count header will be appended below (the "> 0" comparison adds 1).
+  batch_data->send_initial_metadata_storage =
+      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
+          calld->arena, sizeof(grpc_linked_mdelem) *
+                            (calld->send_initial_metadata.list.count +
+                             (calld->num_attempts_completed > 0))));
+  grpc_metadata_batch_copy(&calld->send_initial_metadata,
+                           &batch_data->send_initial_metadata,
+                           batch_data->send_initial_metadata_storage);
+  // Strip any grpc-previous-rpc-attempts element copied from the cached
+  // batch, so the value added below reflects the current attempt count.
+  if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
+      nullptr) {
+    grpc_metadata_batch_remove(
+        &batch_data->send_initial_metadata,
+        batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
+  }
+  if (calld->num_attempts_completed > 0) {
+    grpc_mdelem retry_md = grpc_mdelem_from_slices(
+        GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
+        *retry_count_strings[calld->num_attempts_completed - 1]);
+    grpc_error* error = grpc_metadata_batch_add_tail(
+        &batch_data->send_initial_metadata,
+        &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
+                                                       .list.count],
+        retry_md);
+    if (error != GRPC_ERROR_NONE) {
+      gpr_log(GPR_ERROR, "error adding retry metadata: %s",
+              grpc_error_string(error));
+      GPR_ASSERT(false);
+    }
+  }
+  // Mark this op started and wire the copied batch into the subchannel batch.
+  retry_state->started_send_initial_metadata = true;
+  batch_data->batch.send_initial_metadata = true;
+  batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
+      &batch_data->send_initial_metadata;
+  batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
+      calld->send_initial_metadata_flags;
+  batch_data->batch.payload->send_initial_metadata.peer_string =
+      calld->peer_string;
+}
+
+// Adds retriable send_message op to batch_data.
+static void add_retriable_send_message_op(
+    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG,
+            "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
+            chand, calld, retry_state->started_send_message_count);
+  }
+  // Wrap the next cached message in a caching byte stream for this attempt,
+  // so the payload remains available for replay on later attempts.
+  grpc_byte_stream_cache* cache =
+      calld->send_messages[retry_state->started_send_message_count];
+  ++retry_state->started_send_message_count;
+  grpc_caching_byte_stream_init(&batch_data->send_message, cache);
+  batch_data->batch.send_message = true;
+  batch_data->batch.payload->send_message.send_message =
+      &batch_data->send_message.base;
+}
+
+// Adds retriable send_trailing_metadata op to batch_data.
+static void add_retriable_send_trailing_metadata_op(
+    call_data* calld, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  // We need to make a copy of the metadata batch for each attempt, since
+  // the filters in the subchannel stack may modify this batch, and we don't
+  // want those modifications to be passed forward to subsequent attempts.
+  // Storage for the copy is arena-allocated, one linked element per entry.
+  batch_data->send_trailing_metadata_storage =
+      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
+          calld->arena, sizeof(grpc_linked_mdelem) *
+                            calld->send_trailing_metadata.list.count));
+  grpc_metadata_batch_copy(&calld->send_trailing_metadata,
+                           &batch_data->send_trailing_metadata,
+                           batch_data->send_trailing_metadata_storage);
+  retry_state->started_send_trailing_metadata = true;
+  batch_data->batch.send_trailing_metadata = true;
+  batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
+      &batch_data->send_trailing_metadata;
+}
+
+// Adds retriable recv_initial_metadata op to batch_data.
+static void add_retriable_recv_initial_metadata_op(
+    call_data* calld, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  retry_state->started_recv_initial_metadata = true;
+  batch_data->batch.recv_initial_metadata = true;
+  // Receive into batch_data's own metadata batch (not the surface's), so the
+  // retry code can inspect the result before passing it up.
+  grpc_metadata_batch_init(&batch_data->recv_initial_metadata);
+  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
+      &batch_data->recv_initial_metadata;
+  batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
+      &batch_data->trailing_metadata_available;
+  // Intercept the ready callback with this file's recv_initial_metadata_ready
+  // handler so retry bookkeeping runs before the surface callback.
+  GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
+                    recv_initial_metadata_ready, batch_data,
+                    grpc_schedule_on_exec_ctx);
+  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
+      &batch_data->recv_initial_metadata_ready;
+}
+
+// Adds retriable recv_message op to batch_data.
+// NOTE(review): 'calld' is unused here; presumably kept for signature
+// symmetry with the other add_retriable_* helpers.
+static void add_retriable_recv_message_op(
+    call_data* calld, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  ++retry_state->started_recv_message_count;
+  batch_data->batch.recv_message = true;
+  batch_data->batch.payload->recv_message.recv_message =
+      &batch_data->recv_message;
+  // Intercept the ready callback with this file's recv_message_ready handler
+  // so retry bookkeeping runs before the surface callback.
+  GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready,
+                    batch_data, grpc_schedule_on_exec_ctx);
+  batch_data->batch.payload->recv_message.recv_message_ready =
+      &batch_data->recv_message_ready;
+}
+
+// Adds retriable recv_trailing_metadata op to batch_data.
+// NOTE(review): 'calld' is unused here; presumably kept for signature
+// symmetry with the other add_retriable_* helpers.
+static void add_retriable_recv_trailing_metadata_op(
+    call_data* calld, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  retry_state->started_recv_trailing_metadata = true;
+  batch_data->batch.recv_trailing_metadata = true;
+  // Receive into batch_data's own metadata batch so status can be inspected
+  // by the retry code before being surfaced.
+  grpc_metadata_batch_init(&batch_data->recv_trailing_metadata);
+  batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
+      &batch_data->recv_trailing_metadata;
+  // Stats are collected into batch_data as well.
+  batch_data->batch.collect_stats = true;
+  batch_data->batch.payload->collect_stats.collect_stats =
+      &batch_data->collect_stats;
+}
+
+// Helper function used to start a recv_trailing_metadata batch. This
+// is used in the case where a recv_initial_metadata or recv_message
+// op fails in a way that we know the call is over but when the application
+// has not yet started its own recv_trailing_metadata op.
+static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG,
+            "chand=%p calld=%p: call failed but recv_trailing_metadata not "
+            "started; starting it internally",
             chand, calld);
   }
+  subchannel_call_retry_state* retry_state =
+      static_cast<subchannel_call_retry_state*>(
+          grpc_connected_subchannel_call_get_parent_data(
+              calld->subchannel_call));
+  // NOTE(review): the '1' is presumably the number of expected callbacks
+  // (on_complete only) -- see batch_data_create; confirm.
+  subchannel_batch_data* batch_data = batch_data_create(elem, 1);
+  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+  // Note: This will release the call combiner.
+  grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
+}
+
+// If there are any cached send ops that need to be replayed on the
+// current subchannel call, creates and returns a new subchannel batch
+// to replay those ops. Otherwise, returns nullptr.
+// NOTE(review): the calld->pending_send_* flags presumably mean the op is
+// still held in a pending batch and will be started via
+// add_subchannel_batches_for_pending_batches() instead -- confirm.
+static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
+    grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  subchannel_batch_data* replay_batch_data = nullptr;
+  // send_initial_metadata.
+  if (calld->seen_send_initial_metadata &&
+      !retry_state->started_send_initial_metadata &&
+      !calld->pending_send_initial_metadata) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: replaying previously completed "
+              "send_initial_metadata op",
+              chand, calld);
+    }
+    replay_batch_data = batch_data_create(elem, 1);
+    add_retriable_send_initial_metadata_op(calld, retry_state,
+                                           replay_batch_data);
+  }
+  // send_message.
+  // Note that we can only have one send_message op in flight at a time.
+  if (retry_state->started_send_message_count < calld->send_messages.size() &&
+      retry_state->started_send_message_count ==
+          retry_state->completed_send_message_count &&
+      !calld->pending_send_message) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: replaying previously completed "
+              "send_message op",
+              chand, calld);
+    }
+    if (replay_batch_data == nullptr) {
+      replay_batch_data = batch_data_create(elem, 1);
+    }
+    add_retriable_send_message_op(elem, retry_state, replay_batch_data);
+  }
+  // send_trailing_metadata.
+  // Note that we only add this op if we have no more send_message ops
+  // to start, since we can't send down any more send_message ops after
+  // send_trailing_metadata.
+  if (calld->seen_send_trailing_metadata &&
+      retry_state->started_send_message_count == calld->send_messages.size() &&
+      !retry_state->started_send_trailing_metadata &&
+      !calld->pending_send_trailing_metadata) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: replaying previously completed "
+              "send_trailing_metadata op",
+              chand, calld);
+    }
+    if (replay_batch_data == nullptr) {
+      replay_batch_data = batch_data_create(elem, 1);
+    }
+    add_retriable_send_trailing_metadata_op(calld, retry_state,
+                                            replay_batch_data);
   }
-  if (chand->method_params_table != nullptr) {
-    calld->method_params = static_cast<method_parameters*>(
-        grpc_method_config_table_get(chand->method_params_table, calld->path));
-    if (calld->method_params != nullptr) {
-      method_parameters_ref(calld->method_params);
-      // If the deadline from the service config is shorter than the one
-      // from the client API, reset the deadline timer.
-      if (chand->deadline_checking_enabled &&
-          calld->method_params->timeout != 0) {
-        const grpc_millis per_method_deadline =
-            grpc_timespec_to_millis_round_up(calld->call_start_time) +
-            calld->method_params->timeout;
-        if (per_method_deadline < calld->deadline) {
-          calld->deadline = per_method_deadline;
-          grpc_deadline_state_reset(elem, calld->deadline);
-        }
+  return replay_batch_data;
+}
+
+// Adds subchannel batches for pending batches to batches, updating
+// *num_batches as needed.
+static void add_subchannel_batches_for_pending_batches(
+    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+    grpc_transport_stream_op_batch** batches, size_t* num_batches) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+    pending_batch* pending = &calld->pending_batches[i];
+    grpc_transport_stream_op_batch* batch = pending->batch;
+    if (batch == nullptr) continue;
+    // Skip any batch that either (a) has already been started on this
+    // subchannel call or (b) we can't start yet because we're still
+    // replaying send ops that need to be completed first.
+    // TODO(roth): Note that if any one op in the batch can't be sent
+    // yet due to ops that we're replaying, we don't start any of the ops
+    // in the batch. This is probably okay, but it could conceivably
+    // lead to increased latency in some cases -- e.g., we could delay
+    // starting a recv op due to it being in the same batch with a send
+    // op. If/when we revamp the callback protocol in
+    // transport_stream_op_batch, we may be able to fix this.
+    if (batch->send_initial_metadata &&
+        retry_state->started_send_initial_metadata) {
+      continue;
+    }
+    // A send_message is in flight iff started > completed; only one may be
+    // in flight at a time, so defer this batch until the current one is done.
+    if (batch->send_message && retry_state->completed_send_message_count <
+                                   retry_state->started_send_message_count) {
+      continue;
+    }
+    // Note that we only start send_trailing_metadata if we have no more
+    // send_message ops to start, since we can't send down any more
+    // send_message ops after send_trailing_metadata.
+    if (batch->send_trailing_metadata &&
+        (retry_state->started_send_message_count + batch->send_message <
+             calld->send_messages.size() ||
+         retry_state->started_send_trailing_metadata)) {
+      continue;
+    }
+    if (batch->recv_initial_metadata &&
+        retry_state->started_recv_initial_metadata) {
+      continue;
+    }
+    if (batch->recv_message && retry_state->completed_recv_message_count <
+                                   retry_state->started_recv_message_count) {
+      continue;
+    }
+    if (batch->recv_trailing_metadata &&
+        retry_state->started_recv_trailing_metadata) {
+      continue;
+    }
+    // If we're not retrying, just send the batch as-is.
+    if (calld->method_params == nullptr ||
+        calld->method_params->retry_policy() == nullptr ||
+        calld->retry_committed) {
+      batches[(*num_batches)++] = batch;
+      pending_batch_clear(calld, pending);
+      continue;
+    }
+    // Create batch with the right number of callbacks.
+    // NOTE(review): presumably 1 for on_complete plus one per intercepted
+    // recv callback -- see batch_data_create; confirm.
+    const int num_callbacks =
+        1 + batch->recv_initial_metadata + batch->recv_message;
+    subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks);
+    // Cache send ops if needed.
+    maybe_cache_send_ops_for_batch(calld, pending);
+    // send_initial_metadata.
+    if (batch->send_initial_metadata) {
+      add_retriable_send_initial_metadata_op(calld, retry_state, batch_data);
+    }
+    // send_message.
+    if (batch->send_message) {
+      add_retriable_send_message_op(elem, retry_state, batch_data);
+    }
+    // send_trailing_metadata.
+    if (batch->send_trailing_metadata) {
+      add_retriable_send_trailing_metadata_op(calld, retry_state, batch_data);
+    }
+    // recv_initial_metadata.
+    if (batch->recv_initial_metadata) {
+      // recv_flags is only used on the server side.
+      GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
+      add_retriable_recv_initial_metadata_op(calld, retry_state, batch_data);
+    }
+    // recv_message.
+    if (batch->recv_message) {
+      add_retriable_recv_message_op(calld, retry_state, batch_data);
+    }
+    // recv_trailing_metadata.
+    if (batch->recv_trailing_metadata) {
+      GPR_ASSERT(batch->collect_stats);
+      add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+    }
+    batches[(*num_batches)++] = &batch_data->batch;
+  }
+}
+
+// Constructs and starts whatever subchannel batches are needed on the
+// subchannel call.
+static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
+            chand, calld);
+  }
+  subchannel_call_retry_state* retry_state =
+      static_cast<subchannel_call_retry_state*>(
+          grpc_connected_subchannel_call_get_parent_data(
+              calld->subchannel_call));
+  // We can start up to 6 batches.
+  // NOTE(review): the comment says 6 but the array is sized by
+  // GPR_ARRAY_SIZE(calld->pending_batches) -- confirm these agree.
+  grpc_transport_stream_op_batch*
+      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
+  size_t num_batches = 0;
+  // Replay previously-returned send_* ops if needed.
+  subchannel_batch_data* replay_batch_data =
+      maybe_create_subchannel_batch_for_replay(elem, retry_state);
+  if (replay_batch_data != nullptr) {
+    batches[num_batches++] = &replay_batch_data->batch;
+  }
+  // Now add pending batches.
+  add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
+                                             &num_batches);
+  // Start batches on subchannel call.
+  // Note that the call combiner will be yielded for each batch that we
+  // send down. We're already running in the call combiner, so one of
+  // the batches can be started directly, but the others will have to
+  // re-enter the call combiner.
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG,
+            "chand=%p calld=%p: starting %" PRIuPTR
+            " retriable batches on subchannel_call=%p",
+            chand, calld, num_batches, calld->subchannel_call);
+  }
+  if (num_batches == 0) {
+    // This should be fairly rare, but it can happen when (e.g.) an
+    // attempt completes before it has finished replaying all
+    // previously sent messages.
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+                            "no retriable subchannel batches to start");
+  } else {
+    // Batches [1..n) must re-enter the call combiner; batch [0] is started
+    // directly below since we already hold the combiner.
+    for (size_t i = 1; i < num_batches; ++i) {
+      if (grpc_client_channel_trace.enabled()) {
+        char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
+        gpr_log(GPR_DEBUG,
+                "chand=%p calld=%p: starting batch in call combiner: %s", chand,
+                calld, batch_str);
+        gpr_free(batch_str);
      }
+      batches[i]->handler_private.extra_arg = calld->subchannel_call;
+      GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
+                        start_batch_in_call_combiner, batches[i],
+                        grpc_schedule_on_exec_ctx);
+      GRPC_CALL_COMBINER_START(calld->call_combiner,
+                               &batches[i]->handler_private.closure,
+                               GRPC_ERROR_NONE, "start_subchannel_batch");
+    }
+    if (grpc_client_channel_trace.enabled()) {
+      char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
+              batch_str);
+      gpr_free(batch_str);
    }
+    // Note: This will release the call combiner.
+    grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
  }
 }
-static void create_subchannel_call_locked(grpc_call_element* elem,
- grpc_error* error) {
+//
+// LB pick
+//
+
+static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
+ const size_t parent_data_size =
+ calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent
calld->path, // path
@@ -1001,7 +2466,8 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
calld->deadline, // deadline
calld->arena, // arena
calld->pick.subchannel_call_context, // context
- calld->call_combiner // call_combiner
+ calld->call_combiner, // call_combiner
+ parent_data_size // parent_data_size
};
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call);
@@ -1011,36 +2477,61 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
}
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
- waiting_for_pick_batches_fail(elem, new_error);
+ pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
} else {
- waiting_for_pick_batches_resume(elem);
+ if (parent_data_size > 0) {
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call));
+ retry_state->batch_payload.context = calld->pick.subchannel_call_context;
+ }
+ pending_batches_resume(elem);
}
GRPC_ERROR_UNREF(error);
}
 // Invoked when a pick is completed, on both success or failure.
-static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
+static void pick_done(void* arg, grpc_error* error) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   if (calld->pick.connected_subchannel == nullptr) {
     // Failed to create subchannel.
-    GRPC_ERROR_UNREF(calld->error);
-    calld->error = error == GRPC_ERROR_NONE
-                       ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                             "Call dropped by load balancing policy")
-                       : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                             "Failed to create subchannel", &error, 1);
-    if (grpc_client_channel_trace.enabled()) {
-      gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
-              calld, grpc_error_string(calld->error));
+    // If there was no error, this is an LB policy drop, in which case
+    // we return an error; otherwise, we may retry.
+    grpc_status_code status = GRPC_STATUS_OK;
+    grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
+                          nullptr);
+    // If maybe_retry() returns true it has taken over the call (a retry was
+    // scheduled), so we must not fail the pending batches here.
+    if (error == GRPC_ERROR_NONE || !calld->enable_retries ||
+        !maybe_retry(elem, nullptr /* batch_data */, status,
+                     nullptr /* server_pushback_md */)) {
+      grpc_error* new_error =
+          error == GRPC_ERROR_NONE
+              ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                    "Call dropped by load balancing policy")
+              : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                    "Failed to create subchannel", &error, 1);
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_DEBUG,
+                "chand=%p calld=%p: failed to create subchannel: error=%s",
+                chand, calld, grpc_error_string(new_error));
+      }
+      pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
     }
-    waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
   } else {
     /* Create call on subchannel. */
-    create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
+    create_subchannel_call(elem, GRPC_ERROR_REF(error));
   }
-  GRPC_ERROR_UNREF(error);
+}
+
+// Invoked when a pick is completed to leave the client_channel combiner
+// and continue processing in the call combiner.
+// Scheduling pick_done() on the exec_ctx (rather than the channel combiner)
+// is what performs the hand-off; 'error' is passed through to pick_done().
+static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
+                    grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
 }
// A wrapper around pick_done_locked() that is used in cases where
@@ -1088,6 +2579,45 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
}
+// Applies service config to the call. Must be invoked once we know
+// that the resolver has returned results to the channel.
+// Populates calld->retry_throttle_data and calld->method_params, may
+// shorten the call deadline, and may clear calld->enable_retries.
+static void apply_service_config_to_call_locked(grpc_call_element* elem) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
+            chand, calld);
+  }
+  // Take a ref on the channel's retry throttle data for this call's lifetime.
+  if (chand->retry_throttle_data != nullptr) {
+    calld->retry_throttle_data =
+        grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
+  }
+  // Look up per-method config by the call's path.
+  if (chand->method_params_table != nullptr) {
+    calld->method_params = grpc_core::ServiceConfig::MethodConfigTableLookup(
+        *chand->method_params_table, calld->path);
+    if (calld->method_params != nullptr) {
+      // If the deadline from the service config is shorter than the one
+      // from the client API, reset the deadline timer.
+      if (chand->deadline_checking_enabled &&
+          calld->method_params->timeout() != 0) {
+        const grpc_millis per_method_deadline =
+            grpc_timespec_to_millis_round_up(calld->call_start_time) +
+            calld->method_params->timeout();
+        if (per_method_deadline < calld->deadline) {
+          calld->deadline = per_method_deadline;
+          grpc_deadline_state_reset(elem, calld->deadline);
+        }
+      }
+    }
+  }
+  // If no retry policy, disable retries.
+  // TODO(roth): Remove this when adding support for transparent retries.
+  if (calld->method_params == nullptr ||
+      calld->method_params->retry_policy() == nullptr) {
+    calld->enable_retries = false;
+  }
+}
+
// Starts a pick on chand->lb_policy.
// Returns true if pick is completed synchronously.
static bool pick_callback_start_locked(grpc_call_element* elem) {
@@ -1097,33 +2627,46 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy.get());
}
- apply_service_config_to_call_locked(elem);
+ // Only get service config data on the first attempt.
+ if (calld->num_attempts_completed == 0) {
+ apply_service_config_to_call_locked(elem);
+ }
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
- uint32_t initial_metadata_flags =
- calld->initial_metadata_batch->payload->send_initial_metadata
- .send_initial_metadata_flags;
+ //
+ // The send_initial_metadata batch will be the first one in the list,
+ // as set by get_batch_index() above.
+ calld->pick.initial_metadata =
+ calld->seen_send_initial_metadata
+ ? &calld->send_initial_metadata
+ : calld->pending_batches[0]
+ .batch->payload->send_initial_metadata.send_initial_metadata;
+ uint32_t send_initial_metadata_flags =
+ calld->seen_send_initial_metadata
+ ? calld->send_initial_metadata_flags
+ : calld->pending_batches[0]
+ .batch->payload->send_initial_metadata
+ .send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
- initial_metadata_flags &
+ send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->method_params != nullptr &&
- calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
+ calld->method_params->wait_for_ready() !=
+ ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
- if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
- initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ if (calld->method_params->wait_for_ready() ==
+ ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
+ send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
- initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
- calld->pick.initial_metadata =
- calld->initial_metadata_batch->payload->send_initial_metadata
- .send_initial_metadata;
- calld->pick.initial_metadata_flags = initial_metadata_flags;
- GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
+ calld->pick.initial_metadata_flags = send_initial_metadata_flags;
+ GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
- calld->pick.on_complete = &calld->lb_pick_closure;
+ calld->pick.on_complete = &calld->pick_closure;
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
if (pick_done) {
@@ -1137,7 +2680,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
- GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+ GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
pick_callback_cancel_locked, elem,
grpc_combiner_scheduler(chand->combiner)));
}
@@ -1186,8 +2729,6 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
"Pick cancelled", &error, 1));
}
-static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
-
static void pick_after_resolver_result_done_locked(void* arg,
grpc_error* error) {
pick_after_resolver_result_args* args =
@@ -1224,7 +2765,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
async_pick_done_locked(elem, GRPC_ERROR_NONE);
}
}
- // TODO(roth): It should be impossible for chand->lb_policy to be NULL
+ // TODO(roth): It should be impossible for chand->lb_policy to be nullptr
// here, so the rest of this code should never actually be executed.
// However, we have reports of a crash on iOS that triggers this case,
// so we are temporarily adding this to restore branches that were
@@ -1277,6 +2818,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
+ GPR_ASSERT(calld->subchannel_call == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) {
@@ -1305,24 +2847,9 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
chand->interested_parties);
}
-static void on_complete(void* arg, grpc_error* error) {
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
- call_data* calld = static_cast<call_data*>(elem->call_data);
- if (calld->retry_throttle_data != nullptr) {
- if (error == GRPC_ERROR_NONE) {
- grpc_server_retry_throttle_data_record_success(
- calld->retry_throttle_data);
- } else {
- // TODO(roth): In a subsequent PR, check the return value here and
- // decide whether or not to retry. Note that we should only
- // record failures whose statuses match the configured retryable
- // or non-fatal status codes.
- grpc_server_retry_throttle_data_record_failure(
- calld->retry_throttle_data);
- }
- }
- GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
-}
+//
+// filter call vtable functions
+//
static void cc_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
@@ -1333,46 +2860,47 @@ static void cc_start_transport_stream_op_batch(
grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
}
// If we've previously been cancelled, immediately fail any new batches.
- if (calld->error != GRPC_ERROR_NONE) {
+ if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(calld->error));
+ chand, calld, grpc_error_string(calld->cancel_error));
}
+ // Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
- batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+ batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
return;
}
+ // Handle cancellation.
if (batch->cancel_stream) {
// Stash a copy of cancel_error in our call data, so that we can use
// it for subsequent operations. This ensures that if the call is
// cancelled before any batches are passed down (e.g., if the deadline
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
- GRPC_ERROR_UNREF(calld->error);
- calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->cancel_error =
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
- calld, grpc_error_string(calld->error));
+ calld, grpc_error_string(calld->cancel_error));
}
- // If we have a subchannel call, send the cancellation batch down.
- // Otherwise, fail all pending batches.
- if (calld->subchannel_call != nullptr) {
- grpc_subchannel_call_process_op(calld->subchannel_call, batch);
+ // If we do not have a subchannel call (i.e., a pick has not yet
+ // been started), fail all pending batches. Otherwise, send the
+ // cancellation down to the subchannel call.
+ if (calld->subchannel_call == nullptr) {
+ pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
+ false /* yield_call_combiner */);
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
} else {
- waiting_for_pick_batches_add(calld, batch);
- waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
+ // Note: This will release the call combiner.
+ grpc_subchannel_call_process_op(calld->subchannel_call, batch);
}
return;
}
- // Intercept on_complete for recv_trailing_metadata so that we can
- // check retry throttle status.
- if (batch->recv_trailing_metadata) {
- GPR_ASSERT(batch->on_complete != nullptr);
- calld->original_on_complete = batch->on_complete;
- GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
- grpc_schedule_on_exec_ctx);
- batch->on_complete = &calld->on_complete;
- }
+ // Add the batch to the pending list.
+ pending_batches_add(elem, batch);
// Check if we've already gotten a subchannel call.
// Note that once we have completed the pick, we do not need to enter
// the channel combiner, which is more efficient (especially for
@@ -1380,15 +2908,13 @@ static void cc_start_transport_stream_op_batch(
if (calld->subchannel_call != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+ "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
calld, calld->subchannel_call);
}
- grpc_subchannel_call_process_op(calld->subchannel_call, batch);
+ pending_batches_resume(elem);
return;
}
// We do not yet have a subchannel call.
- // Add the batch to the waiting-for-pick list.
- waiting_for_pick_batches_add(calld, batch);
// For batches containing a send_initial_metadata op, enter the channel
// combiner to start a pick.
if (batch->send_initial_metadata) {
@@ -1428,6 +2954,7 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
calld->deadline);
}
+ calld->enable_retries = chand->enable_retries;
return GRPC_ERROR_NONE;
}
@@ -1441,10 +2968,8 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
grpc_deadline_state_destroy(elem);
}
grpc_slice_unref_internal(calld->path);
- if (calld->method_params != nullptr) {
- method_parameters_unref(calld->method_params);
- }
- GRPC_ERROR_UNREF(calld->error);
+ calld->method_params.reset();
+ GRPC_ERROR_UNREF(calld->cancel_error);
if (calld->subchannel_call != nullptr) {
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
@@ -1452,7 +2977,9 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call");
}
- GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+ GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
+ }
if (calld->pick.connected_subchannel != nullptr) {
calld->pick.connected_subchannel.reset();
}
@@ -1652,3 +3179,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
+
+grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
+ grpc_call_element* elem) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ return calld->subchannel_call;
+}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 7abd7f37f9..441efd5e23 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -30,47 +30,41 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/security/credentials/credentials.h"
-#include "src/core/lib/security/transport/lb_targets_info.h"
+#include "src/core/lib/security/transport/target_authority_table.h"
#include "src/core/lib/slice/slice_internal.h"
-static void destroy_balancer_name(void* balancer_name) {
- gpr_free(balancer_name);
-}
-
-static grpc_slice_hash_table_entry targets_info_entry_create(
- const char* address, const char* balancer_name) {
- grpc_slice_hash_table_entry entry;
- entry.key = grpc_slice_from_copied_string(address);
- entry.value = gpr_strdup(balancer_name);
- return entry;
-}
+namespace grpc_core {
+namespace {
-static int balancer_name_cmp_fn(void* a, void* b) {
- const char* a_str = static_cast<const char*>(a);
- const char* b_str = static_cast<const char*>(b);
- return strcmp(a_str, b_str);
+int BalancerNameCmp(const grpc_core::UniquePtr<char>& a,
+ const grpc_core::UniquePtr<char>& b) {
+ return strcmp(a.get(), b.get());
}
-static grpc_slice_hash_table* build_targets_info_table(
+RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
grpc_lb_addresses* addresses) {
- grpc_slice_hash_table_entry* targets_info_entries =
- static_cast<grpc_slice_hash_table_entry*>(
- gpr_zalloc(sizeof(*targets_info_entries) * addresses->num_addresses));
+ TargetAuthorityTable::Entry* target_authority_entries =
+ static_cast<TargetAuthorityTable::Entry*>(gpr_zalloc(
+ sizeof(*target_authority_entries) * addresses->num_addresses));
for (size_t i = 0; i < addresses->num_addresses; ++i) {
char* addr_str;
GPR_ASSERT(grpc_sockaddr_to_string(
&addr_str, &addresses->addresses[i].address, true) > 0);
- targets_info_entries[i] = targets_info_entry_create(
- addr_str, addresses->addresses[i].balancer_name);
+ target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
+ target_authority_entries[i].value.reset(
+ gpr_strdup(addresses->addresses[i].balancer_name));
gpr_free(addr_str);
}
- grpc_slice_hash_table* targets_info = grpc_slice_hash_table_create(
- addresses->num_addresses, targets_info_entries, destroy_balancer_name,
- balancer_name_cmp_fn);
- gpr_free(targets_info_entries);
- return targets_info;
+ RefCountedPtr<TargetAuthorityTable> target_authority_table =
+ TargetAuthorityTable::Create(addresses->num_addresses,
+ target_authority_entries, BalancerNameCmp);
+ gpr_free(target_authority_entries);
+ return target_authority_table;
}
+} // namespace
+} // namespace grpc_core
+
grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args(
grpc_channel_args* args) {
const char* args_to_remove[1];
@@ -83,9 +77,11 @@ grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args(
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
grpc_lb_addresses* addresses =
static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
- grpc_slice_hash_table* targets_info = build_targets_info_table(addresses);
+ grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable>
+ target_authority_table = grpc_core::CreateTargetAuthorityTable(addresses);
args_to_add[num_args_to_add++] =
- grpc_lb_targets_info_create_channel_arg(targets_info);
+ grpc_core::CreateTargetAuthorityTableChannelArg(
+ target_authority_table.get());
// Substitute the channel credentials with a version without call
// credentials: the load balancer is not necessarily trusted to handle
// bearer token credentials.
@@ -105,7 +101,6 @@ grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args(
args, args_to_remove, num_args_to_remove, args_to_add, num_args_to_add);
// Clean up.
grpc_channel_args_destroy(args);
- grpc_slice_hash_table_unref(targets_info);
if (creds_sans_call_creds != nullptr) {
grpc_channel_credentials_unref(creds_sans_call_creds);
}
diff --git a/src/core/ext/filters/client_channel/method_params.cc b/src/core/ext/filters/client_channel/method_params.cc
new file mode 100644
index 0000000000..374b87e170
--- /dev/null
+++ b/src/core/ext/filters/client_channel/method_params.cc
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/filters/client_channel/method_params.h"
+#include "src/core/ext/filters/client_channel/status_util.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
+
+// As per the retry design, we do not allow more than 5 retry attempts.
+#define MAX_MAX_RETRY_ATTEMPTS 5
+
+namespace grpc_core {
+namespace internal {
+
+namespace {
+
+bool ParseWaitForReady(
+ grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
+ if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
+ return false;
+ }
+ *wait_for_ready = field->type == GRPC_JSON_TRUE
+ ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
+ : ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
+ return true;
+}
+
+// Parses a JSON field of the form generated for a google.proto.Duration
+// proto message, as per:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+bool ParseDuration(grpc_json* field, grpc_millis* duration) {
+ if (field->type != GRPC_JSON_STRING) return false;
+ size_t len = strlen(field->value);
+ if (field->value[len - 1] != 's') return false;
+ UniquePtr<char> buf(gpr_strdup(field->value));
+ *(buf.get() + len - 1) = '\0'; // Remove trailing 's'.
+ char* decimal_point = strchr(buf.get(), '.');
+ int nanos = 0;
+ if (decimal_point != nullptr) {
+ *decimal_point = '\0';
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
+ return false;
+ }
+ int num_digits = static_cast<int>(strlen(decimal_point + 1));
+ if (num_digits > 9) { // We don't accept greater precision than nanos.
+ return false;
+ }
+ for (int i = 0; i < (9 - num_digits); ++i) {
+ nanos *= 10;
+ }
+ }
+ int seconds =
+ decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get());
+ if (seconds == -1) return false;
+ *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
+ return true;
+}
+
+UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
+ grpc_json* field) {
+ auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
+ if (field->type != GRPC_JSON_OBJECT) return nullptr;
+ for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ sub_field = sub_field->next) {
+ if (sub_field->key == nullptr) return nullptr;
+ if (strcmp(sub_field->key, "maxAttempts") == 0) {
+ if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
+ if (retry_policy->max_attempts <= 1) return nullptr;
+ if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
+ gpr_log(GPR_ERROR,
+ "service config: clamped retryPolicy.maxAttempts at %d",
+ MAX_MAX_RETRY_ATTEMPTS);
+ retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
+ }
+ } else if (strcmp(sub_field->key, "initialBackoff") == 0) {
+ if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->initial_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "maxBackoff") == 0) {
+ if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->max_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
+ if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
+ 1) {
+ return nullptr;
+ }
+ if (retry_policy->backoff_multiplier <= 0) return nullptr;
+ } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
+ if (!retry_policy->retryable_status_codes.Empty()) {
+ return nullptr; // Duplicate.
+ }
+ if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
+ for (grpc_json* element = sub_field->child; element != nullptr;
+ element = element->next) {
+ if (element->type != GRPC_JSON_STRING) return nullptr;
+ grpc_status_code status;
+ if (!grpc_status_code_from_string(element->value, &status)) {
+ return nullptr;
+ }
+ retry_policy->retryable_status_codes.Add(status);
+ }
+ if (retry_policy->retryable_status_codes.Empty()) return nullptr;
+ }
+ }
+ // Make sure required fields are set.
+ if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
+ retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
+ retry_policy->retryable_status_codes.Empty()) {
+ return nullptr;
+ }
+ return retry_policy;
+}
+
+} // namespace
+
+RefCountedPtr<ClientChannelMethodParams>
+ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
+ RefCountedPtr<ClientChannelMethodParams> method_params =
+ MakeRefCounted<ClientChannelMethodParams>();
+ for (grpc_json* field = json->child; field != nullptr; field = field->next) {
+ if (field->key == nullptr) continue;
+ if (strcmp(field->key, "waitForReady") == 0) {
+ if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
+ return nullptr; // Duplicate.
+ }
+ if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
+ return nullptr;
+ }
+ } else if (strcmp(field->key, "timeout") == 0) {
+ if (method_params->timeout_ > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
+ } else if (strcmp(field->key, "retryPolicy") == 0) {
+ if (method_params->retry_policy_ != nullptr) {
+ return nullptr; // Duplicate.
+ }
+ method_params->retry_policy_ = ParseRetryPolicy(field);
+ if (method_params->retry_policy_ == nullptr) return nullptr;
+ }
+ }
+ return method_params;
+}
+
+} // namespace internal
+} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/method_params.h b/src/core/ext/filters/client_channel/method_params.h
new file mode 100644
index 0000000000..48ece29867
--- /dev/null
+++ b/src/core/ext/filters/client_channel/method_params.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/status_util.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
+#include "src/core/lib/json/json.h"
+
+namespace grpc_core {
+namespace internal {
+
+class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
+ public:
+ enum WaitForReady {
+ WAIT_FOR_READY_UNSET = 0,
+ WAIT_FOR_READY_FALSE,
+ WAIT_FOR_READY_TRUE
+ };
+
+ struct RetryPolicy {
+ int max_attempts = 0;
+ grpc_millis initial_backoff = 0;
+ grpc_millis max_backoff = 0;
+ float backoff_multiplier = 0;
+ StatusCodeSet retryable_status_codes;
+ };
+
+ /// Creates a method_parameters object from \a json.
+ /// Intended for use with ServiceConfig::CreateMethodConfigTable().
+ static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
+ const grpc_json* json);
+
+ grpc_millis timeout() const { return timeout_; }
+ WaitForReady wait_for_ready() const { return wait_for_ready_; }
+ const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T, typename... Args>
+ friend T* grpc_core::New(Args&&... args);
+
+ ClientChannelMethodParams() {}
+ virtual ~ClientChannelMethodParams() {}
+
+ grpc_millis timeout_ = 0;
+ WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
+ UniquePtr<RetryPolicy> retry_policy_;
+};
+
+} // namespace internal
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H */
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index a24e8ff352..aa93e5d8de 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -295,7 +295,7 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
size_t num_args_to_add = 0;
new_args[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses_);
- grpc_service_config* service_config = nullptr;
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config;
char* service_config_string = nullptr;
if (r->service_config_json_ != nullptr) {
service_config_string = ChooseServiceConfig(r->service_config_json_);
@@ -306,10 +306,11 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
(char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
- service_config = grpc_service_config_create(service_config_string);
+ service_config =
+ grpc_core::ServiceConfig::Create(service_config_string);
if (service_config != nullptr) {
const char* lb_policy_name =
- grpc_service_config_get_lb_policy_name(service_config);
+ service_config->GetLoadBalancingPolicyName();
if (lb_policy_name != nullptr) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
@@ -322,7 +323,6 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
result = grpc_channel_args_copy_and_add_and_remove(
r->channel_args_, args_to_remove, num_args_to_remove, new_args,
num_args_to_add);
- if (service_config != nullptr) grpc_service_config_destroy(service_config);
gpr_free(service_config_string);
grpc_lb_addresses_destroy(r->lb_addresses_);
// Reset backoff state so that we start from the beginning when the
diff --git a/src/core/ext/filters/client_channel/retry_throttle.cc b/src/core/ext/filters/client_channel/retry_throttle.cc
index 450a332342..45de6667c8 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.cc
+++ b/src/core/ext/filters/client_channel/retry_throttle.cc
@@ -40,7 +40,7 @@ struct grpc_server_retry_throttle_data {
int milli_token_ratio;
gpr_atm milli_tokens;
// A pointer to the replacement for this grpc_server_retry_throttle_data
- // entry. If non-NULL, then this entry is stale and must not be used.
+ // entry. If non-nullptr, then this entry is stale and must not be used.
// We hold a reference to the replacement.
gpr_atm replacement;
};
@@ -58,6 +58,7 @@ static void get_replacement_throttle_data_if_needed(
bool grpc_server_retry_throttle_data_record_failure(
grpc_server_retry_throttle_data* throttle_data) {
+ if (throttle_data == nullptr) return true;
// First, check if we are stale and need to be replaced.
get_replacement_throttle_data_if_needed(&throttle_data);
// We decrement milli_tokens by 1000 (1 token) for each failure.
@@ -72,6 +73,7 @@ bool grpc_server_retry_throttle_data_record_failure(
void grpc_server_retry_throttle_data_record_success(
grpc_server_retry_throttle_data* throttle_data) {
+ if (throttle_data == nullptr) return;
// First, check if we are stale and need to be replaced.
get_replacement_throttle_data_if_needed(&throttle_data);
// We increment milli_tokens by milli_token_ratio for each success.
diff --git a/src/core/ext/filters/client_channel/status_util.cc b/src/core/ext/filters/client_channel/status_util.cc
new file mode 100644
index 0000000000..11f732ab44
--- /dev/null
+++ b/src/core/ext/filters/client_channel/status_util.cc
@@ -0,0 +1,100 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/status_util.h"
+
+#include "src/core/lib/gpr/useful.h"
+
+typedef struct {
+ const char* str;
+ grpc_status_code status;
+} status_string_entry;
+
+static const status_string_entry g_status_string_entries[] = {
+ {"OK", GRPC_STATUS_OK},
+ {"CANCELLED", GRPC_STATUS_CANCELLED},
+ {"UNKNOWN", GRPC_STATUS_UNKNOWN},
+ {"INVALID_ARGUMENT", GRPC_STATUS_INVALID_ARGUMENT},
+ {"DEADLINE_EXCEEDED", GRPC_STATUS_DEADLINE_EXCEEDED},
+ {"NOT_FOUND", GRPC_STATUS_NOT_FOUND},
+ {"ALREADY_EXISTS", GRPC_STATUS_ALREADY_EXISTS},
+ {"PERMISSION_DENIED", GRPC_STATUS_PERMISSION_DENIED},
+ {"UNAUTHENTICATED", GRPC_STATUS_UNAUTHENTICATED},
+ {"RESOURCE_EXHAUSTED", GRPC_STATUS_RESOURCE_EXHAUSTED},
+ {"FAILED_PRECONDITION", GRPC_STATUS_FAILED_PRECONDITION},
+ {"ABORTED", GRPC_STATUS_ABORTED},
+ {"OUT_OF_RANGE", GRPC_STATUS_OUT_OF_RANGE},
+ {"UNIMPLEMENTED", GRPC_STATUS_UNIMPLEMENTED},
+ {"INTERNAL", GRPC_STATUS_INTERNAL},
+ {"UNAVAILABLE", GRPC_STATUS_UNAVAILABLE},
+ {"DATA_LOSS", GRPC_STATUS_DATA_LOSS},
+};
+
+bool grpc_status_code_from_string(const char* status_str,
+ grpc_status_code* status) {
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(g_status_string_entries); ++i) {
+ if (strcmp(status_str, g_status_string_entries[i].str) == 0) {
+ *status = g_status_string_entries[i].status;
+ return true;
+ }
+ }
+ return false;
+}
+
+const char* grpc_status_code_to_string(grpc_status_code status) {
+ switch (status) {
+ case GRPC_STATUS_OK:
+ return "OK";
+ case GRPC_STATUS_CANCELLED:
+ return "CANCELLED";
+ case GRPC_STATUS_UNKNOWN:
+ return "UNKNOWN";
+ case GRPC_STATUS_INVALID_ARGUMENT:
+ return "INVALID_ARGUMENT";
+ case GRPC_STATUS_DEADLINE_EXCEEDED:
+ return "DEADLINE_EXCEEDED";
+ case GRPC_STATUS_NOT_FOUND:
+ return "NOT_FOUND";
+ case GRPC_STATUS_ALREADY_EXISTS:
+ return "ALREADY_EXISTS";
+ case GRPC_STATUS_PERMISSION_DENIED:
+ return "PERMISSION_DENIED";
+ case GRPC_STATUS_UNAUTHENTICATED:
+ return "UNAUTHENTICATED";
+ case GRPC_STATUS_RESOURCE_EXHAUSTED:
+ return "RESOURCE_EXHAUSTED";
+ case GRPC_STATUS_FAILED_PRECONDITION:
+ return "FAILED_PRECONDITION";
+ case GRPC_STATUS_ABORTED:
+ return "ABORTED";
+ case GRPC_STATUS_OUT_OF_RANGE:
+ return "OUT_OF_RANGE";
+ case GRPC_STATUS_UNIMPLEMENTED:
+ return "UNIMPLEMENTED";
+ case GRPC_STATUS_INTERNAL:
+ return "INTERNAL";
+ case GRPC_STATUS_UNAVAILABLE:
+ return "UNAVAILABLE";
+ case GRPC_STATUS_DATA_LOSS:
+ return "DATA_LOSS";
+ default:
+ return "UNKNOWN";
+ }
+}
diff --git a/src/core/ext/filters/client_channel/status_util.h b/src/core/ext/filters/client_channel/status_util.h
new file mode 100644
index 0000000000..e018709730
--- /dev/null
+++ b/src/core/ext/filters/client_channel/status_util.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_STATUS_UTIL_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_STATUS_UTIL_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/status.h>
+
+#include <stdbool.h>
+#include <string.h>
+
+/// If \a status_str is a valid status string, sets \a status to the
+/// corresponding status value and returns true.
+bool grpc_status_code_from_string(const char* status_str,
+ grpc_status_code* status);
+
+/// Returns the string form of \a status, or "UNKNOWN" if invalid.
+const char* grpc_status_code_to_string(grpc_status_code status);
+
+namespace grpc_core {
+namespace internal {
+
+/// A set of grpc_status_code values.
+class StatusCodeSet {
+ public:
+ bool Empty() const { return status_code_mask_ == 0; }
+
+ void Add(grpc_status_code status) { status_code_mask_ |= (1 << status); }
+
+ bool Contains(grpc_status_code status) const {
+ return status_code_mask_ & (1 << status);
+ }
+
+ private:
+ int status_code_mask_ = 0; // A bitfield of status codes in the set.
+};
+
+} // namespace internal
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_STATUS_UTIL_H */
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 1304b4a6ad..cae7cc35e3 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -659,7 +659,6 @@ static void on_subchannel_connected(void* arg, grpc_error* error) {
static void subchannel_call_destroy(void* call, grpc_error* error) {
GPR_TIMER_SCOPE("grpc_subchannel_call_unref.destroy", 0);
grpc_subchannel_call* c = static_cast<grpc_subchannel_call*>(call);
- GPR_ASSERT(c->schedule_closure_after_destroy != nullptr);
grpc_core::ConnectedSubchannel* connection = c->connection;
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
c->schedule_closure_after_destroy);
@@ -673,9 +672,10 @@ void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
call->schedule_closure_after_destroy = closure;
}
-void grpc_subchannel_call_ref(
+grpc_subchannel_call* grpc_subchannel_call_ref(
grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
+ return c;
}
void grpc_subchannel_call_unref(
@@ -705,6 +705,13 @@ const grpc_subchannel_key* grpc_subchannel_get_key(
return subchannel->key;
}
+void* grpc_connected_subchannel_call_get_parent_data(
+ grpc_subchannel_call* subchannel_call) {
+ grpc_channel_stack* chanstk = subchannel_call->connection->channel_stack();
+ return (char*)subchannel_call + sizeof(grpc_subchannel_call) +
+ chanstk->call_stack_size;
+}
+
grpc_call_stack* grpc_subchannel_call_get_call_stack(
grpc_subchannel_call* subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
@@ -776,8 +783,8 @@ void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
*call = static_cast<grpc_subchannel_call*>(gpr_arena_alloc(
- args.arena,
- sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size));
+ args.arena, sizeof(grpc_subchannel_call) +
+ channel_stack_->call_stack_size + args.parent_data_size));
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
RefCountedPtr<ConnectedSubchannel> connection =
Ref(DEBUG_LOCATION, "subchannel_call");
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 7f997d9924..e23aec12df 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -81,6 +81,7 @@ class ConnectedSubchannel : public RefCountedWithTracing<ConnectedSubchannel> {
gpr_arena* arena;
grpc_call_context_element* context;
grpc_call_combiner* call_combiner;
+ size_t parent_data_size;
};
explicit ConnectedSubchannel(grpc_channel_stack* channel_stack);
@@ -109,11 +110,17 @@ grpc_subchannel* grpc_subchannel_weak_ref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(
grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_ref(
+grpc_subchannel_call* grpc_subchannel_call_ref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref(
grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+/** Returns a pointer to the parent data associated with \a subchannel_call.
+ The data will be of the size specified in \a parent_data_size
+ field of the args passed to \a grpc_connected_subchannel_create_call(). */
+void* grpc_connected_subchannel_call_get_parent_data(
+ grpc_subchannel_call* subchannel_call);
+
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel* channel, grpc_error** error);
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index acb1d66fa8..1fe8288bd0 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -370,6 +370,9 @@ static void channel_connectivity_changed(void* arg, grpc_error* error) {
max_idle_timer, and prevent max_idle_timer from being started in the
future. */
increase_call_count(chand);
+ if (gpr_atm_acq_load(&chand->idle_state) == MAX_IDLE_STATE_SEEN_EXIT_IDLE) {
+ grpc_timer_cancel(&chand->max_idle_timer);
+ }
}
}
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 63a9e566d3..b1b14dde02 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -29,6 +29,8 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/service_config.h"
@@ -37,27 +39,29 @@ typedef struct {
int max_recv_size;
} message_size_limits;
-typedef struct {
- gpr_refcount refs;
- message_size_limits limits;
-} refcounted_message_size_limits;
+namespace grpc_core {
+namespace {
-static void* refcounted_message_size_limits_ref(void* value) {
- refcounted_message_size_limits* limits =
- static_cast<refcounted_message_size_limits*>(value);
- gpr_ref(&limits->refs);
- return value;
-}
+class MessageSizeLimits : public RefCounted<MessageSizeLimits> {
+ public:
+ static RefCountedPtr<MessageSizeLimits> CreateFromJson(const grpc_json* json);
-static void refcounted_message_size_limits_unref(void* value) {
- refcounted_message_size_limits* limits =
- static_cast<refcounted_message_size_limits*>(value);
- if (gpr_unref(&limits->refs)) {
- gpr_free(value);
+ const message_size_limits& limits() const { return limits_; }
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T, typename... Args>
+ friend T* grpc_core::New(Args&&... args);
+
+ MessageSizeLimits(int max_send_size, int max_recv_size) {
+ limits_.max_send_size = max_send_size;
+ limits_.max_recv_size = max_recv_size;
}
-}
-static void* refcounted_message_size_limits_create_from_json(
+ message_size_limits limits_;
+};
+
+RefCountedPtr<MessageSizeLimits> MessageSizeLimits::CreateFromJson(
const grpc_json* json) {
int max_request_message_bytes = -1;
int max_response_message_bytes = -1;
@@ -79,16 +83,15 @@ static void* refcounted_message_size_limits_create_from_json(
if (max_response_message_bytes == -1) return nullptr;
}
}
- refcounted_message_size_limits* value =
- static_cast<refcounted_message_size_limits*>(
- gpr_malloc(sizeof(refcounted_message_size_limits)));
- gpr_ref_init(&value->refs, 1);
- value->limits.max_send_size = max_request_message_bytes;
- value->limits.max_recv_size = max_response_message_bytes;
- return value;
+ return MakeRefCounted<MessageSizeLimits>(max_request_message_bytes,
+ max_response_message_bytes);
}
+} // namespace
+} // namespace grpc_core
+
namespace {
+
struct call_data {
grpc_call_combiner* call_combiner;
message_size_limits limits;
@@ -105,8 +108,11 @@ struct call_data {
struct channel_data {
message_size_limits limits;
// Maps path names to refcounted_message_size_limits structs.
- grpc_slice_hash_table* method_limit_table;
+ grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
+ method_limit_table;
};
+
} // namespace
// Callback invoked when we receive a message. Here we check the max
@@ -185,20 +191,19 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
// size to the receive limit.
calld->limits = chand->limits;
if (chand->method_limit_table != nullptr) {
- refcounted_message_size_limits* limits =
- static_cast<refcounted_message_size_limits*>(
- grpc_method_config_table_get(chand->method_limit_table,
- args->path));
+ grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
+ grpc_core::ServiceConfig::MethodConfigTableLookup(
+ *chand->method_limit_table, args->path);
if (limits != nullptr) {
- if (limits->limits.max_send_size >= 0 &&
- (limits->limits.max_send_size < calld->limits.max_send_size ||
+ if (limits->limits().max_send_size >= 0 &&
+ (limits->limits().max_send_size < calld->limits.max_send_size ||
calld->limits.max_send_size < 0)) {
- calld->limits.max_send_size = limits->limits.max_send_size;
+ calld->limits.max_send_size = limits->limits().max_send_size;
}
- if (limits->limits.max_recv_size >= 0 &&
- (limits->limits.max_recv_size < calld->limits.max_recv_size ||
+ if (limits->limits().max_recv_size >= 0 &&
+ (limits->limits().max_recv_size < calld->limits.max_recv_size ||
calld->limits.max_recv_size < 0)) {
- calld->limits.max_recv_size = limits->limits.max_recv_size;
+ calld->limits.max_recv_size = limits->limits().max_recv_size;
}
}
}
@@ -253,15 +258,11 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
const char* service_config_str = grpc_channel_arg_get_string(channel_arg);
if (service_config_str != nullptr) {
- grpc_service_config* service_config =
- grpc_service_config_create(service_config_str);
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
+ grpc_core::ServiceConfig::Create(service_config_str);
if (service_config != nullptr) {
- chand->method_limit_table =
- grpc_service_config_create_method_config_table(
- service_config, refcounted_message_size_limits_create_from_json,
- refcounted_message_size_limits_ref,
- refcounted_message_size_limits_unref);
- grpc_service_config_destroy(service_config);
+ chand->method_limit_table = service_config->CreateMethodConfigTable(
+ grpc_core::MessageSizeLimits::CreateFromJson);
}
}
return GRPC_ERROR_NONE;
@@ -270,7 +271,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
// Destructor for channel_data.
static void destroy_channel_elem(grpc_channel_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- grpc_slice_hash_table_unref(chand->method_limit_table);
+ chand->method_limit_table.reset();
}
const grpc_channel_filter grpc_message_size_filter = {
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
index dcfcd243a9..a82009ff69 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
@@ -30,10 +30,11 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/security_connector.h"
-#include "src/core/lib/security/transport/lb_targets_info.h"
+#include "src/core/lib/security/transport/target_authority_table.h"
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
@@ -73,11 +74,11 @@ static grpc_subchannel_args* get_secure_naming_subchannel_args(
const char* server_uri_path;
server_uri_path =
server_uri->path[0] == '/' ? server_uri->path + 1 : server_uri->path;
- const grpc_slice_hash_table* targets_info =
- grpc_lb_targets_info_find_in_args(args->args);
- char* target_name_to_check = nullptr;
- if (targets_info != nullptr) { // LB channel
- // Find the balancer name for the target.
+ const grpc_core::TargetAuthorityTable* target_authority_table =
+ grpc_core::FindTargetAuthorityTableInArgs(args->args);
+ grpc_core::UniquePtr<char> authority;
+ if (target_authority_table != nullptr) {
+ // Find the authority for the target.
const char* target_uri_str =
grpc_get_subchannel_address_uri_arg(args->args);
grpc_uri* target_uri =
@@ -86,37 +87,33 @@ static grpc_subchannel_args* get_secure_naming_subchannel_args(
if (target_uri->path[0] != '\0') { // "path" may be empty
const grpc_slice key = grpc_slice_from_static_string(
target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path);
- const char* value = static_cast<const char*>(
- grpc_slice_hash_table_get(targets_info, key));
- if (value != nullptr) target_name_to_check = gpr_strdup(value);
+ const grpc_core::UniquePtr<char>* value =
+ target_authority_table->Get(key);
+ if (value != nullptr) authority.reset(gpr_strdup(value->get()));
grpc_slice_unref_internal(key);
}
- if (target_name_to_check == nullptr) {
- // If the target name to check hasn't already been set, fall back to using
- // SERVER_URI
- target_name_to_check = gpr_strdup(server_uri_path);
- }
grpc_uri_destroy(target_uri);
- } else { // regular channel: the secure name is the original server URI.
- target_name_to_check = gpr_strdup(server_uri_path);
+ }
+ // If the authority hasn't already been set (either because no target
+ // authority table was present or because the target was not present
+ // in the table), fall back to using the original server URI.
+ if (authority == nullptr) {
+ authority.reset(gpr_strdup(server_uri_path));
}
grpc_uri_destroy(server_uri);
- GPR_ASSERT(target_name_to_check != nullptr);
grpc_channel_security_connector* subchannel_security_connector = nullptr;
// Create the security connector using the credentials and target name.
grpc_channel_args* new_args_from_connector = nullptr;
const grpc_security_status security_status =
grpc_channel_credentials_create_security_connector(
- channel_credentials, target_name_to_check, args->args,
+ channel_credentials, authority.get(), args->args,
&subchannel_security_connector, &new_args_from_connector);
if (security_status != GRPC_SECURITY_OK) {
gpr_log(GPR_ERROR,
"Failed to create secure subchannel for secure name '%s'",
- target_name_to_check);
- gpr_free(target_name_to_check);
+ authority.get());
return nullptr;
}
- gpr_free(target_name_to_check);
grpc_arg new_security_connector_arg =
grpc_security_connector_to_arg(&subchannel_security_connector->base);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 89115b66ed..df3fb8c68c 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -1456,8 +1456,10 @@ static void perform_stream_op_locked(void* stream_op,
}
}
if (op_payload->send_initial_metadata.peer_string != nullptr) {
- gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
- (gpr_atm)gpr_strdup(t->peer_string));
+ char* old_peer_string = (char*)gpr_atm_full_xchg(
+ op_payload->send_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ gpr_free(old_peer_string);
}
}
@@ -1571,8 +1573,10 @@ static void perform_stream_op_locked(void* stream_op,
s->trailing_metadata_available =
op_payload->recv_initial_metadata.trailing_metadata_available;
if (op_payload->recv_initial_metadata.peer_string != nullptr) {
- gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
- (gpr_atm)gpr_strdup(t->peer_string));
+ char* old_peer_string = (char*)gpr_atm_full_xchg(
+ op_payload->recv_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ gpr_free(old_peer_string);
}
grpc_chttp2_maybe_complete_recv_initial_metadata(t, s);
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index 58d77b932f..4d7dfd900f 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -69,6 +69,5 @@ void grpc_chttp2_incoming_metadata_buffer_set_deadline(
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch) {
- *batch = buffer->batch;
- grpc_metadata_batch_init(&buffer->batch);
+ grpc_metadata_batch_move(&buffer->batch, batch);
}
diff --git a/src/core/lib/gpr/arena.cc b/src/core/lib/gpr/arena.cc
index 444bb3d719..b02c5b9fb6 100644
--- a/src/core/lib/gpr/arena.cc
+++ b/src/core/lib/gpr/arena.cc
@@ -26,6 +26,49 @@
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
+// Uncomment this to use a simple arena that simply allocates the
+// requested amount of memory for each call to gpr_arena_alloc(). This
+// effectively eliminates the efficiency gain of using an arena, but it
+// may be useful for debugging purposes.
+//#define SIMPLE_ARENA_FOR_DEBUGGING
+
+#ifdef SIMPLE_ARENA_FOR_DEBUGGING
+
+#include <grpc/support/sync.h>
+
+struct gpr_arena {
+ gpr_mu mu;
+ void** ptrs;
+ size_t num_ptrs;
+};
+
+gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
+ gpr_arena* arena = (gpr_arena*)gpr_zalloc(sizeof(*arena));
+ gpr_mu_init(&arena->mu);
+ return arena;
+}
+
+size_t gpr_arena_destroy(gpr_arena* arena) {
+ gpr_mu_destroy(&arena->mu);
+ for (size_t i = 0; i < arena->num_ptrs; ++i) {
+ gpr_free(arena->ptrs[i]);
+ }
+ gpr_free(arena->ptrs);
+ gpr_free(arena);
+ return 1; // Value doesn't matter, since it won't be used.
+}
+
+void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
+ gpr_mu_lock(&arena->mu);
+ arena->ptrs =
+ (void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
+ void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size);
+ gpr_mu_unlock(&arena->mu);
+ return retval;
+}
+
+#else // SIMPLE_ARENA_FOR_DEBUGGING
+
// TODO(roth): We currently assume that all callers need alignment of 16
// bytes, which may be wrong in some cases. As part of converting the
// arena API to C++, we should consider replacing gpr_arena_alloc() with a
@@ -105,3 +148,5 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone));
return ptr + start - z->size_begin;
}
+
+#endif // SIMPLE_ARENA_FOR_DEBUGGING
diff --git a/src/core/lib/gprpp/orphanable.h b/src/core/lib/gprpp/orphanable.h
index 9e9e7f015f..a5bc8d8efc 100644
--- a/src/core/lib/gprpp/orphanable.h
+++ b/src/core/lib/gprpp/orphanable.h
@@ -24,6 +24,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include <cinttypes>
#include <memory>
#include "src/core/lib/debug/trace.h"
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index 02b115a40e..46bfaf7fb8 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -24,6 +24,8 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include <cinttypes>
+
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/debug_location.h"
diff --git a/src/core/lib/gprpp/ref_counted_ptr.h b/src/core/lib/gprpp/ref_counted_ptr.h
index 72088e76ef..388e2ec410 100644
--- a/src/core/lib/gprpp/ref_counted_ptr.h
+++ b/src/core/lib/gprpp/ref_counted_ptr.h
@@ -33,6 +33,7 @@ template <typename T>
class RefCountedPtr {
public:
RefCountedPtr() {}
+ RefCountedPtr(std::nullptr_t) {}
// If value is non-null, we take ownership of a ref to it.
explicit RefCountedPtr(T* value) { value_ = value; }
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.cc b/src/core/lib/security/credentials/fake/fake_credentials.cc
index 46311fa122..858ab6b41b 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.cc
+++ b/src/core/lib/security/credentials/fake/fake_credentials.cc
@@ -32,9 +32,6 @@
/* -- Fake transport security credentials. -- */
-#define GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS \
- "grpc.fake_security.expected_targets"
-
static grpc_security_status fake_transport_security_create_security_connector(
grpc_channel_credentials* c, grpc_call_credentials* call_creds,
const char* target, const grpc_channel_args* args,
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.h b/src/core/lib/security/credentials/fake/fake_credentials.h
index 5166e43167..e89e6e24cc 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.h
+++ b/src/core/lib/security/credentials/fake/fake_credentials.h
@@ -23,6 +23,9 @@
#include "src/core/lib/security/credentials/credentials.h"
+#define GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS \
+ "grpc.fake_security.expected_targets"
+
/* -- Fake transport security credentials. -- */
/* Creates a fake transport security credentials object for testing. */
diff --git a/src/core/lib/security/security_connector/security_connector.cc b/src/core/lib/security/security_connector/security_connector.cc
index b01fd6f769..3cc151bec7 100644
--- a/src/core/lib/security/security_connector/security_connector.cc
+++ b/src/core/lib/security/security_connector/security_connector.cc
@@ -39,9 +39,9 @@
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"
-#include "src/core/lib/security/transport/lb_targets_info.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/security_handshaker.h"
+#include "src/core/lib/security/transport/target_authority_table.h"
#include "src/core/tsi/fake_transport_security.h"
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security_adapter.h"
@@ -463,6 +463,15 @@ static bool fake_channel_check_call_host(grpc_channel_security_connector* sc,
grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_error** error) {
+ grpc_fake_channel_security_connector* c =
+ reinterpret_cast<grpc_fake_channel_security_connector*>(sc);
+ if (c->is_lb_channel) {
+ // TODO(dgq): verify that the host (ie, authority header) matches that of
+ // the LB, as opposed to that of the backends.
+ } else {
+ // TODO(dgq): verify that the host (ie, authority header) matches that of
+ // the backend, not the LB's.
+ }
return true;
}
@@ -514,7 +523,7 @@ grpc_channel_security_connector* grpc_fake_channel_security_connector_create(
c->target = gpr_strdup(target);
const char* expected_targets = grpc_fake_transport_get_expected_targets(args);
c->expected_targets = gpr_strdup(expected_targets);
- c->is_lb_channel = (grpc_lb_targets_info_find_in_args(args) != nullptr);
+ c->is_lb_channel = grpc_core::FindTargetAuthorityTableInArgs(args) != nullptr;
return &c->base;
}
diff --git a/src/core/lib/security/transport/lb_targets_info.cc b/src/core/lib/security/transport/lb_targets_info.cc
deleted file mode 100644
index 155a91e556..0000000000
--- a/src/core/lib/security/transport/lb_targets_info.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <grpc/support/log.h>
-
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/security/transport/lb_targets_info.h"
-
-/* Channel arg key for the mapping of LB server addresses to their names for
- * secure naming purposes. */
-#define GRPC_ARG_LB_SECURE_NAMING_MAP "grpc.lb_secure_naming_map"
-
-static void* targets_info_copy(void* p) {
- return grpc_slice_hash_table_ref(static_cast<grpc_slice_hash_table*>(p));
-}
-static void targets_info_destroy(void* p) {
- grpc_slice_hash_table_unref(static_cast<grpc_slice_hash_table*>(p));
-}
-static int targets_info_cmp(void* a, void* b) {
- return grpc_slice_hash_table_cmp(
- static_cast<const grpc_slice_hash_table*>(a),
- static_cast<const grpc_slice_hash_table*>(b));
-}
-static const grpc_arg_pointer_vtable server_to_balancer_names_vtable = {
- targets_info_copy, targets_info_destroy, targets_info_cmp};
-
-grpc_arg grpc_lb_targets_info_create_channel_arg(
- grpc_slice_hash_table* targets_info) {
- return grpc_channel_arg_pointer_create((char*)GRPC_ARG_LB_SECURE_NAMING_MAP,
- targets_info,
- &server_to_balancer_names_vtable);
-}
-
-grpc_slice_hash_table* grpc_lb_targets_info_find_in_args(
- const grpc_channel_args* args) {
- const grpc_arg* targets_info_arg =
- grpc_channel_args_find(args, GRPC_ARG_LB_SECURE_NAMING_MAP);
- if (targets_info_arg != nullptr) {
- GPR_ASSERT(targets_info_arg->type == GRPC_ARG_POINTER);
- return static_cast<grpc_slice_hash_table*>(
- targets_info_arg->value.pointer.p);
- }
- return nullptr;
-}
diff --git a/src/core/lib/security/transport/target_authority_table.cc b/src/core/lib/security/transport/target_authority_table.cc
new file mode 100644
index 0000000000..1eeb557f6a
--- /dev/null
+++ b/src/core/lib/security/transport/target_authority_table.cc
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/security/transport/target_authority_table.h"
+
+// Channel arg key for the mapping of target addresses to their authorities.
+#define GRPC_ARG_TARGET_AUTHORITY_TABLE "grpc.target_authority_table"
+
+namespace grpc_core {
+namespace {
+
+void* target_authority_table_copy(void* p) {
+ TargetAuthorityTable* table = static_cast<TargetAuthorityTable*>(p);
+ // TODO(roth): When channel_args are converted to C++, pass the
+ // RefCountedPtr<> directly instead of managing the ref manually.
+ table->Ref().release();
+ return p;
+}
+void target_authority_table_destroy(void* p) {
+ TargetAuthorityTable* table = static_cast<TargetAuthorityTable*>(p);
+ table->Unref();
+}
+int target_authority_table_cmp(void* a, void* b) {
+ return TargetAuthorityTable::Cmp(
+ *static_cast<const TargetAuthorityTable*>(a),
+ *static_cast<const TargetAuthorityTable*>(b));
+}
+const grpc_arg_pointer_vtable target_authority_table_arg_vtable = {
+ target_authority_table_copy, target_authority_table_destroy,
+ target_authority_table_cmp};
+
+} // namespace
+
+grpc_arg CreateTargetAuthorityTableChannelArg(TargetAuthorityTable* table) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_TARGET_AUTHORITY_TABLE,
+ table,
+ &target_authority_table_arg_vtable);
+}
+
+TargetAuthorityTable* FindTargetAuthorityTableInArgs(
+ const grpc_channel_args* args) {
+ const grpc_arg* arg =
+ grpc_channel_args_find(args, GRPC_ARG_TARGET_AUTHORITY_TABLE);
+ if (arg != nullptr) {
+ if (arg->type == GRPC_ARG_POINTER) {
+ return static_cast<TargetAuthorityTable*>(arg->value.pointer.p);
+ } else {
+ gpr_log(GPR_ERROR, "value of " GRPC_ARG_TARGET_AUTHORITY_TABLE
+ " channel arg was not pointer type; ignoring");
+ }
+ }
+ return nullptr;
+}
+
+} // namespace grpc_core
diff --git a/src/core/lib/security/transport/target_authority_table.h b/src/core/lib/security/transport/target_authority_table.h
new file mode 100644
index 0000000000..a2e7dc6ac2
--- /dev/null
+++ b/src/core/lib/security/transport/target_authority_table.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_TARGET_AUTHORITY_TABLE_H
+#define GRPC_CORE_LIB_SECURITY_TRANSPORT_TARGET_AUTHORITY_TABLE_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/slice/slice_hash_table.h"
+
+namespace grpc_core {
+
+/// A hash table mapping target addresses to authorities.
+typedef SliceHashTable<UniquePtr<char>> TargetAuthorityTable;
+
+/// Returns a channel argument containing \a table.
+grpc_arg CreateTargetAuthorityTableChannelArg(TargetAuthorityTable* table);
+
+/// Returns the target authority table from \a args or nullptr.
+TargetAuthorityTable* FindTargetAuthorityTableInArgs(
+ const grpc_channel_args* args);
+
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_TARGET_AUTHORITY_TABLE_H */
diff --git a/src/core/lib/slice/slice_hash_table.cc b/src/core/lib/slice/slice_hash_table.cc
deleted file mode 100644
index 9e32321636..0000000000
--- a/src/core/lib/slice/slice_hash_table.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-//
-// Copyright 2016 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/slice/slice_hash_table.h"
-
-#include <stdbool.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/transport/metadata.h"
-
-struct grpc_slice_hash_table {
- gpr_refcount refs;
- void (*destroy_value)(void* value);
- int (*value_cmp)(void* a, void* b);
- size_t size;
- size_t max_num_probes;
- grpc_slice_hash_table_entry* entries;
-};
-
-static bool is_empty(grpc_slice_hash_table_entry* entry) {
- return entry->value == nullptr;
-}
-
-static void grpc_slice_hash_table_add(grpc_slice_hash_table* table,
- grpc_slice key, void* value) {
- GPR_ASSERT(value != nullptr);
- const size_t hash = grpc_slice_hash(key);
- for (size_t offset = 0; offset < table->size; ++offset) {
- const size_t idx = (hash + offset) % table->size;
- if (is_empty(&table->entries[idx])) {
- table->entries[idx].key = key;
- table->entries[idx].value = value;
- // Keep track of the maximum number of probes needed, since this
- // provides an upper bound for lookups.
- if (offset > table->max_num_probes) table->max_num_probes = offset;
- return;
- }
- }
- GPR_ASSERT(false); // Table should never be full.
-}
-
-grpc_slice_hash_table* grpc_slice_hash_table_create(
- size_t num_entries, grpc_slice_hash_table_entry* entries,
- void (*destroy_value)(void* value), int (*value_cmp)(void* a, void* b)) {
- grpc_slice_hash_table* table =
- static_cast<grpc_slice_hash_table*>(gpr_zalloc(sizeof(*table)));
- gpr_ref_init(&table->refs, 1);
- table->destroy_value = destroy_value;
- table->value_cmp = value_cmp;
- // Keep load factor low to improve performance of lookups.
- table->size = num_entries * 2;
- const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
- table->entries =
- static_cast<grpc_slice_hash_table_entry*>(gpr_zalloc(entry_size));
- for (size_t i = 0; i < num_entries; ++i) {
- grpc_slice_hash_table_entry* entry = &entries[i];
- grpc_slice_hash_table_add(table, entry->key, entry->value);
- }
- return table;
-}
-
-grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table) {
- if (table != nullptr) gpr_ref(&table->refs);
- return table;
-}
-
-void grpc_slice_hash_table_unref(grpc_slice_hash_table* table) {
- if (table != nullptr && gpr_unref(&table->refs)) {
- for (size_t i = 0; i < table->size; ++i) {
- grpc_slice_hash_table_entry* entry = &table->entries[i];
- if (!is_empty(entry)) {
- grpc_slice_unref_internal(entry->key);
- table->destroy_value(entry->value);
- }
- }
- gpr_free(table->entries);
- gpr_free(table);
- }
-}
-
-void* grpc_slice_hash_table_get(const grpc_slice_hash_table* table,
- const grpc_slice key) {
- const size_t hash = grpc_slice_hash(key);
- // We cap the number of probes at the max number recorded when
- // populating the table.
- for (size_t offset = 0; offset <= table->max_num_probes; ++offset) {
- const size_t idx = (hash + offset) % table->size;
- if (is_empty(&table->entries[idx])) break;
- if (grpc_slice_eq(table->entries[idx].key, key)) {
- return table->entries[idx].value;
- }
- }
- return nullptr; // Not found.
-}
-
-static int pointer_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
-int grpc_slice_hash_table_cmp(const grpc_slice_hash_table* a,
- const grpc_slice_hash_table* b) {
- int (*const value_cmp_fn_a)(void* a, void* b) =
- a->value_cmp != nullptr ? a->value_cmp : pointer_cmp;
- int (*const value_cmp_fn_b)(void* a, void* b) =
- b->value_cmp != nullptr ? b->value_cmp : pointer_cmp;
- // Compare value_fns
- const int value_fns_cmp =
- GPR_ICMP((void*)value_cmp_fn_a, (void*)value_cmp_fn_b);
- if (value_fns_cmp != 0) return value_fns_cmp;
- // Compare sizes
- if (a->size < b->size) return -1;
- if (a->size > b->size) return 1;
- // Compare rows.
- for (size_t i = 0; i < a->size; ++i) {
- if (is_empty(&a->entries[i])) {
- if (!is_empty(&b->entries[i])) {
- return -1; // a empty but b non-empty
- }
- continue; // both empty, no need to check key or value
- } else if (is_empty(&b->entries[i])) {
- return 1; // a non-empty but b empty
- }
- // neither entry is empty
- const int key_cmp = grpc_slice_cmp(a->entries[i].key, b->entries[i].key);
- if (key_cmp != 0) return key_cmp;
- const int value_cmp =
- value_cmp_fn_a(a->entries[i].value, b->entries[i].value);
- if (value_cmp != 0) return value_cmp;
- }
- return 0;
-}
diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h
index 819bb3b5bc..fbe9cc58e8 100644
--- a/src/core/lib/slice/slice_hash_table.h
+++ b/src/core/lib/slice/slice_hash_table.h
@@ -19,52 +19,183 @@
#include <grpc/support/port_platform.h>
-#include "src/core/lib/transport/metadata.h"
+#include <string.h>
-/** Hash table implementation.
- *
- * This implementation uses open addressing
- * (https://en.wikipedia.org/wiki/Open_addressing) with linear
- * probing (https://en.wikipedia.org/wiki/Linear_probing).
- *
- * The keys are \a grpc_slice objects. The values are arbitrary pointers
- * with a common destroy function.
- *
- * Hash tables are intentionally immutable, to avoid the need for locking.
- */
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+/// Hash table implementation.
+///
+/// This implementation uses open addressing
+/// (https://en.wikipedia.org/wiki/Open_addressing) with linear
+/// probing (https://en.wikipedia.org/wiki/Linear_probing).
+///
+/// The keys are \a grpc_slice objects. The values can be any type.
+///
+/// Hash tables are intentionally immutable, to avoid the need for locking.
+
+namespace grpc_core {
+
+template <typename T>
+class SliceHashTable : public RefCounted<SliceHashTable<T>> {
+ public:
+ struct Entry {
+ grpc_slice key;
+ T value;
+ bool is_set;
+ };
+
+ // Function for comparing values.
+ // TODO(roth): Eliminate this and the Cmp() method from this API once
+ // grpc_channel_args is redesigned to require that keys are unique.
+ typedef int (*ValueCmp)(const T&, const T&);
+
+ /// Creates a new hash table containing \a entries, which is an array
+ /// of length \a num_entries. Takes ownership of all keys and values in \a
+ /// entries. If not null, \a value_cmp will be used to compare values in
+ /// the context of \a Cmp(). If null, raw pointer (\a GPR_ICMP) comparison
+ /// will be used.
+ static RefCountedPtr<SliceHashTable> Create(size_t num_entries,
+ Entry* entries,
+ ValueCmp value_cmp);
+
+ /// Returns the value from the table associated with \a key.
+ /// Returns null if \a key is not found.
+ const T* Get(const grpc_slice& key) const;
+
+ /// Compares \a a vs. \a b.
+ /// A table is considered "smaller" (resp. "greater") if:
+ /// - GPR_ICMP(a->value_cmp, b->value_cmp) < 1 (resp. > 1),
+ /// - else, it contains fewer (resp. more) entries,
+ /// - else, if strcmp(a_key, b_key) < 1 (resp. > 1),
+ /// - else, if value_cmp(a_value, b_value) < 1 (resp. > 1).
+ static int Cmp(const SliceHashTable& a, const SliceHashTable& b);
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T2, typename... Args>
+ friend T2* New(Args&&... args);
+
+ SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp);
+ virtual ~SliceHashTable();
+
+ void Add(grpc_slice key, T& value);
+
+ // Default value comparison function, if none specified by caller.
+ static int DefaultValueCmp(const T& a, const T& b) { return GPR_ICMP(a, b); }
+
+ const ValueCmp value_cmp_;
+ const size_t size_;
+ size_t max_num_probes_;
+ Entry* entries_;
+};
+
+//
+// implementation -- no user-serviceable parts below
+//
+
+template <typename T>
+RefCountedPtr<SliceHashTable<T>> SliceHashTable<T>::Create(size_t num_entries,
+ Entry* entries,
+ ValueCmp value_cmp) {
+ return MakeRefCounted<SliceHashTable<T>>(num_entries, entries, value_cmp);
+}
+
+template <typename T>
+SliceHashTable<T>::SliceHashTable(size_t num_entries, Entry* entries,
+ ValueCmp value_cmp)
+ : value_cmp_(value_cmp),
+ // Keep load factor low to improve performance of lookups.
+ size_(num_entries * 2),
+ max_num_probes_(0) {
+ entries_ = static_cast<Entry*>(gpr_zalloc(sizeof(Entry) * size_));
+ for (size_t i = 0; i < num_entries; ++i) {
+ Entry* entry = &entries[i];
+ Add(entry->key, entry->value);
+ }
+}
+
+template <typename T>
+SliceHashTable<T>::~SliceHashTable() {
+ for (size_t i = 0; i < size_; ++i) {
+ Entry& entry = entries_[i];
+ if (entry.is_set) {
+ grpc_slice_unref_internal(entry.key);
+ entry.value.~T();
+ }
+ }
+ gpr_free(entries_);
+}
+
+template <typename T>
+void SliceHashTable<T>::Add(grpc_slice key, T& value) {
+ const size_t hash = grpc_slice_hash(key);
+ for (size_t offset = 0; offset < size_; ++offset) {
+ const size_t idx = (hash + offset) % size_;
+ if (!entries_[idx].is_set) {
+ entries_[idx].is_set = true;
+ entries_[idx].key = key;
+ entries_[idx].value = std::move(value);
+ // Keep track of the maximum number of probes needed, since this
+ // provides an upper bound for lookups.
+ if (offset > max_num_probes_) max_num_probes_ = offset;
+ return;
+ }
+ }
+ GPR_ASSERT(false); // Table should never be full.
+}
+
+template <typename T>
+const T* SliceHashTable<T>::Get(const grpc_slice& key) const {
+ const size_t hash = grpc_slice_hash(key);
+ // We cap the number of probes at the max number recorded when
+ // populating the table.
+ for (size_t offset = 0; offset <= max_num_probes_; ++offset) {
+ const size_t idx = (hash + offset) % size_;
+ if (!entries_[idx].is_set) break;
+ if (grpc_slice_eq(entries_[idx].key, key)) {
+ return &entries_[idx].value;
+ }
+ }
+ return nullptr; // Not found.
+}
+
+template <typename T>
+int SliceHashTable<T>::Cmp(const SliceHashTable& a, const SliceHashTable& b) {
+ ValueCmp value_cmp_a =
+ a.value_cmp_ != nullptr ? a.value_cmp_ : DefaultValueCmp;
+ ValueCmp value_cmp_b =
+ b.value_cmp_ != nullptr ? b.value_cmp_ : DefaultValueCmp;
+ // Compare value_fns
+ const int value_fns_cmp = GPR_ICMP((void*)value_cmp_a, (void*)value_cmp_b);
+ if (value_fns_cmp != 0) return value_fns_cmp;
+ // Compare sizes
+ if (a.size_ < b.size_) return -1;
+ if (a.size_ > b.size_) return 1;
+ // Compare rows.
+ for (size_t i = 0; i < a.size_; ++i) {
+ if (!a.entries_[i].is_set) {
+ if (b.entries_[i].is_set) {
+ return -1; // a empty but b non-empty
+ }
+ continue; // both empty, no need to check key or value
+ } else if (!b.entries_[i].is_set) {
+ return 1; // a non-empty but b empty
+ }
+ // neither entry is empty
+ const int key_cmp = grpc_slice_cmp(a.entries_[i].key, b.entries_[i].key);
+ if (key_cmp != 0) return key_cmp;
+ const int value_cmp = value_cmp_a(a.entries_[i].value, b.entries_[i].value);
+ if (value_cmp != 0) return value_cmp;
+ }
+ return 0;
+}
-typedef struct grpc_slice_hash_table grpc_slice_hash_table;
-
-typedef struct grpc_slice_hash_table_entry {
- grpc_slice key;
- void* value; /* Must not be NULL. */
-} grpc_slice_hash_table_entry;
-
-/** Creates a new hash table of containing \a entries, which is an array
- of length \a num_entries. Takes ownership of all keys and values in \a
- entries. Values will be cleaned up via \a destroy_value(). If not NULL, \a
- value_cmp will be used to compare values in the context of \a
- grpc_slice_hash_table_cmp. If NULL, raw pointer (\a GPR_ICMP) comparison
- will be used. */
-grpc_slice_hash_table* grpc_slice_hash_table_create(
- size_t num_entries, grpc_slice_hash_table_entry* entries,
- void (*destroy_value)(void* value), int (*value_cmp)(void* a, void* b));
-
-grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table);
-void grpc_slice_hash_table_unref(grpc_slice_hash_table* table);
-
-/** Returns the value from \a table associated with \a key.
- Returns NULL if \a key is not found. */
-void* grpc_slice_hash_table_get(const grpc_slice_hash_table* table,
- const grpc_slice key);
-
-/** Compares \a a vs. \a b.
- * A table is considered "smaller" (resp. "greater") if:
- * - GPR_ICMP(a->value_cmp, b->value_cmp) < 1 (resp. > 1),
- * - else, it contains fewer (resp. more) entries,
- * - else, if strcmp(a_key, b_key) < 1 (resp. > 1),
- * - else, if value_cmp(a_value, b_value) < 1 (resp. > 1). */
-int grpc_slice_hash_table_cmp(const grpc_slice_hash_table* a,
- const grpc_slice_hash_table* b);
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H */
diff --git a/src/core/lib/slice/slice_weak_hash_table.h b/src/core/lib/slice/slice_weak_hash_table.h
new file mode 100644
index 0000000000..9d0ddfc2d2
--- /dev/null
+++ b/src/core/lib/slice/slice_weak_hash_table.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRPC_CORE_LIB_SLICE_SLICE_WEAK_HASH_TABLE_H
+#define GRPC_CORE_LIB_SLICE_SLICE_WEAK_HASH_TABLE_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+/// Weak hash table implementation.
+///
+/// The entries in this table are weak: an entry may be removed at any time due
+/// to a number of reasons: memory pressure, hash collisions, etc.
+///
+/// The keys are \a grpc_slice objects. The values are of arbitrary type.
+///
+/// This class is thread unsafe. It's the caller's responsibility to ensure
+/// proper locking when accessing its methods.
+
+namespace grpc_core {
+
+template <typename T, size_t Size>
+class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
+ public:
+  /// Creates a new table of at most \a Size entries.
+ static RefCountedPtr<SliceWeakHashTable> Create() {
+ return MakeRefCounted<SliceWeakHashTable<T, Size>>();
+ }
+
+ /// Add a mapping from \a key to \a value, taking ownership of \a key. This
+ /// operation will always succeed. It may discard older entries.
+ void Add(grpc_slice key, T value) {
+ const size_t idx = grpc_slice_hash(key) % Size;
+ entries_[idx].Set(key, std::move(value));
+ return;
+ }
+
+  /// Returns the value from the table associated with \a key, or null if not
+  /// found.
+ const T* Get(const grpc_slice key) const {
+ const size_t idx = grpc_slice_hash(key) % Size;
+ const auto& entry = entries_[idx];
+ return grpc_slice_eq(entry.key(), key) ? entry.value() : nullptr;
+ }
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T2, typename... Args>
+ friend T2* New(Args&&... args);
+
+ SliceWeakHashTable() = default;
+ ~SliceWeakHashTable() = default;
+
+ /// The type of the table "rows".
+ class Entry {
+ public:
+ Entry() = default;
+ ~Entry() {
+ if (is_set_) grpc_slice_unref_internal(key_);
+ }
+ grpc_slice key() const { return key_; }
+
+ /// Return the entry's value, or null if unset.
+ const T* value() const {
+ if (!is_set_) return nullptr;
+ return &value_;
+ }
+
+ /// Set the \a key and \a value (which is moved) for the entry.
+ void Set(grpc_slice key, T&& value) {
+ if (is_set_) grpc_slice_unref_internal(key_);
+ key_ = key;
+ value_ = std::move(value);
+ is_set_ = true;
+ }
+
+ private:
+ grpc_slice key_;
+ T value_;
+ bool is_set_ = false;
+ };
+
+ Entry entries_[Size];
+};
+
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_SLICE_SLICE_WEAK_HASH_TABLE_H */
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index 3df745652a..c4844da318 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -50,6 +50,7 @@
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/status_metadata.h"
#include "src/core/lib/transport/transport.h"
/** The maximum number of concurrent batches possible.
@@ -976,32 +977,6 @@ static int prepare_application_metadata(grpc_call* call, int count,
return 1;
}
-/* we offset status by a small amount when storing it into transport metadata
- as metadata cannot store a 0 value (which is used as OK for grpc_status_codes
- */
-#define STATUS_OFFSET 1
-static void destroy_status(void* ignored) {}
-
-static uint32_t decode_status(grpc_mdelem md) {
- uint32_t status;
- void* user_data;
- if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) return 0;
- if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) return 1;
- if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) return 2;
- user_data = grpc_mdelem_get_user_data(md, destroy_status);
- if (user_data != nullptr) {
- status = (static_cast<uint32_t>((intptr_t)user_data)) - STATUS_OFFSET;
- } else {
- if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) {
- status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
- }
- grpc_mdelem_set_user_data(
- md, destroy_status,
- (void*)static_cast<intptr_t>(status + STATUS_OFFSET));
- }
- return status;
-}
-
static grpc_message_compression_algorithm decode_message_compression(
grpc_mdelem md) {
grpc_message_compression_algorithm algorithm =
@@ -1093,7 +1068,8 @@ static void recv_initial_filter(grpc_call* call, grpc_metadata_batch* b) {
static void recv_trailing_filter(void* args, grpc_metadata_batch* b) {
grpc_call* call = static_cast<grpc_call*>(args);
if (b->idx.named.grpc_status != nullptr) {
- uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
+ grpc_status_code status_code =
+ grpc_get_status_code_from_metadata(b->idx.named.grpc_status->md);
grpc_error* error =
status_code == GRPC_STATUS_OK
? GRPC_ERROR_NONE
diff --git a/src/core/lib/transport/metadata_batch.cc b/src/core/lib/transport/metadata_batch.cc
index b23f516516..49740fcd1e 100644
--- a/src/core/lib/transport/metadata_batch.cc
+++ b/src/core/lib/transport/metadata_batch.cc
@@ -303,3 +303,27 @@ grpc_error* grpc_metadata_batch_filter(grpc_metadata_batch* batch,
}
return error;
}
+
+void grpc_metadata_batch_copy(grpc_metadata_batch* src,
+ grpc_metadata_batch* dst,
+ grpc_linked_mdelem* storage) {
+ grpc_metadata_batch_init(dst);
+ dst->deadline = src->deadline;
+ size_t i = 0;
+ for (grpc_linked_mdelem* elem = src->list.head; elem != nullptr;
+ elem = elem->next) {
+ grpc_error* error = grpc_metadata_batch_add_tail(dst, &storage[i++],
+ GRPC_MDELEM_REF(elem->md));
+ // The only way that grpc_metadata_batch_add_tail() can fail is if
+ // there's a duplicate entry for a callout. However, that can't be
+ // the case here, because we would not have been allowed to create
+ // a source batch that had that kind of conflict.
+ GPR_ASSERT(error == GRPC_ERROR_NONE);
+ }
+}
+
+void grpc_metadata_batch_move(grpc_metadata_batch* src,
+ grpc_metadata_batch* dst) {
+ *dst = *src;
+ grpc_metadata_batch_init(src);
+}
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 06fc9ade7e..3876063b52 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -137,4 +137,13 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch* comd);
} while (0)
#endif
+/// Copies \a src to \a dst. \a storage must point to an array of
+/// \a grpc_linked_mdelem structs of at least the same size as \a src.
+void grpc_metadata_batch_copy(grpc_metadata_batch* src,
+ grpc_metadata_batch* dst,
+ grpc_linked_mdelem* storage);
+
+void grpc_metadata_batch_move(grpc_metadata_batch* src,
+ grpc_metadata_batch* dst);
+
#endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_BATCH_H */
diff --git a/src/core/lib/transport/service_config.cc b/src/core/lib/transport/service_config.cc
index b1d727419d..e1a55d98ab 100644
--- a/src/core/lib/transport/service_config.cc
+++ b/src/core/lib/transport/service_config.cc
@@ -31,74 +31,30 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-// The main purpose of the code here is to parse the service config in
-// JSON form, which will look like this:
-//
-// {
-// "loadBalancingPolicy": "string", // optional
-// "methodConfig": [ // array of one or more method_config objects
-// {
-// "name": [ // array of one or more name objects
-// {
-// "service": "string", // required
-// "method": "string", // optional
-// }
-// ],
-// // remaining fields are optional.
-// // see https://developers.google.com/protocol-buffers/docs/proto3#json
-// // for format details.
-// "waitForReady": bool,
-// "timeout": "duration_string",
-// "maxRequestMessageBytes": "int64_string",
-// "maxResponseMessageBytes": "int64_string",
-// }
-// ]
-// }
-
-struct grpc_service_config {
- char* json_string; // Underlying storage for json_tree.
- grpc_json* json_tree;
-};
+namespace grpc_core {
-grpc_service_config* grpc_service_config_create(const char* json_string) {
- grpc_service_config* service_config =
- static_cast<grpc_service_config*>(gpr_malloc(sizeof(*service_config)));
- service_config->json_string = gpr_strdup(json_string);
- service_config->json_tree =
- grpc_json_parse_string(service_config->json_string);
- if (service_config->json_tree == nullptr) {
+UniquePtr<ServiceConfig> ServiceConfig::Create(const char* json) {
+ UniquePtr<char> json_string(gpr_strdup(json));
+ grpc_json* json_tree = grpc_json_parse_string(json_string.get());
+ if (json_tree == nullptr) {
gpr_log(GPR_INFO, "failed to parse JSON for service config");
- gpr_free(service_config->json_string);
- gpr_free(service_config);
return nullptr;
}
- return service_config;
+ return MakeUnique<ServiceConfig>(std::move(json_string), json_tree);
}
-void grpc_service_config_destroy(grpc_service_config* service_config) {
- grpc_json_destroy(service_config->json_tree);
- gpr_free(service_config->json_string);
- gpr_free(service_config);
-}
+ServiceConfig::ServiceConfig(UniquePtr<char> json_string, grpc_json* json_tree)
+ : json_string_(std::move(json_string)), json_tree_(json_tree) {}
-void grpc_service_config_parse_global_params(
- const grpc_service_config* service_config,
- void (*process_json)(const grpc_json* json, void* arg), void* arg) {
- const grpc_json* json = service_config->json_tree;
- if (json->type != GRPC_JSON_OBJECT || json->key != nullptr) return;
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) return;
- if (strcmp(field->key, "methodConfig") == 0) continue;
- process_json(field, arg);
- }
-}
+ServiceConfig::~ServiceConfig() { grpc_json_destroy(json_tree_); }
-const char* grpc_service_config_get_lb_policy_name(
- const grpc_service_config* service_config) {
- const grpc_json* json = service_config->json_tree;
- if (json->type != GRPC_JSON_OBJECT || json->key != nullptr) return nullptr;
+const char* ServiceConfig::GetLoadBalancingPolicyName() const {
+ if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) {
+ return nullptr;
+ }
const char* lb_policy_name = nullptr;
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
+ for (grpc_json* field = json_tree_->child; field != nullptr;
+ field = field->next) {
if (field->key == nullptr) return nullptr;
if (strcmp(field->key, "loadBalancingPolicy") == 0) {
if (lb_policy_name != nullptr) return nullptr; // Duplicate.
@@ -109,8 +65,7 @@ const char* grpc_service_config_get_lb_policy_name(
return lb_policy_name;
}
-// Returns the number of names specified in the method config \a json.
-static size_t count_names_in_method_config_json(grpc_json* json) {
+size_t ServiceConfig::CountNamesInMethodConfig(grpc_json* json) {
size_t num_names = 0;
for (grpc_json* field = json->child; field != nullptr; field = field->next) {
if (field->key != nullptr && strcmp(field->key, "name") == 0) {
@@ -124,9 +79,7 @@ static size_t count_names_in_method_config_json(grpc_json* json) {
return num_names;
}
-// Returns a path string for the JSON name object specified by \a json.
-// Returns NULL on error. Caller takes ownership of result.
-static char* parse_json_method_name(grpc_json* json) {
+UniquePtr<char> ServiceConfig::ParseJsonMethodName(grpc_json* json) {
if (json->type != GRPC_JSON_OBJECT) return nullptr;
const char* service_name = nullptr;
const char* method_name = nullptr;
@@ -147,116 +100,7 @@ static char* parse_json_method_name(grpc_json* json) {
char* path;
gpr_asprintf(&path, "/%s/%s", service_name,
method_name == nullptr ? "*" : method_name);
- return path;
+ return UniquePtr<char>(path);
}
-// Parses the method config from \a json. Adds an entry to \a entries for
-// each name found, incrementing \a idx for each entry added.
-// Returns false on error.
-static bool parse_json_method_config(
- grpc_json* json, void* (*create_value)(const grpc_json* method_config_json),
- void* (*ref_value)(void* value), void (*unref_value)(void* value),
- grpc_slice_hash_table_entry* entries, size_t* idx) {
- // Construct value.
- void* method_config = create_value(json);
- if (method_config == nullptr) return false;
- // Construct list of paths.
- bool success = false;
- gpr_strvec paths;
- gpr_strvec_init(&paths);
- for (grpc_json* child = json->child; child != nullptr; child = child->next) {
- if (child->key == nullptr) continue;
- if (strcmp(child->key, "name") == 0) {
- if (child->type != GRPC_JSON_ARRAY) goto done;
- for (grpc_json* name = child->child; name != nullptr; name = name->next) {
- char* path = parse_json_method_name(name);
- if (path == nullptr) goto done;
- gpr_strvec_add(&paths, path);
- }
- }
- }
- if (paths.count == 0) goto done; // No names specified.
- // Add entry for each path.
- for (size_t i = 0; i < paths.count; ++i) {
- entries[*idx].key = grpc_slice_from_copied_string(paths.strs[i]);
- entries[*idx].value = ref_value(method_config);
- ++*idx;
- }
- success = true;
-done:
- unref_value(method_config);
- gpr_strvec_destroy(&paths);
- return success;
-}
-
-grpc_slice_hash_table* grpc_service_config_create_method_config_table(
- const grpc_service_config* service_config,
- void* (*create_value)(const grpc_json* method_config_json),
- void* (*ref_value)(void* value), void (*unref_value)(void* value)) {
- const grpc_json* json = service_config->json_tree;
- // Traverse parsed JSON tree.
- if (json->type != GRPC_JSON_OBJECT || json->key != nullptr) return nullptr;
- size_t num_entries = 0;
- grpc_slice_hash_table_entry* entries = nullptr;
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) return nullptr;
- if (strcmp(field->key, "methodConfig") == 0) {
- if (entries != nullptr) return nullptr; // Duplicate.
- if (field->type != GRPC_JSON_ARRAY) return nullptr;
- // Find number of entries.
- for (grpc_json* method = field->child; method != nullptr;
- method = method->next) {
- size_t count = count_names_in_method_config_json(method);
- if (count <= 0) return nullptr;
- num_entries += count;
- }
- // Populate method config table entries.
- entries = static_cast<grpc_slice_hash_table_entry*>(
- gpr_malloc(num_entries * sizeof(grpc_slice_hash_table_entry)));
- size_t idx = 0;
- for (grpc_json* method = field->child; method != nullptr;
- method = method->next) {
- if (!parse_json_method_config(method, create_value, ref_value,
- unref_value, entries, &idx)) {
- for (size_t i = 0; i < idx; ++i) {
- grpc_slice_unref_internal(entries[i].key);
- unref_value(entries[i].value);
- }
- gpr_free(entries);
- return nullptr;
- }
- }
- GPR_ASSERT(idx == num_entries);
- }
- }
- // Instantiate method config table.
- grpc_slice_hash_table* method_config_table = nullptr;
- if (entries != nullptr) {
- method_config_table = grpc_slice_hash_table_create(num_entries, entries,
- unref_value, nullptr);
- gpr_free(entries);
- }
- return method_config_table;
-}
-
-void* grpc_method_config_table_get(const grpc_slice_hash_table* table,
- grpc_slice path) {
- void* value = grpc_slice_hash_table_get(table, path);
- // If we didn't find a match for the path, try looking for a wildcard
- // entry (i.e., change "/service/method" to "/service/*").
- if (value == nullptr) {
- char* path_str = grpc_slice_to_c_string(path);
- const char* sep = strrchr(path_str, '/') + 1;
- const size_t len = static_cast<size_t>(sep - path_str);
- char* buf = static_cast<char*>(gpr_malloc(len + 2)); // '*' and NUL
- memcpy(buf, path_str, len);
- buf[len] = '*';
- buf[len + 1] = '\0';
- grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
- gpr_free(buf);
- value = grpc_slice_hash_table_get(table, wildcard_path);
- grpc_slice_unref_internal(wildcard_path);
- gpr_free(path_str);
- }
- return value;
-}
+} // namespace grpc_core
diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h
index 6517f36802..a65b267d46 100644
--- a/src/core/lib/transport/service_config.h
+++ b/src/core/lib/transport/service_config.h
@@ -20,44 +20,230 @@
#include <grpc/support/port_platform.h>
#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/string_util.h>
+#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/slice/slice_hash_table.h"
-typedef struct grpc_service_config grpc_service_config;
-
-grpc_service_config* grpc_service_config_create(const char* json_string);
-void grpc_service_config_destroy(grpc_service_config* service_config);
-
-/// Invokes \a process_json() for each global parameter in the service
-/// config. \a arg is passed as the second argument to \a process_json().
-void grpc_service_config_parse_global_params(
- const grpc_service_config* service_config,
- void (*process_json)(const grpc_json* json, void* arg), void* arg);
-
-/// Gets the LB policy name from \a service_config.
-/// Returns NULL if no LB policy name was specified.
-/// Caller does NOT take ownership.
-const char* grpc_service_config_get_lb_policy_name(
- const grpc_service_config* service_config);
-
-/// Creates a method config table based on the data in \a json.
-/// The table's keys are request paths. The table's value type is
-/// returned by \a create_value(), based on data parsed from the JSON tree.
-/// \a ref_value() and \a unref_value() are used to ref and unref values.
-/// Returns NULL on error.
-grpc_slice_hash_table* grpc_service_config_create_method_config_table(
- const grpc_service_config* service_config,
- void* (*create_value)(const grpc_json* method_config_json),
- void* (*ref_value)(void* value), void (*unref_value)(void* value));
-
-/// A helper function for looking up values in the table returned by
-/// \a grpc_service_config_create_method_config_table().
-/// Gets the method config for the specified \a path, which should be of
-/// the form "/service/method".
-/// Returns NULL if the method has no config.
-/// Caller does NOT own a reference to the result.
-void* grpc_method_config_table_get(const grpc_slice_hash_table* table,
- grpc_slice path);
+// The main purpose of the code here is to parse the service config in
+// JSON form, which will look like this:
+//
+// {
+// "loadBalancingPolicy": "string", // optional
+// "methodConfig": [ // array of one or more method_config objects
+// {
+// "name": [ // array of one or more name objects
+// {
+// "service": "string", // required
+// "method": "string", // optional
+// }
+// ],
+// // remaining fields are optional.
+// // see
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+// // for format details.
+// "waitForReady": bool,
+// "timeout": "duration_string",
+// "maxRequestMessageBytes": "int64_string",
+// "maxResponseMessageBytes": "int64_string",
+// }
+// ]
+// }
+
+namespace grpc_core {
+
+class ServiceConfig {
+ public:
+  /// Creates a new service config by parsing \a json.
+ /// Returns null on parse error.
+ static UniquePtr<ServiceConfig> Create(const char* json);
+
+ ~ServiceConfig();
+
+ /// Invokes \a process_json() for each global parameter in the service
+ /// config. \a arg is passed as the second argument to \a process_json().
+ template <typename T>
+ using ProcessJson = void (*)(const grpc_json*, T*);
+ template <typename T>
+ void ParseGlobalParams(ProcessJson<T> process_json, T* arg) const;
+
+ /// Gets the LB policy name from \a service_config.
+ /// Returns NULL if no LB policy name was specified.
+ /// Caller does NOT take ownership.
+ const char* GetLoadBalancingPolicyName() const;
+
+ /// Creates a method config table based on the data in \a json.
+ /// The table's keys are request paths. The table's value type is
+ /// returned by \a create_value(), based on data parsed from the JSON tree.
+ /// Returns null on error.
+ template <typename T>
+ using CreateValue = RefCountedPtr<T> (*)(const grpc_json* method_config_json);
+ template <typename T>
+ RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> CreateMethodConfigTable(
+ CreateValue<T> create_value);
+
+ /// A helper function for looking up values in the table returned by
+ /// \a CreateMethodConfigTable().
+ /// Gets the method config for the specified \a path, which should be of
+ /// the form "/service/method".
+ /// Returns null if the method has no config.
+ /// Caller does NOT own a reference to the result.
+ template <typename T>
+ static RefCountedPtr<T> MethodConfigTableLookup(
+ const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path);
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T, typename... Args>
+ friend T* New(Args&&... args);
+
+ // Takes ownership of \a json_tree.
+ ServiceConfig(UniquePtr<char> json_string, grpc_json* json_tree);
+
+ // Returns the number of names specified in the method config \a json.
+ static size_t CountNamesInMethodConfig(grpc_json* json);
+
+ // Returns a path string for the JSON name object specified by \a json.
+ // Returns null on error.
+ static UniquePtr<char> ParseJsonMethodName(grpc_json* json);
+
+ // Parses the method config from \a json. Adds an entry to \a entries for
+ // each name found, incrementing \a idx for each entry added.
+ // Returns false on error.
+ template <typename T>
+ static bool ParseJsonMethodConfig(
+ grpc_json* json, CreateValue<T> create_value,
+ typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx);
+
+ UniquePtr<char> json_string_; // Underlying storage for json_tree.
+ grpc_json* json_tree_;
+};
+
+//
+// implementation -- no user-serviceable parts below
+//
+
+template <typename T>
+void ServiceConfig::ParseGlobalParams(ProcessJson<T> process_json,
+ T* arg) const {
+ if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) {
+ return;
+ }
+ for (grpc_json* field = json_tree_->child; field != nullptr;
+ field = field->next) {
+ if (field->key == nullptr) return;
+ if (strcmp(field->key, "methodConfig") == 0) continue;
+ process_json(field, arg);
+ }
+}
+
+template <typename T>
+bool ServiceConfig::ParseJsonMethodConfig(
+ grpc_json* json, CreateValue<T> create_value,
+ typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx) {
+ // Construct value.
+ RefCountedPtr<T> method_config = create_value(json);
+ if (method_config == nullptr) return false;
+ // Construct list of paths.
+ InlinedVector<UniquePtr<char>, 10> paths;
+ for (grpc_json* child = json->child; child != nullptr; child = child->next) {
+ if (child->key == nullptr) continue;
+ if (strcmp(child->key, "name") == 0) {
+ if (child->type != GRPC_JSON_ARRAY) return false;
+ for (grpc_json* name = child->child; name != nullptr; name = name->next) {
+ UniquePtr<char> path = ParseJsonMethodName(name);
+ if (path == nullptr) return false;
+ paths.push_back(std::move(path));
+ }
+ }
+ }
+ if (paths.size() == 0) return false; // No names specified.
+ // Add entry for each path.
+ for (size_t i = 0; i < paths.size(); ++i) {
+ entries[*idx].key = grpc_slice_from_copied_string(paths[i].get());
+ entries[*idx].value = method_config; // Takes a new ref.
+ ++*idx;
+ }
+ // Success.
+ return true;
+}
+
+template <typename T>
+RefCountedPtr<SliceHashTable<RefCountedPtr<T>>>
+ServiceConfig::CreateMethodConfigTable(CreateValue<T> create_value) {
+ // Traverse parsed JSON tree.
+ if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) {
+ return nullptr;
+ }
+ size_t num_entries = 0;
+ typename SliceHashTable<RefCountedPtr<T>>::Entry* entries = nullptr;
+ for (grpc_json* field = json_tree_->child; field != nullptr;
+ field = field->next) {
+ if (field->key == nullptr) return nullptr;
+ if (strcmp(field->key, "methodConfig") == 0) {
+ if (entries != nullptr) return nullptr; // Duplicate.
+ if (field->type != GRPC_JSON_ARRAY) return nullptr;
+ // Find number of entries.
+ for (grpc_json* method = field->child; method != nullptr;
+ method = method->next) {
+ size_t count = CountNamesInMethodConfig(method);
+ if (count <= 0) return nullptr;
+ num_entries += count;
+ }
+ // Populate method config table entries.
+ entries = static_cast<typename SliceHashTable<RefCountedPtr<T>>::Entry*>(
+ gpr_zalloc(num_entries *
+ sizeof(typename SliceHashTable<RefCountedPtr<T>>::Entry)));
+ size_t idx = 0;
+ for (grpc_json* method = field->child; method != nullptr;
+ method = method->next) {
+ if (!ParseJsonMethodConfig(method, create_value, entries, &idx)) {
+ for (size_t i = 0; i < idx; ++i) {
+ grpc_slice_unref_internal(entries[i].key);
+ entries[i].value.reset();
+ }
+ gpr_free(entries);
+ return nullptr;
+ }
+ }
+ GPR_ASSERT(idx == num_entries);
+ }
+ }
+ // Instantiate method config table.
+ RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> method_config_table;
+ if (entries != nullptr) {
+ method_config_table =
+ SliceHashTable<RefCountedPtr<T>>::Create(num_entries, entries, nullptr);
+ gpr_free(entries);
+ }
+ return method_config_table;
+}
+
+template <typename T>
+RefCountedPtr<T> ServiceConfig::MethodConfigTableLookup(
+ const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path) {
+ const RefCountedPtr<T>* value = table.Get(path);
+ // If we didn't find a match for the path, try looking for a wildcard
+ // entry (i.e., change "/service/method" to "/service/*").
+ if (value == nullptr) {
+ char* path_str = grpc_slice_to_c_string(path);
+ const char* sep = strrchr(path_str, '/') + 1;
+ const size_t len = (size_t)(sep - path_str);
+ char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL
+ memcpy(buf, path_str, len);
+ buf[len] = '*';
+ buf[len + 1] = '\0';
+ grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
+ gpr_free(buf);
+ value = table.Get(wildcard_path);
+ grpc_slice_unref_internal(wildcard_path);
+ gpr_free(path_str);
+ }
+ return RefCountedPtr<T>(*value);
+}
+
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H */
diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc
index 0e11b6e4e4..6a5144f21a 100644
--- a/src/core/lib/transport/static_metadata.cc
+++ b/src/core/lib/transport/static_metadata.cc
@@ -50,61 +50,64 @@ static uint8_t g_bytes[] = {
114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99,
111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115,
101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45,
- 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
- 117, 116, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
- 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
- 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
- 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
- 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
- 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
- 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
- 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
- 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 100, 101, 102, 108, 97,
- 116, 101, 103, 122, 105, 112, 115, 116, 114, 101, 97, 109, 47, 103, 122,
- 105, 112, 48, 49, 50, 105, 100, 101, 110, 116, 105, 116, 121, 116, 114,
- 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105, 99, 97, 116, 105,
- 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84, 50, 48, 48, 52,
- 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115, 103, 114, 112, 99,
- 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100, 101, 120, 46, 104,
- 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48, 52, 52, 48, 48,
- 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99, 104, 97, 114, 115,
- 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102, 108, 97, 116, 101,
- 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101,
- 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103, 101, 115, 97, 99,
- 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45, 99, 111, 110, 116,
- 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111, 114, 105, 103, 105,
- 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117, 116, 104, 111, 114,
- 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104, 101, 45, 99, 111,
- 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110, 116, 45, 100, 105,
- 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110,
- 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99, 111, 110, 116, 101,
- 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111, 110, 116, 101, 110,
- 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99, 111, 110, 116, 101,
- 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111, 107, 105, 101, 100,
- 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101, 99, 116, 101, 120,
- 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102, 45, 109, 97, 116,
- 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115,
- 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101, 45, 109, 97, 116,
- 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105, 102, 45, 117, 110,
- 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 108,
- 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101, 100, 108, 98, 45,
- 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, 110, 107, 108, 111, 99,
- 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111, 114, 119, 97, 114,
- 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 101, 110, 116,
- 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104,
- 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97, 110, 103, 101, 114,
- 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101, 115, 104, 114, 101,
- 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101, 114, 118, 101, 114,
- 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115, 116, 114, 105, 99,
- 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116, 45, 115, 101, 99,
- 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102, 101, 114, 45, 101,
- 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121, 118, 105, 97, 119,
- 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101,
- 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116,
- 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103, 122, 105, 112, 100,
- 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112, 105, 100, 101, 110,
- 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122,
- 105, 112};
+ 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 112, 114, 101, 118, 105,
+ 111, 117, 115, 45, 114, 112, 99, 45, 97, 116, 116, 101, 109, 112, 116,
+ 115, 103, 114, 112, 99, 45, 114, 101, 116, 114, 121, 45, 112, 117, 115,
+ 104, 98, 97, 99, 107, 45, 109, 115, 103, 114, 112, 99, 45, 116, 105,
+ 109, 101, 111, 117, 116, 49, 50, 51, 52, 103, 114, 112, 99, 46, 119,
+ 97, 105, 116, 95, 102, 111, 114, 95, 114, 101, 97, 100, 121, 103, 114,
+ 112, 99, 46, 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46,
+ 109, 97, 120, 95, 114, 101, 113, 117, 101, 115, 116, 95, 109, 101, 115,
+ 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 103, 114, 112, 99, 46,
+ 109, 97, 120, 95, 114, 101, 115, 112, 111, 110, 115, 101, 95, 109, 101,
+ 115, 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 47, 103, 114, 112,
+ 99, 46, 108, 98, 46, 118, 49, 46, 76, 111, 97, 100, 66, 97, 108,
+ 97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111,
+ 97, 100, 100, 101, 102, 108, 97, 116, 101, 103, 122, 105, 112, 115, 116,
+ 114, 101, 97, 109, 47, 103, 122, 105, 112, 48, 105, 100, 101, 110, 116,
+ 105, 116, 121, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108,
+ 105, 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83,
+ 84, 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112,
+ 115, 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110,
+ 100, 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51,
+ 48, 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45,
+ 99, 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101,
+ 102, 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110,
+ 103, 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110,
+ 103, 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115,
+ 45, 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45,
+ 111, 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97,
+ 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99,
+ 104, 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101,
+ 110, 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101,
+ 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110,
+ 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111,
+ 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112,
+ 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105,
+ 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102,
+ 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110,
+ 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101,
+ 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115,
+ 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105,
+ 101, 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105,
+ 110, 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102,
+ 111, 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117,
+ 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121,
+ 45, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114,
+ 97, 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114,
+ 101, 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115,
+ 101, 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101,
+ 115, 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114,
+ 116, 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115,
+ 102, 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114,
+ 121, 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116,
+ 105, 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100,
+ 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44,
+ 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105,
+ 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97,
+ 116, 101, 44, 103, 122, 105, 112};
static void static_ref(void* unused) {}
static void static_unref(void* unused) {}
@@ -217,6 +220,10 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
@@ -242,85 +249,89 @@ const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
- {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 12}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}},
- {&grpc_static_metadata_refcounts[24], {{g_bytes + 302, 19}}},
- {&grpc_static_metadata_refcounts[25], {{g_bytes + 321, 12}}},
- {&grpc_static_metadata_refcounts[26], {{g_bytes + 333, 30}}},
- {&grpc_static_metadata_refcounts[27], {{g_bytes + 363, 31}}},
- {&grpc_static_metadata_refcounts[28], {{g_bytes + 394, 36}}},
- {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 7}}},
- {&grpc_static_metadata_refcounts[30], {{g_bytes + 437, 4}}},
- {&grpc_static_metadata_refcounts[31], {{g_bytes + 441, 11}}},
- {&grpc_static_metadata_refcounts[32], {{g_bytes + 452, 1}}},
- {&grpc_static_metadata_refcounts[33], {{g_bytes + 453, 1}}},
- {&grpc_static_metadata_refcounts[34], {{g_bytes + 454, 1}}},
- {&grpc_static_metadata_refcounts[35], {{g_bytes + 455, 8}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 463, 8}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 471, 16}}},
- {&grpc_static_metadata_refcounts[38], {{g_bytes + 487, 4}}},
- {&grpc_static_metadata_refcounts[39], {{g_bytes + 491, 3}}},
- {&grpc_static_metadata_refcounts[40], {{g_bytes + 494, 3}}},
- {&grpc_static_metadata_refcounts[41], {{g_bytes + 497, 4}}},
- {&grpc_static_metadata_refcounts[42], {{g_bytes + 501, 5}}},
- {&grpc_static_metadata_refcounts[43], {{g_bytes + 506, 4}}},
- {&grpc_static_metadata_refcounts[44], {{g_bytes + 510, 3}}},
- {&grpc_static_metadata_refcounts[45], {{g_bytes + 513, 3}}},
- {&grpc_static_metadata_refcounts[46], {{g_bytes + 516, 1}}},
- {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 11}}},
- {&grpc_static_metadata_refcounts[48], {{g_bytes + 528, 3}}},
- {&grpc_static_metadata_refcounts[49], {{g_bytes + 531, 3}}},
- {&grpc_static_metadata_refcounts[50], {{g_bytes + 534, 3}}},
- {&grpc_static_metadata_refcounts[51], {{g_bytes + 537, 3}}},
- {&grpc_static_metadata_refcounts[52], {{g_bytes + 540, 3}}},
- {&grpc_static_metadata_refcounts[53], {{g_bytes + 543, 14}}},
- {&grpc_static_metadata_refcounts[54], {{g_bytes + 557, 13}}},
- {&grpc_static_metadata_refcounts[55], {{g_bytes + 570, 15}}},
- {&grpc_static_metadata_refcounts[56], {{g_bytes + 585, 13}}},
- {&grpc_static_metadata_refcounts[57], {{g_bytes + 598, 6}}},
- {&grpc_static_metadata_refcounts[58], {{g_bytes + 604, 27}}},
- {&grpc_static_metadata_refcounts[59], {{g_bytes + 631, 3}}},
- {&grpc_static_metadata_refcounts[60], {{g_bytes + 634, 5}}},
- {&grpc_static_metadata_refcounts[61], {{g_bytes + 639, 13}}},
- {&grpc_static_metadata_refcounts[62], {{g_bytes + 652, 13}}},
- {&grpc_static_metadata_refcounts[63], {{g_bytes + 665, 19}}},
- {&grpc_static_metadata_refcounts[64], {{g_bytes + 684, 16}}},
- {&grpc_static_metadata_refcounts[65], {{g_bytes + 700, 14}}},
- {&grpc_static_metadata_refcounts[66], {{g_bytes + 714, 16}}},
- {&grpc_static_metadata_refcounts[67], {{g_bytes + 730, 13}}},
- {&grpc_static_metadata_refcounts[68], {{g_bytes + 743, 6}}},
- {&grpc_static_metadata_refcounts[69], {{g_bytes + 749, 4}}},
- {&grpc_static_metadata_refcounts[70], {{g_bytes + 753, 4}}},
- {&grpc_static_metadata_refcounts[71], {{g_bytes + 757, 6}}},
- {&grpc_static_metadata_refcounts[72], {{g_bytes + 763, 7}}},
- {&grpc_static_metadata_refcounts[73], {{g_bytes + 770, 4}}},
- {&grpc_static_metadata_refcounts[74], {{g_bytes + 774, 8}}},
- {&grpc_static_metadata_refcounts[75], {{g_bytes + 782, 17}}},
- {&grpc_static_metadata_refcounts[76], {{g_bytes + 799, 13}}},
- {&grpc_static_metadata_refcounts[77], {{g_bytes + 812, 8}}},
- {&grpc_static_metadata_refcounts[78], {{g_bytes + 820, 19}}},
- {&grpc_static_metadata_refcounts[79], {{g_bytes + 839, 13}}},
- {&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 11}}},
- {&grpc_static_metadata_refcounts[81], {{g_bytes + 863, 4}}},
- {&grpc_static_metadata_refcounts[82], {{g_bytes + 867, 8}}},
- {&grpc_static_metadata_refcounts[83], {{g_bytes + 875, 12}}},
- {&grpc_static_metadata_refcounts[84], {{g_bytes + 887, 18}}},
- {&grpc_static_metadata_refcounts[85], {{g_bytes + 905, 19}}},
- {&grpc_static_metadata_refcounts[86], {{g_bytes + 924, 5}}},
- {&grpc_static_metadata_refcounts[87], {{g_bytes + 929, 7}}},
- {&grpc_static_metadata_refcounts[88], {{g_bytes + 936, 7}}},
- {&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 11}}},
- {&grpc_static_metadata_refcounts[90], {{g_bytes + 954, 6}}},
- {&grpc_static_metadata_refcounts[91], {{g_bytes + 960, 10}}},
- {&grpc_static_metadata_refcounts[92], {{g_bytes + 970, 25}}},
- {&grpc_static_metadata_refcounts[93], {{g_bytes + 995, 17}}},
- {&grpc_static_metadata_refcounts[94], {{g_bytes + 1012, 4}}},
- {&grpc_static_metadata_refcounts[95], {{g_bytes + 1016, 3}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1019, 16}}},
- {&grpc_static_metadata_refcounts[97], {{g_bytes + 1035, 16}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1051, 13}}},
- {&grpc_static_metadata_refcounts[99], {{g_bytes + 1064, 12}}},
- {&grpc_static_metadata_refcounts[100], {{g_bytes + 1076, 21}}},
+ {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 26}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 316, 22}}},
+ {&grpc_static_metadata_refcounts[24], {{g_bytes + 338, 12}}},
+ {&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}},
+ {&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}},
+ {&grpc_static_metadata_refcounts[27], {{g_bytes + 352, 1}}},
+ {&grpc_static_metadata_refcounts[28], {{g_bytes + 353, 1}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 354, 19}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 373, 12}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 385, 30}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 415, 31}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 446, 36}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 7}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 493, 11}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 504, 1}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 513, 8}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 521, 16}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 537, 4}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 541, 3}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 544, 3}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 547, 4}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 551, 5}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 556, 4}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 560, 3}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 563, 3}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 566, 1}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 567, 11}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 578, 3}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 581, 3}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 584, 3}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 587, 3}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 590, 3}}},
+ {&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 14}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 607, 13}}},
+ {&grpc_static_metadata_refcounts[59], {{g_bytes + 620, 15}}},
+ {&grpc_static_metadata_refcounts[60], {{g_bytes + 635, 13}}},
+ {&grpc_static_metadata_refcounts[61], {{g_bytes + 648, 6}}},
+ {&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 27}}},
+ {&grpc_static_metadata_refcounts[63], {{g_bytes + 681, 3}}},
+ {&grpc_static_metadata_refcounts[64], {{g_bytes + 684, 5}}},
+ {&grpc_static_metadata_refcounts[65], {{g_bytes + 689, 13}}},
+ {&grpc_static_metadata_refcounts[66], {{g_bytes + 702, 13}}},
+ {&grpc_static_metadata_refcounts[67], {{g_bytes + 715, 19}}},
+ {&grpc_static_metadata_refcounts[68], {{g_bytes + 734, 16}}},
+ {&grpc_static_metadata_refcounts[69], {{g_bytes + 750, 14}}},
+ {&grpc_static_metadata_refcounts[70], {{g_bytes + 764, 16}}},
+ {&grpc_static_metadata_refcounts[71], {{g_bytes + 780, 13}}},
+ {&grpc_static_metadata_refcounts[72], {{g_bytes + 793, 6}}},
+ {&grpc_static_metadata_refcounts[73], {{g_bytes + 799, 4}}},
+ {&grpc_static_metadata_refcounts[74], {{g_bytes + 803, 4}}},
+ {&grpc_static_metadata_refcounts[75], {{g_bytes + 807, 6}}},
+ {&grpc_static_metadata_refcounts[76], {{g_bytes + 813, 7}}},
+ {&grpc_static_metadata_refcounts[77], {{g_bytes + 820, 4}}},
+ {&grpc_static_metadata_refcounts[78], {{g_bytes + 824, 8}}},
+ {&grpc_static_metadata_refcounts[79], {{g_bytes + 832, 17}}},
+ {&grpc_static_metadata_refcounts[80], {{g_bytes + 849, 13}}},
+ {&grpc_static_metadata_refcounts[81], {{g_bytes + 862, 8}}},
+ {&grpc_static_metadata_refcounts[82], {{g_bytes + 870, 19}}},
+ {&grpc_static_metadata_refcounts[83], {{g_bytes + 889, 13}}},
+ {&grpc_static_metadata_refcounts[84], {{g_bytes + 902, 11}}},
+ {&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 4}}},
+ {&grpc_static_metadata_refcounts[86], {{g_bytes + 917, 8}}},
+ {&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 12}}},
+ {&grpc_static_metadata_refcounts[88], {{g_bytes + 937, 18}}},
+ {&grpc_static_metadata_refcounts[89], {{g_bytes + 955, 19}}},
+ {&grpc_static_metadata_refcounts[90], {{g_bytes + 974, 5}}},
+ {&grpc_static_metadata_refcounts[91], {{g_bytes + 979, 7}}},
+ {&grpc_static_metadata_refcounts[92], {{g_bytes + 986, 7}}},
+ {&grpc_static_metadata_refcounts[93], {{g_bytes + 993, 11}}},
+ {&grpc_static_metadata_refcounts[94], {{g_bytes + 1004, 6}}},
+ {&grpc_static_metadata_refcounts[95], {{g_bytes + 1010, 10}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1020, 25}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1045, 17}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1062, 4}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1066, 3}}},
+ {&grpc_static_metadata_refcounts[100], {{g_bytes + 1069, 16}}},
+ {&grpc_static_metadata_refcounts[101], {{g_bytes + 1085, 16}}},
+ {&grpc_static_metadata_refcounts[102], {{g_bytes + 1101, 13}}},
+ {&grpc_static_metadata_refcounts[103], {{g_bytes + 1114, 12}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1126, 21}}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
@@ -330,50 +341,51 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
static const int8_t elems_r[] = {
- 13, 2, 1, 0, 15, 4, 0, 21, 0, 23, -3, 0, 0, 0, 10, 19, -4,
- 0, 0, 1, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, -52, 0, -55, -36, -57, -58, -58, -58, 0, 40, 39, 38, 37, 36, 35,
- 34, 33, 32, 31, 30, 29, 28, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
- 18, 17, 16, 15, 18, 17, 16, 15, 14, 13, 12, 11, 11, 0};
+ 16, 11, -1, 0, 15, 2, -78, 24, 0, 18, -5, 0, 0, 0, 17, 14, -8, 0,
+ 0, 27, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, -64, 0, -44, -43, -70, 0, 34, 33, 33, 32, 31, 30, 29, 28, 27,
+ 27, 26, 25, 24, 23, 22, 21, 20, 20, 19, 19, 18, 17, 16, 15, 14, 13, 12,
+ 11, 14, 13, 12, 11, 10, 9, 9, 8, 7, 6, 5, 0};
static uint32_t elems_phash(uint32_t i) {
- i -= 46;
- uint32_t x = i % 99;
- uint32_t y = i / 99;
+ i -= 50;
+ uint32_t x = i % 103;
+ uint32_t y = i / 103;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
- uint32_t delta = static_cast<uint32_t>(elems_r[y]);
+ uint32_t delta = (uint32_t)elems_r[y];
h += delta;
}
return h;
}
static const uint16_t elem_keys[] = {
- 1039, 1040, 145, 146, 541, 1639, 1045, 250, 251, 252, 253, 254,
- 1646, 46, 47, 1437, 1942, 1651, 445, 446, 447, 739, 740, 741,
- 938, 939, 1538, 2043, 2144, 1451, 944, 5376, 5578, 1545, 5780, 5881,
- 1670, 5982, 1550, 6083, 6184, 6285, 6386, 6487, 6588, 6689, 6790, 6891,
- 6992, 7093, 7194, 7295, 7396, 5679, 7497, 7598, 7699, 7800, 7901, 8002,
- 8103, 8204, 8305, 8406, 8507, 8608, 8709, 8810, 1107, 1108, 1109, 1110,
- 8911, 9012, 9113, 9214, 9315, 9416, 9517, 9618, 1714, 9719, 0, 326,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 241, 242, 0, 0, 0, 0, 0, 0, 139, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0};
+ 1085, 1086, 565, 1709, 1089, 262, 263, 264, 265, 266, 1716,
+ 153, 154, 1719, 760, 761, 50, 51, 465, 466, 467, 980,
+ 981, 1604, 1499, 984, 773, 2129, 2234, 6014, 1611, 6434, 1738,
+ 1614, 6539, 6644, 1511, 6749, 6854, 6959, 7064, 7169, 7274, 7379,
+ 2024, 7484, 7589, 7694, 7799, 7904, 8009, 8114, 8219, 6224, 8324,
+ 8429, 6329, 8534, 8639, 8744, 8849, 8954, 9059, 9164, 9269, 9374,
+ 1151, 1152, 1153, 1154, 9479, 9584, 9689, 9794, 9899, 10004, 1782,
+ 10109, 10214, 10319, 10424, 10529, 0, 0, 0, 0, 0, 344,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 253, 254, 147, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
static const uint8_t elem_idxs[] = {
- 77, 79, 15, 16, 6, 25, 76, 19, 20, 21, 22, 23, 84, 17,
- 18, 43, 72, 83, 11, 12, 13, 0, 1, 2, 5, 4, 38, 50,
- 57, 7, 3, 24, 27, 37, 29, 30, 26, 31, 36, 32, 33, 34,
- 35, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 28, 51, 52,
- 53, 54, 55, 56, 58, 59, 60, 61, 62, 63, 64, 65, 78, 80,
- 81, 82, 66, 67, 68, 69, 70, 71, 73, 74, 85, 75, 255, 14,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 9, 10, 255, 255, 255, 255, 255, 255, 8};
+ 77, 79, 6, 25, 76, 19, 20, 21, 22, 23, 84, 15, 16, 83, 1,
+ 2, 17, 18, 11, 12, 13, 5, 4, 38, 43, 3, 0, 50, 57, 24,
+ 37, 29, 26, 36, 30, 31, 7, 32, 33, 34, 35, 39, 40, 41, 72,
+ 42, 44, 45, 46, 47, 48, 49, 51, 27, 52, 53, 28, 54, 55, 56,
+ 58, 59, 60, 61, 62, 63, 78, 80, 81, 82, 64, 65, 66, 67, 68,
+ 69, 85, 70, 71, 73, 74, 75, 255, 255, 255, 255, 255, 14, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 9, 10, 8};
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = static_cast<uint32_t>(a * 101 + b);
+ uint32_t k = (uint32_t)(a * 105 + b);
uint32_t h = elems_phash(k);
return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
elem_idxs[h] != 255
@@ -384,177 +396,177 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
- {&grpc_static_metadata_refcounts[32], {{g_bytes + 452, 1}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 504, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
- {&grpc_static_metadata_refcounts[33], {{g_bytes + 453, 1}}}},
+ {&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
- {&grpc_static_metadata_refcounts[34], {{g_bytes + 454, 1}}}},
+ {&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[35], {{g_bytes + 455, 8}}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[30], {{g_bytes + 437, 4}}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 7}}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 7}}}},
{{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 463, 8}}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 513, 8}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 471, 16}}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 521, 16}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[38], {{g_bytes + 487, 4}}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 537, 4}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[39], {{g_bytes + 491, 3}}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 541, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[40], {{g_bytes + 494, 3}}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 544, 3}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[41], {{g_bytes + 497, 4}}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 547, 4}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[42], {{g_bytes + 501, 5}}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 551, 5}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[43], {{g_bytes + 506, 4}}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 556, 4}}}},
{{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[44], {{g_bytes + 510, 3}}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 560, 3}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[45], {{g_bytes + 513, 3}}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 563, 3}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
- {&grpc_static_metadata_refcounts[46], {{g_bytes + 516, 1}}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 566, 1}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
- {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 11}}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 567, 11}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[48], {{g_bytes + 528, 3}}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 578, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[49], {{g_bytes + 531, 3}}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 581, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[50], {{g_bytes + 534, 3}}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 584, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[51], {{g_bytes + 537, 3}}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 587, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[52], {{g_bytes + 540, 3}}}},
- {{&grpc_static_metadata_refcounts[53], {{g_bytes + 543, 14}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 590, 3}}}},
+ {{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 14}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[54], {{g_bytes + 557, 13}}}},
- {{&grpc_static_metadata_refcounts[55], {{g_bytes + 570, 15}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[56], {{g_bytes + 585, 13}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[57], {{g_bytes + 598, 6}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[58], {{g_bytes + 604, 27}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[59], {{g_bytes + 631, 3}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[60], {{g_bytes + 634, 5}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[61], {{g_bytes + 639, 13}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[62], {{g_bytes + 652, 13}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[63], {{g_bytes + 665, 19}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 607, 13}}}},
+ {{&grpc_static_metadata_refcounts[59], {{g_bytes + 620, 15}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[60], {{g_bytes + 635, 13}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[61], {{g_bytes + 648, 6}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 27}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[63], {{g_bytes + 681, 3}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[64], {{g_bytes + 684, 5}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[65], {{g_bytes + 689, 13}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[66], {{g_bytes + 702, 13}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[67], {{g_bytes + 715, 19}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[35], {{g_bytes + 455, 8}}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[30], {{g_bytes + 437, 4}}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[64], {{g_bytes + 684, 16}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[65], {{g_bytes + 700, 14}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[66], {{g_bytes + 714, 16}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[67], {{g_bytes + 730, 13}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[68], {{g_bytes + 734, 16}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[69], {{g_bytes + 750, 14}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[70], {{g_bytes + 764, 16}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[71], {{g_bytes + 780, 13}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[68], {{g_bytes + 743, 6}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[69], {{g_bytes + 749, 4}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[70], {{g_bytes + 753, 4}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[71], {{g_bytes + 757, 6}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[72], {{g_bytes + 763, 7}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[73], {{g_bytes + 770, 4}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[72], {{g_bytes + 793, 6}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[73], {{g_bytes + 799, 4}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[74], {{g_bytes + 803, 4}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[75], {{g_bytes + 807, 6}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[76], {{g_bytes + 813, 7}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[77], {{g_bytes + 820, 4}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[74], {{g_bytes + 774, 8}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[75], {{g_bytes + 782, 17}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[76], {{g_bytes + 799, 13}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[77], {{g_bytes + 812, 8}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[78], {{g_bytes + 820, 19}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[79], {{g_bytes + 839, 13}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[78], {{g_bytes + 824, 8}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[79], {{g_bytes + 832, 17}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[80], {{g_bytes + 849, 13}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[81], {{g_bytes + 862, 8}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[82], {{g_bytes + 870, 19}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[83], {{g_bytes + 889, 13}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 11}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[81], {{g_bytes + 863, 4}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[82], {{g_bytes + 867, 8}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[83], {{g_bytes + 875, 12}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[84], {{g_bytes + 887, 18}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[85], {{g_bytes + 905, 19}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[86], {{g_bytes + 924, 5}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[87], {{g_bytes + 929, 7}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[88], {{g_bytes + 936, 7}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 11}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[90], {{g_bytes + 954, 6}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[91], {{g_bytes + 960, 10}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[92], {{g_bytes + 970, 25}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[93], {{g_bytes + 995, 17}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[84], {{g_bytes + 902, 11}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 4}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[86], {{g_bytes + 917, 8}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 12}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[88], {{g_bytes + 937, 18}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[89], {{g_bytes + 955, 19}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[90], {{g_bytes + 974, 5}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[91], {{g_bytes + 979, 7}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[92], {{g_bytes + 986, 7}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[93], {{g_bytes + 993, 11}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1004, 6}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1010, 10}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[96], {{g_bytes + 1020, 25}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[97], {{g_bytes + 1045, 17}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1012, 4}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1016, 3}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
- {{&grpc_static_metadata_refcounts[96], {{g_bytes + 1019, 16}}},
- {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[98], {{g_bytes + 1062, 4}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[99], {{g_bytes + 1066, 3}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
+ {{&grpc_static_metadata_refcounts[100], {{g_bytes + 1069, 16}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[35], {{g_bytes + 455, 8}}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 7}}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 7}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[97], {{g_bytes + 1035, 16}}}},
+ {&grpc_static_metadata_refcounts[101], {{g_bytes + 1085, 16}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[30], {{g_bytes + 437, 4}}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1051, 13}}}},
+ {&grpc_static_metadata_refcounts[102], {{g_bytes + 1101, 13}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[99], {{g_bytes + 1064, 12}}}},
+ {&grpc_static_metadata_refcounts[103], {{g_bytes + 1114, 12}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[100], {{g_bytes + 1076, 21}}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1126, 21}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[35], {{g_bytes + 455, 8}}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[30], {{g_bytes + 437, 4}}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1051, 13}}}},
+ {&grpc_static_metadata_refcounts[102], {{g_bytes + 1101, 13}}}},
};
bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
true, // :path
@@ -579,6 +591,8 @@ bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
true, // user-agent
true, // host
true, // lb-token
+ true, // grpc-previous-rpc-attempts
+ true, // grpc-retry-pushback-ms
};
const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 88d9f9f52c..b3a10f5873 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -31,7 +31,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 101
+#define GRPC_STATIC_MDSTR_COUNT 105
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
@@ -78,168 +78,176 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_HOST (grpc_static_slice_table[20])
/* "lb-token" */
#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[21])
+/* "grpc-previous-rpc-attempts" */
+#define GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS (grpc_static_slice_table[22])
+/* "grpc-retry-pushback-ms" */
+#define GRPC_MDSTR_GRPC_RETRY_PUSHBACK_MS (grpc_static_slice_table[23])
/* "grpc-timeout" */
-#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[22])
+#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[24])
+/* "1" */
+#define GRPC_MDSTR_1 (grpc_static_slice_table[25])
+/* "2" */
+#define GRPC_MDSTR_2 (grpc_static_slice_table[26])
+/* "3" */
+#define GRPC_MDSTR_3 (grpc_static_slice_table[27])
+/* "4" */
+#define GRPC_MDSTR_4 (grpc_static_slice_table[28])
/* "" */
-#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[23])
+#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[29])
/* "grpc.wait_for_ready" */
-#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[24])
+#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[30])
/* "grpc.timeout" */
-#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[25])
+#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[31])
/* "grpc.max_request_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_REQUEST_MESSAGE_BYTES \
- (grpc_static_slice_table[26])
+ (grpc_static_slice_table[32])
/* "grpc.max_response_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_RESPONSE_MESSAGE_BYTES \
- (grpc_static_slice_table[27])
+ (grpc_static_slice_table[33])
/* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */
#define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \
- (grpc_static_slice_table[28])
+ (grpc_static_slice_table[34])
/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[29])
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[35])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (grpc_static_slice_table[30])
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[36])
/* "stream/gzip" */
-#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[31])
+#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[37])
/* "0" */
-#define GRPC_MDSTR_0 (grpc_static_slice_table[32])
-/* "1" */
-#define GRPC_MDSTR_1 (grpc_static_slice_table[33])
-/* "2" */
-#define GRPC_MDSTR_2 (grpc_static_slice_table[34])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[38])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[35])
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[39])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[36])
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[40])
/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[37])
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[41])
/* "POST" */
-#define GRPC_MDSTR_POST (grpc_static_slice_table[38])
+#define GRPC_MDSTR_POST (grpc_static_slice_table[42])
/* "200" */
-#define GRPC_MDSTR_200 (grpc_static_slice_table[39])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[43])
/* "404" */
-#define GRPC_MDSTR_404 (grpc_static_slice_table[40])
+#define GRPC_MDSTR_404 (grpc_static_slice_table[44])
/* "http" */
-#define GRPC_MDSTR_HTTP (grpc_static_slice_table[41])
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[45])
/* "https" */
-#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[42])
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[46])
/* "grpc" */
-#define GRPC_MDSTR_GRPC (grpc_static_slice_table[43])
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[47])
/* "GET" */
-#define GRPC_MDSTR_GET (grpc_static_slice_table[44])
+#define GRPC_MDSTR_GET (grpc_static_slice_table[48])
/* "PUT" */
-#define GRPC_MDSTR_PUT (grpc_static_slice_table[45])
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[49])
/* "/" */
-#define GRPC_MDSTR_SLASH (grpc_static_slice_table[46])
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[50])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[47])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[51])
/* "204" */
-#define GRPC_MDSTR_204 (grpc_static_slice_table[48])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[52])
/* "206" */
-#define GRPC_MDSTR_206 (grpc_static_slice_table[49])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[53])
/* "304" */
-#define GRPC_MDSTR_304 (grpc_static_slice_table[50])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[54])
/* "400" */
-#define GRPC_MDSTR_400 (grpc_static_slice_table[51])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[55])
/* "500" */
-#define GRPC_MDSTR_500 (grpc_static_slice_table[52])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[56])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[53])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[57])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[54])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[58])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[55])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[59])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[56])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[60])
/* "accept" */
-#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[57])
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[61])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[58])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[62])
/* "age" */
-#define GRPC_MDSTR_AGE (grpc_static_slice_table[59])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[63])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[60])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[64])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[61])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[65])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[62])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[66])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[63])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[67])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[64])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[68])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[65])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[69])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[66])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[70])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[67])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[71])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[68])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[72])
/* "date" */
-#define GRPC_MDSTR_DATE (grpc_static_slice_table[69])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[73])
/* "etag" */
-#define GRPC_MDSTR_ETAG (grpc_static_slice_table[70])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[74])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[71])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[75])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[72])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[76])
/* "from" */
-#define GRPC_MDSTR_FROM (grpc_static_slice_table[73])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[77])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[74])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[78])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[75])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[79])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[76])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[80])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[77])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[81])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[78])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[82])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[79])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[83])
/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[80])
+#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[84])
/* "link" */
-#define GRPC_MDSTR_LINK (grpc_static_slice_table[81])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[85])
/* "location" */
-#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[82])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[86])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[83])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[87])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[84])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[88])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[85])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[89])
/* "range" */
-#define GRPC_MDSTR_RANGE (grpc_static_slice_table[86])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[90])
/* "referer" */
-#define GRPC_MDSTR_REFERER (grpc_static_slice_table[87])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[91])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[88])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[92])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[89])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[93])
/* "server" */
-#define GRPC_MDSTR_SERVER (grpc_static_slice_table[90])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[94])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[91])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[95])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[92])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[96])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[93])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[97])
/* "vary" */
-#define GRPC_MDSTR_VARY (grpc_static_slice_table[94])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[98])
/* "via" */
-#define GRPC_MDSTR_VIA (grpc_static_slice_table[95])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[99])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[96])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[100])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[97])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[101])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[98])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[102])
/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[99])
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[103])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (grpc_static_slice_table[100])
+ (grpc_static_slice_table[104])
extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
extern grpc_slice_refcount
@@ -537,6 +545,8 @@ typedef enum {
GRPC_BATCH_USER_AGENT,
GRPC_BATCH_HOST,
GRPC_BATCH_LB_TOKEN,
+ GRPC_BATCH_GRPC_PREVIOUS_RPC_ATTEMPTS,
+ GRPC_BATCH_GRPC_RETRY_PUSHBACK_MS,
GRPC_BATCH_CALLOUTS_COUNT
} grpc_metadata_batch_callouts_index;
@@ -565,6 +575,8 @@ typedef union {
struct grpc_linked_mdelem* user_agent;
struct grpc_linked_mdelem* host;
struct grpc_linked_mdelem* lb_token;
+ struct grpc_linked_mdelem* grpc_previous_rpc_attempts;
+ struct grpc_linked_mdelem* grpc_retry_pushback_ms;
} named;
} grpc_metadata_batch_callouts;
diff --git a/src/core/lib/transport/status_metadata.cc b/src/core/lib/transport/status_metadata.cc
new file mode 100644
index 0000000000..f896053e4d
--- /dev/null
+++ b/src/core/lib/transport/status_metadata.cc
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/transport/status_metadata.h"
+
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+/* we offset status by a small amount when storing it into transport metadata
+ as metadata cannot store a 0 value (which is used as OK for grpc_status_codes
+ */
+#define STATUS_OFFSET 1
+
+static void destroy_status(void* ignored) {}
+
+grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md) {
+ if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) {
+ return GRPC_STATUS_OK;
+ }
+ if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) {
+ return GRPC_STATUS_CANCELLED;
+ }
+ if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) {
+ return GRPC_STATUS_UNKNOWN;
+ }
+ void* user_data = grpc_mdelem_get_user_data(md, destroy_status);
+ if (user_data != nullptr) {
+ return static_cast<grpc_status_code>((intptr_t)user_data - STATUS_OFFSET);
+ }
+ uint32_t status;
+ if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) {
+ status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
+ }
+ grpc_mdelem_set_user_data(
+ md, destroy_status, (void*)static_cast<intptr_t>(status + STATUS_OFFSET));
+ return static_cast<grpc_status_code>(status);
+}
diff --git a/src/core/lib/security/transport/lb_targets_info.h b/src/core/lib/transport/status_metadata.h
index 7e816c5222..aed9c7ac20 100644
--- a/src/core/lib/security/transport/lb_targets_info.h
+++ b/src/core/lib/transport/status_metadata.h
@@ -16,19 +16,15 @@
*
*/
-#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H
-#define GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H
+#ifndef GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H
+#define GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H
#include <grpc/support/port_platform.h>
-#include "src/core/lib/slice/slice_hash_table.h"
+#include <grpc/status.h>
-/** Return a channel argument containing \a targets_info. */
-grpc_arg grpc_lb_targets_info_create_channel_arg(
- grpc_slice_hash_table* targets_info);
+#include "src/core/lib/transport/metadata.h"
-/** Return the instance of targets info in \a args or NULL */
-grpc_slice_hash_table* grpc_lb_targets_info_find_in_args(
- const grpc_channel_args* args);
+grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md);
-#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H */
+#endif /* GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H */
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index b279ce8c80..37e50344c4 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -98,6 +98,19 @@ void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from,
void grpc_transport_move_stats(grpc_transport_stream_stats* from,
grpc_transport_stream_stats* to);
+// This struct (which is present in both grpc_transport_stream_op_batch
+// and grpc_transport_op_batch) is a convenience to allow filters or
+// transports to schedule a closure related to a particular batch without
+// having to allocate memory. The general pattern is to initialize the
+// closure with the callback arg set to the batch and extra_arg set to
+// whatever state is associated with the handler (e.g., the call element
+// or the transport stream object).
+//
+// Note that this can only be used by the current handler of a given
+// batch on the way down the stack (i.e., whichever filter or transport is
+// currently handling the batch). Once a filter or transport passes control
+// of the batch to the next handler, it cannot depend on the contents of
+// this struct anymore, because the next handler may reuse it.
typedef struct {
void* extra_arg;
grpc_closure closure;
@@ -157,6 +170,11 @@ struct grpc_transport_stream_op_batch_payload {
uint32_t send_initial_metadata_flags;
// If non-NULL, will be set by the transport to the peer string
// (a char*, which the caller takes ownership of).
+ // Note: This pointer may be used by the transport after the
+ // send_initial_metadata op is completed. It must remain valid
+ // until the call is destroyed.
+ // Note: When a transport sets this, it must free the previous
+ // value, if any.
gpr_atm* peer_string;
} send_initial_metadata;
@@ -175,6 +193,9 @@ struct grpc_transport_stream_op_batch_payload {
struct {
grpc_metadata_batch* recv_initial_metadata;
+ // Flags are used only on the server side. If non-null, will be set to
+ // a bitfield of the GRPC_INITIAL_METADATA_xxx macros (e.g., to
+ // indicate if the call is idempotent).
uint32_t* recv_flags;
/** Should be enqueued when initial metadata is ready to be processed. */
grpc_closure* recv_initial_metadata_ready;
@@ -184,6 +205,11 @@ struct grpc_transport_stream_op_batch_payload {
bool* trailing_metadata_available;
// If non-NULL, will be set by the transport to the peer string
// (a char*, which the caller takes ownership of).
+ // Note: This pointer may be used by the transport after the
+ // recv_initial_metadata op is completed. It must remain valid
+ // until the call is destroyed.
+ // Note: When a transport sets this, it must free the previous
+ // value, if any.
gpr_atm* peer_string;
} recv_initial_metadata;
@@ -192,6 +218,7 @@ struct grpc_transport_stream_op_batch_payload {
// containing a received message.
// The caller is responsible for calling grpc_byte_stream_destroy()
// on this byte stream.
+ // Will be NULL if trailing metadata is received instead of a message.
grpc_byte_stream** recv_message;
/** Should be enqueued when one message is ready to be processed. */
grpc_closure* recv_message_ready;
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index e29b1087e4..045708b947 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -20,9 +20,6 @@
#pragma warning disable 1591
#region Designer generated code
-using System;
-using System.Threading;
-using System.Threading.Tasks;
using grpc = global::Grpc.Core;
namespace Math {
@@ -159,7 +156,7 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Math.DivReply Div(global::Math.DivArgs request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Math.DivReply Div(global::Math.DivArgs request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Div(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -183,7 +180,7 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return DivAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -208,7 +205,7 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return DivMany(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -234,7 +231,7 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Fib(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -258,7 +255,7 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Sum(new grpc::CallOptions(headers, deadline, cancellationToken));
}
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index 24a7259979..1d80bcd59e 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -20,9 +20,6 @@
#pragma warning disable 1591
#region Designer generated code
-using System;
-using System.Threading;
-using System.Threading.Tasks;
using grpc = global::Grpc.Core;
namespace Grpc.Health.V1 {
@@ -79,7 +76,7 @@ namespace Grpc.Health.V1 {
{
}
- public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Check(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -87,7 +84,7 @@ namespace Grpc.Health.V1 {
{
return CallInvoker.BlockingUnaryCall(__Method_Check, null, options, request);
}
- public virtual grpc::AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return CheckAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
diff --git a/src/csharp/Grpc.IntegrationTesting/Control.cs b/src/csharp/Grpc.IntegrationTesting/Control.cs
index 8e5da7b9f2..8795728906 100644
--- a/src/csharp/Grpc.IntegrationTesting/Control.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Control.cs
@@ -32,7 +32,7 @@ namespace Grpc.Testing {
"U2VjdXJpdHlQYXJhbXMSEwoLdXNlX3Rlc3RfY2EYASABKAgSHAoUc2VydmVy",
"X2hvc3Rfb3ZlcnJpZGUYAiABKAkSEQoJY3JlZF90eXBlGAMgASgJIk0KCkNo",
"YW5uZWxBcmcSDAoEbmFtZRgBIAEoCRITCglzdHJfdmFsdWUYAiABKAlIABIT",
- "CglpbnRfdmFsdWUYAyABKAVIAEIHCgV2YWx1ZSLVBAoMQ2xpZW50Q29uZmln",
+ "CglpbnRfdmFsdWUYAyABKAVIAEIHCgV2YWx1ZSLvBAoMQ2xpZW50Q29uZmln",
"EhYKDnNlcnZlcl90YXJnZXRzGAEgAygJEi0KC2NsaWVudF90eXBlGAIgASgO",
"MhguZ3JwYy50ZXN0aW5nLkNsaWVudFR5cGUSNQoPc2VjdXJpdHlfcGFyYW1z",
"GAMgASgLMhwuZ3JwYy50ZXN0aW5nLlNlY3VyaXR5UGFyYW1zEiQKHG91dHN0",
@@ -45,59 +45,60 @@ namespace Grpc.Testing {
"dG9ncmFtUGFyYW1zEhEKCWNvcmVfbGlzdBgNIAMoBRISCgpjb3JlX2xpbWl0",
"GA4gASgFEhgKEG90aGVyX2NsaWVudF9hcGkYDyABKAkSLgoMY2hhbm5lbF9h",
"cmdzGBAgAygLMhguZ3JwYy50ZXN0aW5nLkNoYW5uZWxBcmcSFgoOdGhyZWFk",
- "c19wZXJfY3EYESABKAUSGwoTbWVzc2FnZXNfcGVyX3N0cmVhbRgSIAEoBSI4",
- "CgxDbGllbnRTdGF0dXMSKAoFc3RhdHMYASABKAsyGS5ncnBjLnRlc3Rpbmcu",
- "Q2xpZW50U3RhdHMiFQoETWFyaxINCgVyZXNldBgBIAEoCCJoCgpDbGllbnRB",
- "cmdzEisKBXNldHVwGAEgASgLMhouZ3JwYy50ZXN0aW5nLkNsaWVudENvbmZp",
- "Z0gAEiIKBG1hcmsYAiABKAsyEi5ncnBjLnRlc3RpbmcuTWFya0gAQgkKB2Fy",
- "Z3R5cGUi/QIKDFNlcnZlckNvbmZpZxItCgtzZXJ2ZXJfdHlwZRgBIAEoDjIY",
- "LmdycGMudGVzdGluZy5TZXJ2ZXJUeXBlEjUKD3NlY3VyaXR5X3BhcmFtcxgC",
- "IAEoCzIcLmdycGMudGVzdGluZy5TZWN1cml0eVBhcmFtcxIMCgRwb3J0GAQg",
- "ASgFEhwKFGFzeW5jX3NlcnZlcl90aHJlYWRzGAcgASgFEhIKCmNvcmVfbGlt",
- "aXQYCCABKAUSMwoOcGF5bG9hZF9jb25maWcYCSABKAsyGy5ncnBjLnRlc3Rp",
- "bmcuUGF5bG9hZENvbmZpZxIRCgljb3JlX2xpc3QYCiADKAUSGAoQb3RoZXJf",
- "c2VydmVyX2FwaRgLIAEoCRIWCg50aHJlYWRzX3Blcl9jcRgMIAEoBRIcChNy",
- "ZXNvdXJjZV9xdW90YV9zaXplGOkHIAEoBRIvCgxjaGFubmVsX2FyZ3MY6gcg",
- "AygLMhguZ3JwYy50ZXN0aW5nLkNoYW5uZWxBcmciaAoKU2VydmVyQXJncxIr",
- "CgVzZXR1cBgBIAEoCzIaLmdycGMudGVzdGluZy5TZXJ2ZXJDb25maWdIABIi",
- "CgRtYXJrGAIgASgLMhIuZ3JwYy50ZXN0aW5nLk1hcmtIAEIJCgdhcmd0eXBl",
- "IlUKDFNlcnZlclN0YXR1cxIoCgVzdGF0cxgBIAEoCzIZLmdycGMudGVzdGlu",
- "Zy5TZXJ2ZXJTdGF0cxIMCgRwb3J0GAIgASgFEg0KBWNvcmVzGAMgASgFIg0K",
- "C0NvcmVSZXF1ZXN0Ih0KDENvcmVSZXNwb25zZRINCgVjb3JlcxgBIAEoBSIG",
- "CgRWb2lkIv0BCghTY2VuYXJpbxIMCgRuYW1lGAEgASgJEjEKDWNsaWVudF9j",
- "b25maWcYAiABKAsyGi5ncnBjLnRlc3RpbmcuQ2xpZW50Q29uZmlnEhMKC251",
- "bV9jbGllbnRzGAMgASgFEjEKDXNlcnZlcl9jb25maWcYBCABKAsyGi5ncnBj",
- "LnRlc3RpbmcuU2VydmVyQ29uZmlnEhMKC251bV9zZXJ2ZXJzGAUgASgFEhYK",
- "Dndhcm11cF9zZWNvbmRzGAYgASgFEhkKEWJlbmNobWFya19zZWNvbmRzGAcg",
- "ASgFEiAKGHNwYXduX2xvY2FsX3dvcmtlcl9jb3VudBgIIAEoBSI2CglTY2Vu",
- "YXJpb3MSKQoJc2NlbmFyaW9zGAEgAygLMhYuZ3JwYy50ZXN0aW5nLlNjZW5h",
- "cmlvIoQEChVTY2VuYXJpb1Jlc3VsdFN1bW1hcnkSCwoDcXBzGAEgASgBEhsK",
- "E3Fwc19wZXJfc2VydmVyX2NvcmUYAiABKAESGgoSc2VydmVyX3N5c3RlbV90",
- "aW1lGAMgASgBEhgKEHNlcnZlcl91c2VyX3RpbWUYBCABKAESGgoSY2xpZW50",
- "X3N5c3RlbV90aW1lGAUgASgBEhgKEGNsaWVudF91c2VyX3RpbWUYBiABKAES",
- "EgoKbGF0ZW5jeV81MBgHIAEoARISCgpsYXRlbmN5XzkwGAggASgBEhIKCmxh",
- "dGVuY3lfOTUYCSABKAESEgoKbGF0ZW5jeV85ORgKIAEoARITCgtsYXRlbmN5",
- "Xzk5ORgLIAEoARIYChBzZXJ2ZXJfY3B1X3VzYWdlGAwgASgBEiYKHnN1Y2Nl",
- "c3NmdWxfcmVxdWVzdHNfcGVyX3NlY29uZBgNIAEoARIiChpmYWlsZWRfcmVx",
- "dWVzdHNfcGVyX3NlY29uZBgOIAEoARIgChhjbGllbnRfcG9sbHNfcGVyX3Jl",
- "cXVlc3QYDyABKAESIAoYc2VydmVyX3BvbGxzX3Blcl9yZXF1ZXN0GBAgASgB",
- "EiIKGnNlcnZlcl9xdWVyaWVzX3Blcl9jcHVfc2VjGBEgASgBEiIKGmNsaWVu",
- "dF9xdWVyaWVzX3Blcl9jcHVfc2VjGBIgASgBIoMDCg5TY2VuYXJpb1Jlc3Vs",
- "dBIoCghzY2VuYXJpbxgBIAEoCzIWLmdycGMudGVzdGluZy5TY2VuYXJpbxIu",
- "CglsYXRlbmNpZXMYAiABKAsyGy5ncnBjLnRlc3RpbmcuSGlzdG9ncmFtRGF0",
- "YRIvCgxjbGllbnRfc3RhdHMYAyADKAsyGS5ncnBjLnRlc3RpbmcuQ2xpZW50",
- "U3RhdHMSLwoMc2VydmVyX3N0YXRzGAQgAygLMhkuZ3JwYy50ZXN0aW5nLlNl",
- "cnZlclN0YXRzEhQKDHNlcnZlcl9jb3JlcxgFIAMoBRI0CgdzdW1tYXJ5GAYg",
- "ASgLMiMuZ3JwYy50ZXN0aW5nLlNjZW5hcmlvUmVzdWx0U3VtbWFyeRIWCg5j",
- "bGllbnRfc3VjY2VzcxgHIAMoCBIWCg5zZXJ2ZXJfc3VjY2VzcxgIIAMoCBI5",
- "Cg9yZXF1ZXN0X3Jlc3VsdHMYCSADKAsyIC5ncnBjLnRlc3RpbmcuUmVxdWVz",
- "dFJlc3VsdENvdW50KkEKCkNsaWVudFR5cGUSDwoLU1lOQ19DTElFTlQQABIQ",
- "CgxBU1lOQ19DTElFTlQQARIQCgxPVEhFUl9DTElFTlQQAipbCgpTZXJ2ZXJU",
- "eXBlEg8KC1NZTkNfU0VSVkVSEAASEAoMQVNZTkNfU0VSVkVSEAESGAoUQVNZ",
- "TkNfR0VORVJJQ19TRVJWRVIQAhIQCgxPVEhFUl9TRVJWRVIQAypyCgdScGNU",
- "eXBlEgkKBVVOQVJZEAASDQoJU1RSRUFNSU5HEAESGQoVU1RSRUFNSU5HX0ZS",
- "T01fQ0xJRU5UEAISGQoVU1RSRUFNSU5HX0ZST01fU0VSVkVSEAMSFwoTU1RS",
- "RUFNSU5HX0JPVEhfV0FZUxAEYgZwcm90bzM="));
+ "c19wZXJfY3EYESABKAUSGwoTbWVzc2FnZXNfcGVyX3N0cmVhbRgSIAEoBRIY",
+ "ChB1c2VfY29hbGVzY2VfYXBpGBMgASgIIjgKDENsaWVudFN0YXR1cxIoCgVz",
+ "dGF0cxgBIAEoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cyIVCgRNYXJr",
+ "Eg0KBXJlc2V0GAEgASgIImgKCkNsaWVudEFyZ3MSKwoFc2V0dXAYASABKAsy",
+ "Gi5ncnBjLnRlc3RpbmcuQ2xpZW50Q29uZmlnSAASIgoEbWFyaxgCIAEoCzIS",
+ "LmdycGMudGVzdGluZy5NYXJrSABCCQoHYXJndHlwZSL9AgoMU2VydmVyQ29u",
+ "ZmlnEi0KC3NlcnZlcl90eXBlGAEgASgOMhguZ3JwYy50ZXN0aW5nLlNlcnZl",
+ "clR5cGUSNQoPc2VjdXJpdHlfcGFyYW1zGAIgASgLMhwuZ3JwYy50ZXN0aW5n",
+ "LlNlY3VyaXR5UGFyYW1zEgwKBHBvcnQYBCABKAUSHAoUYXN5bmNfc2VydmVy",
+ "X3RocmVhZHMYByABKAUSEgoKY29yZV9saW1pdBgIIAEoBRIzCg5wYXlsb2Fk",
+ "X2NvbmZpZxgJIAEoCzIbLmdycGMudGVzdGluZy5QYXlsb2FkQ29uZmlnEhEK",
+ "CWNvcmVfbGlzdBgKIAMoBRIYChBvdGhlcl9zZXJ2ZXJfYXBpGAsgASgJEhYK",
+ "DnRocmVhZHNfcGVyX2NxGAwgASgFEhwKE3Jlc291cmNlX3F1b3RhX3NpemUY",
+ "6QcgASgFEi8KDGNoYW5uZWxfYXJncxjqByADKAsyGC5ncnBjLnRlc3Rpbmcu",
+ "Q2hhbm5lbEFyZyJoCgpTZXJ2ZXJBcmdzEisKBXNldHVwGAEgASgLMhouZ3Jw",
+ "Yy50ZXN0aW5nLlNlcnZlckNvbmZpZ0gAEiIKBG1hcmsYAiABKAsyEi5ncnBj",
+ "LnRlc3RpbmcuTWFya0gAQgkKB2FyZ3R5cGUiVQoMU2VydmVyU3RhdHVzEigK",
+ "BXN0YXRzGAEgASgLMhkuZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXRzEgwKBHBv",
+ "cnQYAiABKAUSDQoFY29yZXMYAyABKAUiDQoLQ29yZVJlcXVlc3QiHQoMQ29y",
+ "ZVJlc3BvbnNlEg0KBWNvcmVzGAEgASgFIgYKBFZvaWQi/QEKCFNjZW5hcmlv",
+ "EgwKBG5hbWUYASABKAkSMQoNY2xpZW50X2NvbmZpZxgCIAEoCzIaLmdycGMu",
+ "dGVzdGluZy5DbGllbnRDb25maWcSEwoLbnVtX2NsaWVudHMYAyABKAUSMQoN",
+ "c2VydmVyX2NvbmZpZxgEIAEoCzIaLmdycGMudGVzdGluZy5TZXJ2ZXJDb25m",
+ "aWcSEwoLbnVtX3NlcnZlcnMYBSABKAUSFgoOd2FybXVwX3NlY29uZHMYBiAB",
+ "KAUSGQoRYmVuY2htYXJrX3NlY29uZHMYByABKAUSIAoYc3Bhd25fbG9jYWxf",
+ "d29ya2VyX2NvdW50GAggASgFIjYKCVNjZW5hcmlvcxIpCglzY2VuYXJpb3MY",
+ "ASADKAsyFi5ncnBjLnRlc3RpbmcuU2NlbmFyaW8ihAQKFVNjZW5hcmlvUmVz",
+ "dWx0U3VtbWFyeRILCgNxcHMYASABKAESGwoTcXBzX3Blcl9zZXJ2ZXJfY29y",
+ "ZRgCIAEoARIaChJzZXJ2ZXJfc3lzdGVtX3RpbWUYAyABKAESGAoQc2VydmVy",
+ "X3VzZXJfdGltZRgEIAEoARIaChJjbGllbnRfc3lzdGVtX3RpbWUYBSABKAES",
+ "GAoQY2xpZW50X3VzZXJfdGltZRgGIAEoARISCgpsYXRlbmN5XzUwGAcgASgB",
+ "EhIKCmxhdGVuY3lfOTAYCCABKAESEgoKbGF0ZW5jeV85NRgJIAEoARISCgps",
+ "YXRlbmN5Xzk5GAogASgBEhMKC2xhdGVuY3lfOTk5GAsgASgBEhgKEHNlcnZl",
+ "cl9jcHVfdXNhZ2UYDCABKAESJgoec3VjY2Vzc2Z1bF9yZXF1ZXN0c19wZXJf",
+ "c2Vjb25kGA0gASgBEiIKGmZhaWxlZF9yZXF1ZXN0c19wZXJfc2Vjb25kGA4g",
+ "ASgBEiAKGGNsaWVudF9wb2xsc19wZXJfcmVxdWVzdBgPIAEoARIgChhzZXJ2",
+ "ZXJfcG9sbHNfcGVyX3JlcXVlc3QYECABKAESIgoac2VydmVyX3F1ZXJpZXNf",
+ "cGVyX2NwdV9zZWMYESABKAESIgoaY2xpZW50X3F1ZXJpZXNfcGVyX2NwdV9z",
+ "ZWMYEiABKAEigwMKDlNjZW5hcmlvUmVzdWx0EigKCHNjZW5hcmlvGAEgASgL",
+ "MhYuZ3JwYy50ZXN0aW5nLlNjZW5hcmlvEi4KCWxhdGVuY2llcxgCIAEoCzIb",
+ "LmdycGMudGVzdGluZy5IaXN0b2dyYW1EYXRhEi8KDGNsaWVudF9zdGF0cxgD",
+ "IAMoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cxIvCgxzZXJ2ZXJfc3Rh",
+ "dHMYBCADKAsyGS5ncnBjLnRlc3RpbmcuU2VydmVyU3RhdHMSFAoMc2VydmVy",
+ "X2NvcmVzGAUgAygFEjQKB3N1bW1hcnkYBiABKAsyIy5ncnBjLnRlc3Rpbmcu",
+ "U2NlbmFyaW9SZXN1bHRTdW1tYXJ5EhYKDmNsaWVudF9zdWNjZXNzGAcgAygI",
+ "EhYKDnNlcnZlcl9zdWNjZXNzGAggAygIEjkKD3JlcXVlc3RfcmVzdWx0cxgJ",
+ "IAMoCzIgLmdycGMudGVzdGluZy5SZXF1ZXN0UmVzdWx0Q291bnQqQQoKQ2xp",
+ "ZW50VHlwZRIPCgtTWU5DX0NMSUVOVBAAEhAKDEFTWU5DX0NMSUVOVBABEhAK",
+ "DE9USEVSX0NMSUVOVBACKlsKClNlcnZlclR5cGUSDwoLU1lOQ19TRVJWRVIQ",
+ "ABIQCgxBU1lOQ19TRVJWRVIQARIYChRBU1lOQ19HRU5FUklDX1NFUlZFUhAC",
+ "EhAKDE9USEVSX1NFUlZFUhADKnIKB1JwY1R5cGUSCQoFVU5BUlkQABINCglT",
+ "VFJFQU1JTkcQARIZChVTVFJFQU1JTkdfRlJPTV9DTElFTlQQAhIZChVTVFJF",
+ "QU1JTkdfRlJPTV9TRVJWRVIQAxIXChNTVFJFQU1JTkdfQk9USF9XQVlTEARi",
+ "BnByb3RvMw=="));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { global::Grpc.Testing.PayloadsReflection.Descriptor, global::Grpc.Testing.StatsReflection.Descriptor, },
new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Grpc.Testing.ClientType), typeof(global::Grpc.Testing.ServerType), typeof(global::Grpc.Testing.RpcType), }, new pbr::GeneratedClrTypeInfo[] {
@@ -106,7 +107,7 @@ namespace Grpc.Testing {
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.LoadParams), global::Grpc.Testing.LoadParams.Parser, new[]{ "ClosedLoop", "Poisson" }, new[]{ "Load" }, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.SecurityParams), global::Grpc.Testing.SecurityParams.Parser, new[]{ "UseTestCa", "ServerHostOverride", "CredType" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ChannelArg), global::Grpc.Testing.ChannelArg.Parser, new[]{ "Name", "StrValue", "IntValue" }, new[]{ "Value" }, null, null),
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientConfig), global::Grpc.Testing.ClientConfig.Parser, new[]{ "ServerTargets", "ClientType", "SecurityParams", "OutstandingRpcsPerChannel", "ClientChannels", "AsyncClientThreads", "RpcType", "LoadParams", "PayloadConfig", "HistogramParams", "CoreList", "CoreLimit", "OtherClientApi", "ChannelArgs", "ThreadsPerCq", "MessagesPerStream" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientConfig), global::Grpc.Testing.ClientConfig.Parser, new[]{ "ServerTargets", "ClientType", "SecurityParams", "OutstandingRpcsPerChannel", "ClientChannels", "AsyncClientThreads", "RpcType", "LoadParams", "PayloadConfig", "HistogramParams", "CoreList", "CoreLimit", "OtherClientApi", "ChannelArgs", "ThreadsPerCq", "MessagesPerStream", "UseCoalesceApi" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStatus), global::Grpc.Testing.ClientStatus.Parser, new[]{ "Stats" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Mark), global::Grpc.Testing.Mark.Parser, new[]{ "Reset" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientArgs), global::Grpc.Testing.ClientArgs.Parser, new[]{ "Setup", "Mark" }, new[]{ "Argtype" }, null, null),
@@ -989,6 +990,7 @@ namespace Grpc.Testing {
channelArgs_ = other.channelArgs_.Clone();
threadsPerCq_ = other.threadsPerCq_;
messagesPerStream_ = other.messagesPerStream_;
+ useCoalesceApi_ = other.useCoalesceApi_;
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1198,6 +1200,20 @@ namespace Grpc.Testing {
}
}
+ /// <summary>Field number for the "use_coalesce_api" field.</summary>
+ public const int UseCoalesceApiFieldNumber = 19;
+ private bool useCoalesceApi_;
+ /// <summary>
+ /// Use coalescing API when possible.
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool UseCoalesceApi {
+ get { return useCoalesceApi_; }
+ set {
+ useCoalesceApi_ = value;
+ }
+ }
+
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as ClientConfig);
@@ -1227,6 +1243,7 @@ namespace Grpc.Testing {
if(!channelArgs_.Equals(other.channelArgs_)) return false;
if (ThreadsPerCq != other.ThreadsPerCq) return false;
if (MessagesPerStream != other.MessagesPerStream) return false;
+ if (UseCoalesceApi != other.UseCoalesceApi) return false;
return true;
}
@@ -1249,6 +1266,7 @@ namespace Grpc.Testing {
hash ^= channelArgs_.GetHashCode();
if (ThreadsPerCq != 0) hash ^= ThreadsPerCq.GetHashCode();
if (MessagesPerStream != 0) hash ^= MessagesPerStream.GetHashCode();
+ if (UseCoalesceApi != false) hash ^= UseCoalesceApi.GetHashCode();
return hash;
}
@@ -1314,6 +1332,10 @@ namespace Grpc.Testing {
output.WriteRawTag(144, 1);
output.WriteInt32(MessagesPerStream);
}
+ if (UseCoalesceApi != false) {
+ output.WriteRawTag(152, 1);
+ output.WriteBool(UseCoalesceApi);
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1361,6 +1383,9 @@ namespace Grpc.Testing {
if (MessagesPerStream != 0) {
size += 2 + pb::CodedOutputStream.ComputeInt32Size(MessagesPerStream);
}
+ if (UseCoalesceApi != false) {
+ size += 2 + 1;
+ }
return size;
}
@@ -1423,6 +1448,9 @@ namespace Grpc.Testing {
if (other.MessagesPerStream != 0) {
MessagesPerStream = other.MessagesPerStream;
}
+ if (other.UseCoalesceApi != false) {
+ UseCoalesceApi = other.UseCoalesceApi;
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1510,6 +1538,10 @@ namespace Grpc.Testing {
MessagesPerStream = input.ReadInt32();
break;
}
+ case 152: {
+ UseCoalesceApi = input.ReadBool();
+ break;
+ }
}
}
}
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
index c02c9844e3..ba2107a576 100755
--- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
@@ -19,7 +19,7 @@
<ItemGroup>
<PackageReference Include="Google.Protobuf" Version="$(GoogleProtobufVersion)" />
<PackageReference Include="CommandLineParser" Version="2.1.1-beta" />
- <PackageReference Include="Moq" Version="4.7.0" />
+ <PackageReference Include="Moq" Version="4.8.2" />
<PackageReference Include="NUnit" Version="3.6.0" />
<PackageReference Include="NUnitLite" Version="3.6.0" />
</ItemGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index f71d6d197d..d18b9e7d5e 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -26,9 +26,6 @@
#pragma warning disable 1591
#region Designer generated code
-using System;
-using System.Threading;
-using System.Threading.Tasks;
using grpc = global::Grpc.Core;
namespace Grpc.Testing {
@@ -121,7 +118,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return GetAllGauges(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -144,7 +141,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return GetGauge(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -166,7 +163,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return GetGaugeAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index d2e4f2e4a5..46b328a773 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -22,9 +22,6 @@
#pragma warning disable 1591
#region Designer generated code
-using System;
-using System.Threading;
-using System.Threading.Tasks;
using grpc = global::Grpc.Core;
namespace Grpc.Testing {
@@ -177,7 +174,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnaryCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -201,7 +198,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnaryCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -225,7 +222,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StreamingCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -248,7 +245,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingFromClient(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingFromClient(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StreamingFromClient(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -271,7 +268,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.SimpleResponse> StreamingFromServer(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.SimpleResponse> StreamingFromServer(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StreamingFromServer(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -294,7 +291,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingBothWays(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingBothWays(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StreamingBothWays(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -470,7 +467,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return RunServer(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -500,7 +497,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return RunClient(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -526,7 +523,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return CoreCount(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -548,7 +545,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return CoreCountAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -570,7 +567,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return QuitWorker(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -592,7 +589,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return QuitWorkerAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -692,7 +689,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Void ReportScenario(global::Grpc.Testing.ScenarioResult request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Void ReportScenario(global::Grpc.Testing.ScenarioResult request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return ReportScenario(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -714,7 +711,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> ReportScenarioAsync(global::Grpc.Testing.ScenarioResult request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> ReportScenarioAsync(global::Grpc.Testing.ScenarioResult request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return ReportScenarioAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index c0d147c150..6c4b77f7ac 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -23,9 +23,6 @@
#pragma warning disable 1591
#region Designer generated code
-using System;
-using System.Threading;
-using System.Threading.Tasks;
using grpc = global::Grpc.Core;
namespace Grpc.Testing {
@@ -244,7 +241,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return EmptyCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -266,7 +263,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return EmptyCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -288,7 +285,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnaryCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -310,7 +307,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnaryCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -334,7 +331,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return CacheableUnaryCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -360,7 +357,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return CacheableUnaryCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -385,7 +382,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StreamingOutputCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -408,7 +405,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StreamingInputCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -431,7 +428,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return FullDuplexCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -456,7 +453,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return HalfDuplexCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -481,7 +478,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnimplementedCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -505,7 +502,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnimplementedCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -613,7 +610,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnimplementedCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -635,7 +632,7 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return UnimplementedCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -734,7 +731,7 @@ namespace Grpc.Testing {
{
}
- public virtual global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Start(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -742,7 +739,7 @@ namespace Grpc.Testing {
{
return CallInvoker.BlockingUnaryCall(__Method_Start, null, options, request);
}
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StartAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -750,7 +747,7 @@ namespace Grpc.Testing {
{
return CallInvoker.AsyncUnaryCall(__Method_Start, null, options, request);
}
- public virtual global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Stop(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
@@ -758,7 +755,7 @@ namespace Grpc.Testing {
{
return CallInvoker.BlockingUnaryCall(__Method_Stop, null, options, request);
}
- public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return StopAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
index 0195186eba..e2263cfc90 100644
--- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
+++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
@@ -22,9 +22,6 @@
#pragma warning disable 1591
#region Designer generated code
-using System;
-using System.Threading;
-using System.Threading.Tasks;
using grpc = global::Grpc.Core;
namespace Grpc.Reflection.V1Alpha {
@@ -97,7 +94,7 @@ namespace Grpc.Reflection.V1Alpha {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return ServerReflectionInfo(new grpc::CallOptions(headers, deadline, cancellationToken));
}
diff --git a/src/csharp/global.json b/src/csharp/global.json
index e4b797ee8e..815be4bfb9 100644
--- a/src/csharp/global.json
+++ b/src/csharp/global.json
@@ -1,5 +1,5 @@
{
"sdk": {
- "version": "1.0.0"
+ "version": "2.1.4"
}
}
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index ac4596da25..02492607cd 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -108,6 +108,9 @@ static NSString * const kBearerPrefix = @"Bearer ";
// The dispatch queue to be used for enqueuing responses to user. Defaulted to the main dispatch
// queue
dispatch_queue_t _responseQueue;
+
+ // Whether the call is finished. If it is, should not call finishWithError again.
+ BOOL _finished;
}
@synthesize state = _state;
@@ -206,6 +209,8 @@ static NSString * const kBearerPrefix = @"Bearer ";
} else {
[_responseWriteable enqueueSuccessfulCompletion];
}
+
+ [GRPCConnectivityMonitor unregisterObserver:self];
}
- (void)cancelCall {
@@ -214,9 +219,10 @@ static NSString * const kBearerPrefix = @"Bearer ";
}
- (void)cancel {
- [self finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeCancelled
- userInfo:@{NSLocalizedDescriptionKey: @"Canceled by app"}]];
+ [self maybeFinishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeCancelled
+ userInfo:@{NSLocalizedDescriptionKey: @"Canceled by app"}]];
+
if (!self.isWaitingForToken) {
[self cancelCall];
} else {
@@ -224,6 +230,19 @@ static NSString * const kBearerPrefix = @"Bearer ";
}
}
+- (void)maybeFinishWithError:(NSError *)errorOrNil {
+ BOOL toFinish = NO;
+ @synchronized(self) {
+ if (_finished == NO) {
+ _finished = YES;
+ toFinish = YES;
+ }
+ }
+ if (toFinish == YES) {
+ [self finishWithError:errorOrNil];
+ }
+}
+
- (void)dealloc {
__block GRPCWrappedCall *wrappedCall = _wrappedCall;
dispatch_async(_callQueue, ^{
@@ -250,11 +269,13 @@ static NSString * const kBearerPrefix = @"Bearer ";
if (self.state == GRXWriterStatePaused) {
return;
}
- __weak GRPCCall *weakSelf = self;
- __weak GRXConcurrentWriteable *weakWriteable = _responseWriteable;
dispatch_async(_callQueue, ^{
- [weakSelf startReadWithHandler:^(grpc_byte_buffer *message) {
+ __weak GRPCCall *weakSelf = self;
+ __weak GRXConcurrentWriteable *weakWriteable = self->_responseWriteable;
+ [self startReadWithHandler:^(grpc_byte_buffer *message) {
+ __strong GRPCCall *strongSelf = weakSelf;
+ __strong GRXConcurrentWriteable *strongWriteable = weakWriteable;
if (message == NULL) {
// No more messages from the server
return;
@@ -266,14 +287,14 @@ static NSString * const kBearerPrefix = @"Bearer ";
// don't want to throw, because the app shouldn't crash for a behavior
// that's on the hands of any server to have. Instead we finish and ask
// the server to cancel.
- [weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeResourceExhausted
- userInfo:@{NSLocalizedDescriptionKey: @"Client does not have enough memory to hold the server response."}]];
- [weakSelf cancelCall];
+ [strongSelf maybeFinishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeResourceExhausted
+ userInfo:@{NSLocalizedDescriptionKey: @"Client does not have enough memory to hold the server response."}]];
+ [strongSelf cancelCall];
return;
}
- [weakWriteable enqueueValue:data completionHandler:^{
- [weakSelf startNextRead];
+ [strongWriteable enqueueValue:data completionHandler:^{
+ [strongSelf startNextRead];
}];
}];
});
@@ -333,12 +354,17 @@ static NSString * const kBearerPrefix = @"Bearer ";
_requestWriter.state = GRXWriterStatePaused;
}
- __weak GRPCCall *weakSelf = self;
dispatch_async(_callQueue, ^{
- [weakSelf writeMessage:value withErrorHandler:^{
- [weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeInternal
- userInfo:nil]];
+ __weak GRPCCall *weakSelf = self;
+ [self writeMessage:value withErrorHandler:^{
+ __strong GRPCCall *strongSelf = weakSelf;
+ if (strongSelf != nil) {
+ [strongSelf maybeFinishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeInternal
+ userInfo:nil]];
+ // Wrapped call must be canceled when error is reported to upper layers
+ [strongSelf cancelCall];
+ }
}];
});
}
@@ -360,12 +386,15 @@ static NSString * const kBearerPrefix = @"Bearer ";
if (errorOrNil) {
[self cancel];
} else {
- __weak GRPCCall *weakSelf = self;
dispatch_async(_callQueue, ^{
- [weakSelf finishRequestWithErrorHandler:^{
- [weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeInternal
- userInfo:nil]];
+ __weak GRPCCall *weakSelf = self;
+ [self finishRequestWithErrorHandler:^{
+ __strong GRPCCall *strongSelf = weakSelf;
+ [strongSelf maybeFinishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeInternal
+ userInfo:nil]];
+ // Wrapped call must be canceled when error is reported to upper layers
+ [strongSelf cancelCall];
}];
});
}
@@ -387,30 +416,37 @@ static NSString * const kBearerPrefix = @"Bearer ";
}
- (void)invokeCall {
+ __weak GRPCCall *weakSelf = self;
[self invokeCallWithHeadersHandler:^(NSDictionary *headers) {
// Response headers received.
- self.responseHeaders = headers;
- [self startNextRead];
+ __strong GRPCCall *strongSelf = weakSelf;
+ if (strongSelf) {
+ strongSelf.responseHeaders = headers;
+ [strongSelf startNextRead];
+ }
} completionHandler:^(NSError *error, NSDictionary *trailers) {
- self.responseTrailers = trailers;
+ __strong GRPCCall *strongSelf = weakSelf;
+ if (strongSelf) {
+ strongSelf.responseTrailers = trailers;
- if (error) {
- NSMutableDictionary *userInfo = [NSMutableDictionary dictionary];
- if (error.userInfo) {
- [userInfo addEntriesFromDictionary:error.userInfo];
- }
- userInfo[kGRPCTrailersKey] = self.responseTrailers;
- // TODO(jcanizales): The C gRPC library doesn't guarantee that the headers block will be
- // called before this one, so an error might end up with trailers but no headers. We
- // shouldn't call finishWithError until ater both blocks are called. It is also when this is
- // done that we can provide a merged view of response headers and trailers in a thread-safe
- // way.
- if (self.responseHeaders) {
- userInfo[kGRPCHeadersKey] = self.responseHeaders;
+ if (error) {
+ NSMutableDictionary *userInfo = [NSMutableDictionary dictionary];
+ if (error.userInfo) {
+ [userInfo addEntriesFromDictionary:error.userInfo];
+ }
+ userInfo[kGRPCTrailersKey] = strongSelf.responseTrailers;
+ // TODO(jcanizales): The C gRPC library doesn't guarantee that the headers block will be
+ // called before this one, so an error might end up with trailers but no headers. We
+      // shouldn't call finishWithError until after both blocks are called. It is also when this is
+ // done that we can provide a merged view of response headers and trailers in a thread-safe
+ // way.
+ if (strongSelf.responseHeaders) {
+ userInfo[kGRPCHeadersKey] = strongSelf.responseHeaders;
+ }
+ error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
}
- error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
+ [strongSelf maybeFinishWithError:error];
}
- [self finishWithError:error];
}];
// Now that the RPC has been initiated, request writes can start.
@synchronized(_requestWriter) {
@@ -439,16 +475,8 @@ static NSString * const kBearerPrefix = @"Bearer ";
// TODO(jcanizales): Check this on init.
[NSException raise:NSInvalidArgumentException format:@"host of %@ is nil", _host];
}
- _connectivityMonitor = [GRPCConnectivityMonitor monitorWithHost:host];
- __weak typeof(self) weakSelf = self;
- void (^handler)(void) = ^{
- typeof(self) strongSelf = weakSelf;
- [strongSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeUnavailable
- userInfo:@{ NSLocalizedDescriptionKey : @"Connectivity lost." }]];
- };
- [_connectivityMonitor handleLossWithHandler:handler
- wifiStatusChangeHandler:nil];
+ [GRPCConnectivityMonitor registerObserver:self
+ selector:@selector(connectivityChanged:)];
}
- (void)startWithWriteable:(id<GRXWriteable>)writeable {
@@ -512,4 +540,12 @@ static NSString * const kBearerPrefix = @"Bearer ";
}
}
+- (void)connectivityChanged:(NSNotification *)note {
+ [self maybeFinishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeUnavailable
+ userInfo:@{ NSLocalizedDescriptionKey : @"Connectivity lost." }]];
+ // Cancel underlying call upon this notification
+ [self cancelCall];
+}
+
@end
diff --git a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h
index cb55e46d70..394d21792d 100644
--- a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h
+++ b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h
@@ -19,44 +19,30 @@
#import <Foundation/Foundation.h>
#import <SystemConfiguration/SystemConfiguration.h>
-@interface GRPCReachabilityFlags : NSObject
-
-+ (nonnull instancetype)flagsWithFlags:(SCNetworkReachabilityFlags)flags;
-
-/**
- * One accessor method to query each of the different flags. Example:
-
-@property(nonatomic, readonly) BOOL isCell;
-
- */
-#define GRPC_XMACRO_ITEM(methodName, FlagName) \
-@property(nonatomic, readonly) BOOL methodName;
-
-#include "GRPCReachabilityFlagNames.xmacro.h"
-#undef GRPC_XMACRO_ITEM
-
-@property(nonatomic, readonly) BOOL isHostReachable;
-@end
-
+typedef NS_ENUM(NSInteger, GRPCConnectivityStatus) {
+ GRPCConnectivityUnknown = 0,
+ GRPCConnectivityNoNetwork = 1,
+ GRPCConnectivityCellular = 2,
+ GRPCConnectivityWiFi = 3,
+};
+
+extern NSString * _Nonnull kGRPCConnectivityNotification;
+
+// This interface monitors the OS reachability interface for any network
+// status change. Parties interested in these events should register
+// themselves as observers.
@interface GRPCConnectivityMonitor : NSObject
-+ (nullable instancetype)monitorWithHost:(nonnull NSString *)hostName;
-
- (nonnull instancetype)init NS_UNAVAILABLE;
-/**
- * Queue on which callbacks will be dispatched. Default is the main queue. Set it before calling
- * handleLossWithHandler:.
- */
-// TODO(jcanizales): Default to a serial background queue instead.
-@property(nonatomic, strong, null_resettable) dispatch_queue_t queue;
-
-/**
- * Calls handler every time the connectivity to this instance's host is lost. If this instance is
- * released before that happens, the handler won't be called.
- * Only one handler is active at a time, so if this method is called again before the previous
- * handler has been called, it might never be called at all (or yes, if it has already been queued).
- */
-- (void)handleLossWithHandler:(nullable void (^)(void))lossHandler
- wifiStatusChangeHandler:(nullable void (^)(void))wifiStatusChangeHandler;
+// Register an object as an observer of network status changes. \a observer
+// must implement a notification method taking a single parameter of type
+// (NSNotification *), whose selector is passed as \a selector. The
+// parameter of that notification method is currently unused.
++ (void)registerObserver:(_Nonnull id)observer
+ selector:(_Nonnull SEL)selector;
+
+// Unregister an object from the observers of network status changes.
++ (void)unregisterObserver:(_Nonnull id)observer;
+
@end
diff --git a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m
index c8e10dd75f..7f31c7e23e 100644
--- a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m
+++ b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m
@@ -18,175 +18,74 @@
#import "GRPCConnectivityMonitor.h"
-#pragma mark Flags
+#include <netinet/in.h>
-@implementation GRPCReachabilityFlags {
- SCNetworkReachabilityFlags _flags;
-}
+NSString *kGRPCConnectivityNotification = @"kGRPCConnectivityNotification";
-+ (instancetype)flagsWithFlags:(SCNetworkReachabilityFlags)flags {
- return [[self alloc] initWithFlags:flags];
-}
+static SCNetworkReachabilityRef reachability;
+static GRPCConnectivityStatus currentStatus;
-- (instancetype)initWithFlags:(SCNetworkReachabilityFlags)flags {
- if ((self = [super init])) {
- _flags = flags;
+// Aggregates the information in \a flags into a network connectivity status.
+GRPCConnectivityStatus CalculateConnectivityStatus(SCNetworkReachabilityFlags flags) {
+ GRPCConnectivityStatus result = GRPCConnectivityUnknown;
+ if (((flags & kSCNetworkReachabilityFlagsReachable) == 0) ||
+ ((flags & kSCNetworkReachabilityFlagsConnectionRequired) != 0)) {
+ return GRPCConnectivityNoNetwork;
}
- return self;
-}
-
-/*
- * One accessor method implementation per flag. Example:
-
-- (BOOL)isCell { \
- return !!(_flags & kSCNetworkReachabilityFlagsIsWWAN); \
-}
-
- */
-#define GRPC_XMACRO_ITEM(methodName, FlagName) \
-- (BOOL)methodName { \
- return !!(_flags & kSCNetworkReachabilityFlags ## FlagName); \
-}
-#include "GRPCReachabilityFlagNames.xmacro.h"
-#undef GRPC_XMACRO_ITEM
-
-- (BOOL)isHostReachable {
- // Note: connectionOnDemand means it'll be reachable only if using the CFSocketStream API or APIs
- // on top of it.
- // connectionRequired means we can't tell until a connection is attempted (e.g. for VPN on
- // demand).
- return self.reachable && !self.interventionRequired && !self.connectionOnDemand;
-}
-
-- (NSString *)description {
- NSMutableArray *activeOptions = [NSMutableArray arrayWithCapacity:9];
-
- /*
- * For each flag, add its name to the array if it's ON. Example:
-
- if (self.isCell) {
- [activeOptions addObject:@"isCell"];
+ result = GRPCConnectivityWiFi;
+#if TARGET_OS_IPHONE
+ if (flags & kSCNetworkReachabilityFlagsIsWWAN) {
+ return result = GRPCConnectivityCellular;
}
-
- */
- #define GRPC_XMACRO_ITEM(methodName, FlagName) \
- if (self.methodName) { \
- [activeOptions addObject:@ #methodName]; \
- }
- #include "GRPCReachabilityFlagNames.xmacro.h"
- #undef GRPC_XMACRO_ITEM
-
- return activeOptions.count == 0 ? @"(none)" : [activeOptions componentsJoinedByString:@", "];
-}
-
-- (BOOL)isEqual:(id)object {
- return [object isKindOfClass:[GRPCReachabilityFlags class]] &&
- _flags == ((GRPCReachabilityFlags *)object)->_flags;
-}
-
-- (NSUInteger)hash {
- return _flags;
-}
-@end
-
-#pragma mark Connectivity Monitor
-
-// Assumes the third argument is a block that accepts a GRPCReachabilityFlags object, and passes the
-// received ones to it.
-static void PassFlagsToContextInfoBlock(SCNetworkReachabilityRef target,
- SCNetworkReachabilityFlags flags,
- void *info) {
- #pragma unused (target)
- // This can be called many times with the same info. The info is retained by SCNetworkReachability
- // while this function is being executed.
- void (^handler)(GRPCReachabilityFlags *) = (__bridge void (^)(GRPCReachabilityFlags *))info;
- handler([[GRPCReachabilityFlags alloc] initWithFlags:flags]);
+#endif
+ return result;
}
-@implementation GRPCConnectivityMonitor {
- SCNetworkReachabilityRef _reachabilityRef;
- GRPCReachabilityFlags *_previousReachabilityFlags;
-}
+static void ReachabilityCallback(
+ SCNetworkReachabilityRef target, SCNetworkReachabilityFlags flags, void* info) {
+ GRPCConnectivityStatus newStatus = CalculateConnectivityStatus(flags);
-- (nullable instancetype)initWithReachability:(nullable SCNetworkReachabilityRef)reachability {
- if (!reachability) {
- return nil;
+ if (newStatus != currentStatus) {
+ [[NSNotificationCenter defaultCenter] postNotificationName:kGRPCConnectivityNotification
+ object:nil];
+ currentStatus = newStatus;
}
- if ((self = [super init])) {
- _reachabilityRef = CFRetain(reachability);
- _queue = dispatch_get_main_queue();
- _previousReachabilityFlags = nil;
- }
- return self;
}
-+ (nullable instancetype)monitorWithHost:(nonnull NSString *)host {
- const char *hostName = host.UTF8String;
- if (!hostName) {
- [NSException raise:NSInvalidArgumentException
- format:@"host.UTF8String returns NULL for %@", host];
- }
- SCNetworkReachabilityRef reachability =
- SCNetworkReachabilityCreateWithName(NULL, hostName);
+@implementation GRPCConnectivityMonitor
- GRPCConnectivityMonitor *returnValue = [[self alloc] initWithReachability:reachability];
- if (reachability) {
- CFRelease(reachability);
- }
- return returnValue;
-}
++ (void)initialize {
+ if (self == [GRPCConnectivityMonitor self]) {
+ struct sockaddr_in addr = {0};
+ addr.sin_len = sizeof(addr);
+ addr.sin_family = AF_INET;
+ reachability = SCNetworkReachabilityCreateWithAddress(NULL, (struct sockaddr *)&addr);
+ currentStatus = GRPCConnectivityUnknown;
-- (void)handleLossWithHandler:(nullable void (^)(void))lossHandler
- wifiStatusChangeHandler:(nullable void (^)(void))wifiStatusChangeHandler {
- __weak typeof(self) weakSelf = self;
- [self startListeningWithHandler:^(GRPCReachabilityFlags *flags) {
- typeof(self) strongSelf = weakSelf;
- if (strongSelf) {
- if (lossHandler && !flags.reachable) {
- lossHandler();
-#if TARGET_OS_IPHONE
- } else if (wifiStatusChangeHandler &&
- strongSelf->_previousReachabilityFlags &&
- (flags.isWWAN ^
- strongSelf->_previousReachabilityFlags.isWWAN)) {
- wifiStatusChangeHandler();
-#endif
- }
- strongSelf->_previousReachabilityFlags = flags;
+ SCNetworkConnectionFlags flags;
+ if (SCNetworkReachabilityGetFlags(reachability, &flags)) {
+ currentStatus = CalculateConnectivityStatus(flags);
}
- }];
-}
-- (void)startListeningWithHandler:(void (^)(GRPCReachabilityFlags *))handler {
- // Copy to ensure the handler block is in the heap (and so can't be deallocated when this method
- // returns).
- void (^copiedHandler)(GRPCReachabilityFlags *) = [handler copy];
- SCNetworkReachabilityContext context = {
- .version = 0,
- .info = (__bridge void *)copiedHandler,
- .retain = CFRetain,
- .release = CFRelease,
- };
- // The following will retain context.info, and release it when the callback is set to NULL.
- SCNetworkReachabilitySetCallback(_reachabilityRef, PassFlagsToContextInfoBlock, &context);
- SCNetworkReachabilitySetDispatchQueue(_reachabilityRef, _queue);
-}
-
-- (void)stopListening {
- // This releases the block on context.info.
- SCNetworkReachabilitySetCallback(_reachabilityRef, NULL, NULL);
- SCNetworkReachabilitySetDispatchQueue(_reachabilityRef, NULL);
+ SCNetworkReachabilityContext context = {0, (__bridge void *)(self), NULL, NULL, NULL};
+ if (!SCNetworkReachabilitySetCallback(reachability, ReachabilityCallback, &context) ||
+ !SCNetworkReachabilityScheduleWithRunLoop(
+ reachability, CFRunLoopGetMain(), kCFRunLoopCommonModes)) {
+ NSLog(@"gRPC connectivity monitor fail to set");
+ }
+ }
}
-- (void)setQueue:(dispatch_queue_t)queue {
- _queue = queue ?: dispatch_get_main_queue();
++ (void)registerObserver:(_Nonnull id)observer
+ selector:(SEL)selector {
+ [[NSNotificationCenter defaultCenter] addObserver:observer
+ selector:selector
+ name:kGRPCConnectivityNotification
+ object:nil];
}
-- (void)dealloc {
- if (_reachabilityRef) {
- [self stopListening];
- CFRelease(_reachabilityRef);
- }
++ (void)unregisterObserver:(_Nonnull id)observer {
+ [[NSNotificationCenter defaultCenter] removeObserver:observer];
}
@end
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index 71b57cf1f6..8568e334dd 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -37,12 +37,6 @@ NS_ASSUME_NONNULL_BEGIN
static NSMutableDictionary *kHostCache;
-// This connectivity monitor flushes the host cache when connectivity status
-// changes or when connection switch between Wifi and Cellular data, so that a
-// new call will use a new channel. Otherwise, a new call will still use the
-// cached channel which is no longer available and will cause gRPC to hang.
-static GRPCConnectivityMonitor *connectivityMonitor = nil;
-
@implementation GRPCHost {
// TODO(mlumish): Investigate whether caching channels with strong links is a good idea.
GRPCChannel *_channel;
@@ -90,17 +84,7 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
kHostCache[address] = self;
_compressAlgorithm = GRPC_COMPRESS_NONE;
}
- // Keep a single monitor to flush the cache if the connectivity status changes
- // Thread safety guarded by @synchronized(kHostCache)
- if (!connectivityMonitor) {
- connectivityMonitor =
- [GRPCConnectivityMonitor monitorWithHost:hostURL.host];
- void (^handler)(void) = ^{
- [GRPCHost flushChannelCache];
- };
- [connectivityMonitor handleLossWithHandler:handler
- wifiStatusChangeHandler:handler];
- }
+ [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChange:)];
}
return self;
}
@@ -281,6 +265,13 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
}
}
+// Flushes the host cache when connectivity status changes or when connection switch between Wifi
+// and Cellular data, so that a new call will use a new channel. Otherwise, a new call will still
+// use the cached channel which is no longer available and will cause gRPC to hang.
+- (void)connectivityChange:(NSNotification *)note {
+ [GRPCHost flushChannelCache];
+}
+
@end
NS_ASSUME_NONNULL_END
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 8ed32e56b8..75156793f1 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -155,7 +155,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_buffer.cc',
- 'src/core/lib/slice/slice_hash_table.cc',
'src/core/lib/slice/slice_intern.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
@@ -186,6 +185,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/service_config.cc',
'src/core/lib/transport/static_metadata.cc',
'src/core/lib/transport/status_conversion.cc',
+ 'src/core/lib/transport/status_metadata.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
@@ -235,10 +235,10 @@ CORE_SOURCE_FILES = [
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
- 'src/core/lib/security/transport/lb_targets_info.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
+ 'src/core/lib/security/transport/target_authority_table.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/surface/init_secure.cc',
@@ -261,12 +261,14 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy_factory.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
+ 'src/core/ext/filters/client_channel/method_params.cc',
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
+ 'src/core/ext/filters/client_channel/status_util.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_index.cc',
'src/core/ext/filters/client_channel/uri_parser.cc',