-rw-r--r--  CMakeLists.txt | 2
-rw-r--r--  Makefile | 4
-rwxr-xr-x  Rakefile | 4
-rw-r--r--  build.yaml | 1
-rw-r--r--  doc/md | 0
-rw-r--r--  gRPC-Core.podspec | 1
-rw-r--r--  grpc.gyp | 5
-rw-r--r--  include/grpc/impl/codegen/port_platform.h | 21
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc | 6
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc | 6
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc | 71
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc | 95
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h | 90
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc | 6
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 660
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc | 89
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc | 122
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc | 4
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc | 6
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.cc | 10
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.cc | 14
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.cc | 10
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.cc | 10
-rw-r--r--  src/core/ext/filters/max_age/max_age_filter.cc | 6
-rw-r--r--  src/core/ext/filters/message_size/message_size_filter.cc | 10
-rw-r--r--  src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc | 6
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.cc | 5
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc | 13
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc | 10
-rw-r--r--  src/core/lib/iomgr/tcp_uv.cc | 30
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.cc | 10
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.cc | 14
-rw-r--r--  src/core/lib/surface/call.cc | 5
-rw-r--r--  src/core/lib/surface/server.cc | 24
-rw-r--r--  src/core/tsi/ssl_transport_security.cc | 3
-rwxr-xr-x  src/csharp/Grpc.Auth/Grpc.Auth.csproj | 4
-rwxr-xr-x  src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj | 4
-rwxr-xr-x  src/csharp/Grpc.Core/Grpc.Core.csproj | 4
-rwxr-xr-x  src/csharp/Grpc.Core/SourceLink.csproj.include | 19
-rwxr-xr-x  src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj | 4
-rwxr-xr-x  src/csharp/Grpc.Reflection/Grpc.Reflection.csproj | 4
-rw-r--r--  test/core/end2end/end2end_nosec_tests.cc | 8
-rw-r--r--  test/core/end2end/end2end_tests.cc | 8
-rwxr-xr-x  test/core/end2end/gen_build_yaml.py | 1
-rwxr-xr-x  test/core/end2end/generate_tests.bzl | 1
-rw-r--r--  test/core/end2end/tests/filter_status_code.cc | 353
-rw-r--r--  test/core/transport/chttp2/settings_timeout_test.cc | 1
-rw-r--r--  third_party/rake-compiler-dock/Dockerfile | 200
-rwxr-xr-x  third_party/rake-compiler-dock/build.sh | 7
-rw-r--r--  third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff | 105
-rw-r--r--  third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff | 14
-rw-r--r--  third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch | 2
-rw-r--r--  third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch | 13
-rw-r--r--  third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch | 2
-rw-r--r--  third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch | 12
-rw-r--r--  third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch | 12
-rwxr-xr-x  third_party/rake-compiler-dock/build/runas | 12
-rw-r--r--  third_party/rake-compiler-dock/build/sigfw.c | 43
-rwxr-xr-x  third_party/rake-compiler-dock/build/strip_wrapper | 30
-rw-r--r--  third_party/rake-compiler-dock/build/sudoers | 1
-rw-r--r--  tools/bazel.rc | 1
-rw-r--r--  tools/distrib/build_ruby_environment_macos.sh | 2
-rwxr-xr-x  tools/distrib/check_copyright.py | 1
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json | 2
-rw-r--r--  tools/run_tests/generated/tests.json | 789
65 files changed, 1804 insertions, 1228 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8eff902f6b..eed1205268 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4618,6 +4618,7 @@ add_library(end2end_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
+ test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc
@@ -4719,6 +4720,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_latency.cc
+ test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
test/core/end2end/tests/high_initial_seqno.cc
test/core/end2end/tests/hpack_size.cc
diff --git a/Makefile b/Makefile
index e51882c364..38b40804d6 100644
--- a/Makefile
+++ b/Makefile
@@ -327,7 +327,7 @@ CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
CXXFLAGS += -stdlib=libc++
endif
-CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
+CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1 -Wno-deprecated-declarations
COREFLAGS += -fno-rtti -fno-exceptions
LDFLAGS += -g
@@ -8557,6 +8557,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
+ test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \
@@ -8655,6 +8656,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_latency.cc \
+ test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
test/core/end2end/tests/high_initial_seqno.cc \
test/core/end2end/tests/hpack_size.cc \
diff --git a/Rakefile b/Rakefile
index d76b9ff657..74c8b1fd48 100755
--- a/Rakefile
+++ b/Rakefile
@@ -113,10 +113,10 @@ task 'gem:native' do
if RUBY_PLATFORM =~ /darwin/
FileUtils.touch 'grpc_c.32.ruby'
FileUtils.touch 'grpc_c.64.ruby'
- system "rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
+ system "rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
else
Rake::Task['dlls'].execute
- docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
+ docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.5.0:2.4.0:2.3.0:2.2.2:2.1.6:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
end
end
diff --git a/build.yaml b/build.yaml
index 42d7245981..fef7d6189f 100644
--- a/build.yaml
+++ b/build.yaml
@@ -5004,6 +5004,7 @@ defaults:
global:
COREFLAGS: -fno-rtti -fno-exceptions
CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1
+ -Wno-deprecated-declarations
LDFLAGS: -g
zlib:
CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration
diff --git a/doc/md b/doc/md
deleted file mode 100644
index e69de29bb2..0000000000
--- a/doc/md
+++ /dev/null
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 708c3436ab..c127660dd5 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -1038,6 +1038,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
+ 'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
diff --git a/grpc.gyp b/grpc.gyp
index c34206b1a5..281fbfa8a6 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -57,6 +57,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
+ '-Wno-deprecated-declarations',
],
'ldflags': [
'-g',
@@ -134,6 +135,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
+ '-Wno-deprecated-declarations',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
@@ -143,6 +145,7 @@
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
+ '-Wno-deprecated-declarations',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
@@ -2378,6 +2381,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
+ 'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
@@ -2450,6 +2454,7 @@
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_latency.cc',
+ 'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index f4bc3eb3e0..ae1f951b49 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -195,12 +195,25 @@
#define GPR_PTHREAD_TLS 1
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_7 */
#define GPR_CPU_POSIX 1
+/* TODO(vjpai): there is a reported issue in bazel build for Mac where __thread
+ in a header is currently not working (bazelbuild/bazel#4341). Remove
+ the following conditional and use GPR_GCC_TLS when that is fixed */
+#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
+#else /* GRPC_BAZEL_BUILD */
+#define GPR_PTHREAD_TLS 1
+#endif /* GRPC_BAZEL_BUILD */
#define GPR_APPLE_PTHREAD_NAME 1
#endif
#else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
#define GPR_CPU_POSIX 1
+/* TODO(vjpai): Remove the following conditional and use only GPR_GCC_TLS
+ when bazelbuild/bazel#4341 is fixed */
+#ifndef GRPC_BAZEL_BUILD
#define GPR_GCC_TLS 1
+#else /* GRPC_BAZEL_BUILD */
+#define GPR_PTHREAD_TLS 1
+#endif /* GRPC_BAZEL_BUILD */
#endif
#define GPR_POSIX_CRASH_HANDLER 1
#endif
@@ -421,6 +434,14 @@ typedef unsigned __int64 uint64_t;
#endif
#endif
+#ifndef GRPC_UNUSED
+#if defined(__GNUC__) && !defined(__MINGW32__)
+#define GRPC_UNUSED __attribute__((unused))
+#else
+#define GRPC_UNUSED
+#endif
+#endif
+
#ifndef GPR_PRINT_FORMAT_CHECK
#ifdef __GNUC__
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \
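[Editor's note] The GRPC_UNUSED macro added above expands to __attribute__((unused)) on GCC-compatible compilers (except MinGW) and to nothing elsewhere, suppressing -Wunused warnings for symbols that are only referenced in some build configurations. A minimal sketch of its use; the helper below is hypothetical, not part of this change:

    #include <grpc/impl/codegen/port_platform.h>

    /* Hypothetical helper that only debug-only code paths call: under
       -Wall -Werror, builds that omit those paths would otherwise fail
       with an unused-function warning. */
    static GRPC_UNUSED int debug_checksum(const char* buf, int len) {
      int sum = 0;
      for (int i = 0; i < len; ++i) sum += buf[i];
      return sum;
    }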
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index bfc549e709..4ee5e9c109 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -33,7 +33,8 @@
#define DEFAULT_POLL_INTERVAL_MS 5000
-typedef struct backup_poller {
+namespace {
+struct backup_poller {
grpc_timer polling_timer;
grpc_closure run_poller_closure;
grpc_closure shutdown_closure;
@@ -42,7 +43,8 @@ typedef struct backup_poller {
bool shutting_down; // guarded by pollset_mu
gpr_refcount refs;
gpr_refcount shutdown_refs;
-} backup_poller;
+};
+} // namespace
static gpr_once g_once = GPR_ONCE_INIT;
static gpr_mu g_poller_mu;
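[Editor's note] Converting the typedef struct to a struct inside an anonymous namespace gives backup_poller internal linkage: each translation unit may define its own file-local type of the same name without violating C++'s One Definition Rule, which matters now that these files compile as C++. A self-contained sketch of the idiom, with hypothetical names:

    namespace {
    // Internal linkage: this definition is private to the translation unit,
    // so another .cc file can define its own "poller_state" without an ODR
    // clash.
    struct poller_state {
      int refs;
      bool shutting_down;
    };
    }  // namespace

    static poller_state g_state;  // nameable only within this file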
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 20693ba419..a827aa30ec 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -58,7 +58,8 @@ typedef enum {
CALLING_BACK_AND_FINISHED,
} callback_phase;
-typedef struct {
+namespace {
+struct state_watcher {
gpr_mu mu;
callback_phase phase;
grpc_closure on_complete;
@@ -71,7 +72,8 @@ typedef struct {
grpc_channel* channel;
grpc_error* error;
void* tag;
-} state_watcher;
+};
+} // namespace
static void delete_state_watcher(state_watcher* w) {
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index e99022a91b..3f3334d44a 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -553,6 +553,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
+ grpc_lb_policy_shutdown_locked(chand->lb_policy, new_lb_policy);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
chand->lb_policy = new_lb_policy;
@@ -658,6 +659,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
+ grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
chand->lb_policy = nullptr;
}
@@ -792,6 +794,7 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
if (chand->lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
chand->interested_parties);
+ grpc_lb_policy_shutdown_locked(chand->lb_policy, nullptr);
GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
}
gpr_free(chand->info_lb_policy_name);
@@ -852,12 +855,10 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* subchannel_call;
grpc_error* error;
- grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
+ grpc_lb_policy_pick_state pick;
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
- grpc_connected_subchannel* connected_subchannel;
- grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity* pollent;
grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
@@ -866,8 +867,6 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch* initial_metadata_batch;
- grpc_linked_mdelem lb_token_mdelem;
-
grpc_closure on_complete;
grpc_closure* original_on_complete;
} call_data;
@@ -1005,16 +1004,16 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
- calld->pollent, // pollent
- calld->path, // path
- calld->call_start_time, // start_time
- calld->deadline, // deadline
- calld->arena, // arena
- calld->subchannel_call_context, // context
- calld->call_combiner // call_combiner
+ calld->pollent, // pollent
+ calld->path, // path
+ calld->call_start_time, // start_time
+ calld->deadline, // deadline
+ calld->arena, // arena
+ calld->pick.subchannel_call_context, // context
+ calld->call_combiner // call_combiner
};
grpc_error* new_error = grpc_connected_subchannel_create_call(
- calld->connected_subchannel, &call_args, &calld->subchannel_call);
+ calld->pick.connected_subchannel, &call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error));
@@ -1032,7 +1031,7 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
- if (calld->connected_subchannel == nullptr) {
+ if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
calld->error = error == GRPC_ERROR_NONE
@@ -1071,13 +1070,16 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (calld->lb_policy != nullptr) {
+ // Note: chand->lb_policy may have changed since we started our pick,
+ // in which case we will be cancelling the pick on a policy other than
+ // the one we started it on. However, this will just be a no-op.
+ if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, calld->lb_policy);
+ chand, calld, chand->lb_policy);
}
- grpc_lb_policy_cancel_pick_locked(
- calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error));
+ grpc_lb_policy_cancel_pick_locked(chand->lb_policy, &calld->pick,
+ GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
}
@@ -1092,9 +1094,6 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
- GPR_ASSERT(calld->lb_policy != nullptr);
- GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
- calld->lb_policy = nullptr;
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
}
@@ -1128,26 +1127,21 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
- const grpc_lb_policy_pick_args inputs = {
+ calld->pick.initial_metadata =
calld->initial_metadata_batch->payload->send_initial_metadata
- .send_initial_metadata,
- initial_metadata_flags, &calld->lb_token_mdelem};
- // Keep a ref to the LB policy in calld while the pick is pending.
- GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
- calld->lb_policy = chand->lb_policy;
+ .send_initial_metadata;
+ calld->pick.initial_metadata_flags = initial_metadata_flags;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
- const bool pick_done = grpc_lb_policy_pick_locked(
- chand->lb_policy, &inputs, &calld->connected_subchannel,
- calld->subchannel_call_context, nullptr, &calld->lb_pick_closure);
+ calld->pick.on_complete = &calld->lb_pick_closure;
+ const bool pick_done =
+ grpc_lb_policy_pick_locked(chand->lb_policy, &calld->pick);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
- GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
- calld->lb_policy = nullptr;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
@@ -1289,7 +1283,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
- GPR_ASSERT(calld->connected_subchannel == nullptr);
+ GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(elem)) {
@@ -1467,15 +1461,14 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
"client_channel_destroy_call");
}
- GPR_ASSERT(calld->lb_policy == nullptr);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
- if (calld->connected_subchannel != nullptr) {
- GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked");
+ if (calld->pick.connected_subchannel != nullptr) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->pick.connected_subchannel, "picked");
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
- if (calld->subchannel_call_context[i].value != nullptr) {
- calld->subchannel_call_context[i].destroy(
- calld->subchannel_call_context[i].value);
+ if (calld->pick.subchannel_call_context[i].value != nullptr) {
+ calld->pick.subchannel_call_context[i].destroy(
+ calld->pick.subchannel_call_context[i].value);
}
}
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
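[Editor's note] The cancellation note above works because policies track pending picks in an intrusive list threaded through the pick state's next pointer; cancelling against a policy that never saw the pick simply finds nothing to unlink. A sketch of the assumed cancel path (my_policy and its pending_picks field are illustrative, modeled on the pick_first/round_robin changes in this same change set):

    // Hypothetical policy type with an intrusive pending-pick list.
    struct my_policy {
      grpc_lb_policy base;
      grpc_lb_policy_pick_state* pending_picks;
    };

    static void example_cancel_pick_locked(my_policy* p,
                                           grpc_lb_policy_pick_state* pick,
                                           grpc_error* error) {
      grpc_lb_policy_pick_state** pp = &p->pending_picks;
      while (*pp != nullptr) {
        if (*pp == pick) {
          *pp = pick->next;  // found: unlink and fail the pick
          GRPC_CLOSURE_SCHED(pick->on_complete,
                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                 "Pick Cancelled", &error, 1));
          break;
        }
        pp = &(*pp)->next;  // not ours: unknown picks fall through as a no-op
      }
      GRPC_ERROR_UNREF(error);
    }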
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 7a5a8dec34..cc4fe7ec62 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -19,8 +19,6 @@
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
-#define WEAK_REF_BITS 16
-
grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
false, "lb_policy_refcount");
@@ -28,91 +26,60 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner) {
policy->vtable = vtable;
- gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
+ gpr_ref_init(&policy->refs, 1);
policy->interested_parties = grpc_pollset_set_create();
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifndef NDEBUG
-#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
-#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
-#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
-#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
+void grpc_lb_policy_ref(grpc_lb_policy* lb_policy, const char* file, int line,
+ const char* reason) {
+ if (grpc_trace_lb_policy_refcount.enabled()) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "LB_POLICY:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
+ old_refs, old_refs + 1, reason);
+ }
#else
-#define REF_FUNC_EXTRA_ARGS
-#define REF_MUTATE_EXTRA_ARGS
-#define REF_FUNC_PASS_ARGS(new_reason)
-#define REF_MUTATE_PASS_ARGS(x)
+void grpc_lb_policy_ref(grpc_lb_policy* lb_policy) {
#endif
+ gpr_ref(&lb_policy->refs);
+}
-static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
- int barrier REF_MUTATE_EXTRA_ARGS) {
- gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
- : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
+void grpc_lb_policy_unref(grpc_lb_policy* lb_policy, const char* file, int line,
+ const char* reason) {
if (grpc_trace_lb_policy_refcount.enabled()) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&lb_policy->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
- purpose, old_val, old_val + delta, reason);
+ "LB_POLICY:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", lb_policy,
+ old_refs, old_refs - 1, reason);
}
+#else
+void grpc_lb_policy_unref(grpc_lb_policy* lb_policy) {
#endif
- return old_val;
-}
-
-void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
-}
-
-static void shutdown_locked(void* arg, grpc_error* error) {
- grpc_lb_policy* policy = (grpc_lb_policy*)arg;
- policy->vtable->shutdown_locked(policy);
- GRPC_LB_POLICY_WEAK_UNREF(policy, "strong-unref");
-}
-
-void grpc_lb_policy_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- gpr_atm old_val =
- ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
- 1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
- gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
- gpr_atm check = 1 << WEAK_REF_BITS;
- if ((old_val & mask) == check) {
- GRPC_CLOSURE_SCHED(
- GRPC_CLOSURE_CREATE(shutdown_locked, policy,
- grpc_combiner_scheduler(policy->combiner)),
- GRPC_ERROR_NONE);
- } else {
- grpc_lb_policy_weak_unref(policy REF_FUNC_PASS_ARGS("strong-unref"));
+ if (gpr_unref(&lb_policy->refs)) {
+ grpc_pollset_set_destroy(lb_policy->interested_parties);
+ grpc_combiner* combiner = lb_policy->combiner;
+ lb_policy->vtable->destroy(lb_policy);
+ GRPC_COMBINER_UNREF(combiner, "lb_policy");
}
}
-void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
-}
-
-void grpc_lb_policy_weak_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
- gpr_atm old_val =
- ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
- if (old_val == 1) {
- grpc_pollset_set_destroy(policy->interested_parties);
- grpc_combiner* combiner = policy->combiner;
- policy->vtable->destroy(policy);
- GRPC_COMBINER_UNREF(combiner, "lb_policy");
- }
+void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
+ grpc_lb_policy* new_policy) {
+ policy->vtable->shutdown_locked(policy, new_policy);
}
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context,
- void** user_data, grpc_closure* on_complete) {
- return policy->vtable->pick_locked(policy, pick_args, target, context,
- user_data, on_complete);
+ grpc_lb_policy_pick_state* pick) {
+ return policy->vtable->pick_locked(policy, pick);
}
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
- grpc_connected_subchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- policy->vtable->cancel_pick_locked(policy, target, error);
+ policy->vtable->cancel_pick_locked(policy, pick, error);
}
void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,
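[Editor's note] The ref_pair scheme deleted above packed two counters into a single atomic word: strong references in the high bits, weak references in the low WEAK_REF_BITS bits. The last strong unref converted itself into a weak reference and scheduled shutdown; the last weak unref destroyed the policy. A self-contained reconstruction of that retired scheme, for contrast with the plain gpr_refcount plus explicit grpc_lb_policy_shutdown_locked() that replaces it:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    namespace {
    constexpr uint64_t kWeakRefBits = 16;
    constexpr uint64_t kStrongOne = uint64_t{1} << kWeakRefBits;
    std::atomic<uint64_t> ref_pair{kStrongOne};  // 1 strong, 0 weak

    void strong_unref() {
      // Net delta: -1 strong, +1 weak; the weak ref keeps the object alive
      // until shutdown (run under the combiner) drops it.
      uint64_t old_val = ref_pair.fetch_add(uint64_t{1} - kStrongOne);
      if ((old_val >> kWeakRefBits) == 1) {
        std::puts("last strong ref: schedule shutdown, then weak_unref()");
      }
    }

    void weak_unref() {
      if (ref_pair.fetch_sub(1) == 1) std::puts("last weak ref: destroy");
    }
    }  // namespace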
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 3572c97ed1..1176a05b78 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -33,7 +33,7 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
struct grpc_lb_policy {
const grpc_lb_policy_vtable* vtable;
- gpr_atm ref_pair;
+ gpr_refcount refs;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
@@ -42,32 +42,42 @@ struct grpc_lb_policy {
grpc_closure* request_reresolution;
};
-/** Extra arguments for an LB pick */
-typedef struct grpc_lb_policy_pick_args {
- /** Initial metadata associated with the picking call. */
+/// State used for an LB pick.
+typedef struct grpc_lb_policy_pick_state {
+ /// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata;
- /** Bitmask used for selective cancelling. See \a
- * grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
- * grpc_types.h */
+ /// Bitmask used for selective cancelling. See \a
+ /// grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
+ /// grpc_types.h.
uint32_t initial_metadata_flags;
- /** Storage for LB token in \a initial_metadata, or NULL if not used */
- grpc_linked_mdelem* lb_token_mdelem_storage;
-} grpc_lb_policy_pick_args;
+ /// Storage for LB token in \a initial_metadata, or NULL if not used.
+ grpc_linked_mdelem lb_token_mdelem_storage;
+ /// Closure to run when pick is complete, if not completed synchronously.
+ grpc_closure* on_complete;
+ /// Will be set to the selected subchannel, or NULL on failure or when
+ /// the LB policy decides to drop the call.
+ grpc_connected_subchannel* connected_subchannel;
+ /// Will be populated with context to pass to the subchannel call, if needed.
+ grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
+ /// Upon success, \a *user_data will be set to whatever opaque information
+ /// may need to be propagated from the LB policy, or NULL if not needed.
+ void** user_data;
+ /// Next pointer. For internal use by LB policy.
+ struct grpc_lb_policy_pick_state* next;
+} grpc_lb_policy_pick_state;
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_lb_policy* policy);
- void (*shutdown_locked)(grpc_lb_policy* policy);
+
+ /// \see grpc_lb_policy_shutdown_locked().
+ void (*shutdown_locked)(grpc_lb_policy* policy, grpc_lb_policy* new_policy);
/** \see grpc_lb_policy_pick */
- int (*pick_locked)(grpc_lb_policy* policy,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete);
+ int (*pick_locked)(grpc_lb_policy* policy, grpc_lb_policy_pick_state* pick);
/** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick_locked)(grpc_lb_policy* policy,
- grpc_connected_subchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
@@ -103,37 +113,19 @@ struct grpc_lb_policy_vtable {
};
#ifndef NDEBUG
-
-/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(p, r) \
grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
-
-/* Weak references: they don't prevent the shutdown of the LB policy. When no
- * strong references are left but there are still weak ones, shutdown is called.
- * Once the weak reference also reaches zero, the LB policy is destroyed. */
-#define GRPC_LB_POLICY_WEAK_REF(p, r) \
- grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_LB_POLICY_WEAK_UNREF(p, r) \
- grpc_lb_policy_weak_unref((p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
const char* reason);
-void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
- const char* reason);
-void grpc_lb_policy_weak_unref(grpc_lb_policy* policy, const char* file,
- int line, const char* reason);
-#else
+#else // !NDEBUG
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
-#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
-#define GRPC_LB_POLICY_WEAK_UNREF(p, r) grpc_lb_policy_weak_unref((p))
void grpc_lb_policy_ref(grpc_lb_policy* policy);
void grpc_lb_policy_unref(grpc_lb_policy* policy);
-void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
-void grpc_lb_policy_weak_unref(grpc_lb_policy* policy);
#endif
/** called by concrete implementations to initialize the base struct */
@@ -141,28 +133,24 @@ void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
grpc_combiner* combiner);
-/** Finds an appropriate subchannel for a call, based on \a pick_args.
-
- \a target will be set to the selected subchannel, or NULL on failure
- or when the LB policy decides to drop the call.
+/// Shuts down \a policy.
+/// If \a new_policy is non-null, any pending picks will be restarted
+/// on that policy; otherwise, they will be failed.
+void grpc_lb_policy_shutdown_locked(grpc_lb_policy* policy,
+ grpc_lb_policy* new_policy);
- Upon success, \a user_data will be set to whatever opaque information
- may need to be propagated from the LB policy, or NULL if not needed.
- \a context will be populated with context to pass to the subchannel
- call, if needed.
+/** Finds an appropriate subchannel for a call, based on data in \a pick.
+ \a pick must remain alive until the pick is complete.
If the pick succeeds and a result is known immediately, a non-zero
- value will be returned. Otherwise, \a on_complete will be invoked
+ value will be returned. Otherwise, \a pick->on_complete will be invoked
once the pick is complete with its error argument set to indicate
success or failure.
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context,
- void** user_data, grpc_closure* on_complete);
+ grpc_lb_policy_pick_state* pick);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
@@ -170,11 +158,11 @@ void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* on_initiate,
grpc_closure* on_ack);
-/** Cancel picks for \a target.
+/** Cancel picks for \a pick.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
- grpc_connected_subchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
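[Editor's note] With the new API, every input and output of a pick travels through a single caller-owned grpc_lb_policy_pick_state that must stay alive until the pick completes. A caller-side sketch; policy, metadata_batch, and on_pick_done are placeholders assumed to exist, while the field and function names come from this header:

    grpc_lb_policy_pick_state pick;
    memset(&pick, 0, sizeof(pick));
    pick.initial_metadata = metadata_batch;
    pick.initial_metadata_flags = 0;
    pick.on_complete = &on_pick_done;  // runs only if the pick is asynchronous
    if (grpc_lb_policy_pick_locked(policy, &pick)) {
      // Synchronous result: pick.connected_subchannel is set, or null on a
      // drop or failure.
    } else {
      // Pending: on_pick_done fires later; cancel with
      // grpc_lb_policy_cancel_pick_locked(policy, &pick, error) if needed.
    }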
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 3eedb08ecc..1708d81e61 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -32,7 +32,8 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
static void destroy_channel_elem(grpc_channel_element* elem) {}
-typedef struct {
+namespace {
+struct call_data {
// Stats object to update.
grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
@@ -43,7 +44,8 @@ typedef struct {
grpc_closure recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
-} call_data;
+};
+} // namespace
static void on_complete_for_send(void* arg, grpc_error* error) {
call_data* calld = (call_data*)arg;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index ba4e90d4c2..5849ac9d2d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -54,7 +54,7 @@
* operations in progress over the old RR instance. This is done by
* decreasing the reference count on the old policy. The moment no more
* references are held on the old RR policy, it'll be destroyed and \a
- * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
+ * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
* state. At this point we can transition to a new RR instance safely, which
* is done once again via \a rr_handover_locked().
*
@@ -128,185 +128,48 @@
grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
-/* add lb_token of selected subchannel (address) to the call's initial
- * metadata */
-static grpc_error* initial_metadata_add_lb_token(
- grpc_metadata_batch* initial_metadata,
- grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
- GPR_ASSERT(lb_token_mdelem_storage != nullptr);
- GPR_ASSERT(!GRPC_MDISNULL(lb_token));
- return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
- lb_token);
-}
-
-static void destroy_client_stats(void* arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
-}
-
-typedef struct wrapped_rr_closure_arg {
- /* the closure instance using this struct as argument */
- grpc_closure wrapper_closure;
-
- /* the original closure. Usually a on_complete/notify cb for pick() and ping()
- * calls against the internal RR instance, respectively. */
- grpc_closure* wrapped_closure;
-
- /* the pick's initial metadata, kept in order to append the LB token for the
- * pick */
- grpc_metadata_batch* initial_metadata;
-
- /* the picked target, used to determine which LB token to add to the pick's
- * initial metadata */
- grpc_connected_subchannel** target;
-
- /* the context to be populated for the subchannel call */
- grpc_call_context_element* context;
-
- /* Stats for client-side load reporting. Note that this holds a
- * reference, which must be either passed on via context or unreffed. */
+struct glb_lb_policy;
+
+namespace {
+
+/// Linked list of pending pick requests. It stores all information needed to
+/// eventually call (Round Robin's) pick() on them. They mainly stay pending
+/// waiting for the RR policy to be created.
+///
+/// Note that when a pick is sent to the RR policy, we inject our own
+/// on_complete callback, so that we can intercept the result before
+/// invoking the original on_complete callback. This allows us to set the
+/// LB token metadata and add client_stats to the call context.
+/// See \a pending_pick_complete() for details.
+struct pending_pick {
+ // Our on_complete closure and the original one.
+ grpc_closure on_complete;
+ grpc_closure* original_on_complete;
+ // The original pick.
+ grpc_lb_policy_pick_state* pick;
+ // Stats for client-side load reporting. Note that this holds a
+ // reference, which must be either passed on via context or unreffed.
grpc_grpclb_client_stats* client_stats;
-
- /* the LB token associated with the pick */
+ // The LB token associated with the pick. This is set via user_data in
+ // the pick.
grpc_mdelem lb_token;
-
- /* storage for the lb token initial metadata mdelem */
- grpc_linked_mdelem* lb_token_mdelem_storage;
-
- /* The RR instance related to the closure */
- grpc_lb_policy* rr_policy;
-
- /* The grpclb instance that created the wrapping. This instance is not owned,
- * reference counts are untouched. It's used only for logging purposes. */
- grpc_lb_policy* glb_policy;
-
- /* heap memory to be freed upon closure execution. */
- void* free_when_done;
-} wrapped_rr_closure_arg;
-
-/* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(void* arg, grpc_error* error) {
- wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
-
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
-
- if (wc_arg->rr_policy != nullptr) {
- /* if *target is nullptr, no pick has been made by the RR policy (eg, all
- * addresses failed to connect). There won't be any user_data/token
- * available */
- if (*wc_arg->target != nullptr) {
- if (!GRPC_MDISNULL(wc_arg->lb_token)) {
- initial_metadata_add_lb_token(wc_arg->initial_metadata,
- wc_arg->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(wc_arg->lb_token));
- } else {
- gpr_log(
- GPR_ERROR,
- "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
- "instance %p).",
- wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
- abort();
- }
- // Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
- } else {
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
- }
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
- }
- GPR_ASSERT(wc_arg->free_when_done != nullptr);
- gpr_free(wc_arg->free_when_done);
-}
-
-/* Linked list of pending pick requests. It stores all information needed to
- * eventually call (Round Robin's) pick() on them. They mainly stay pending
- * waiting for the RR policy to be created/updated.
- *
- * One particularity is the wrapping of the user-provided \a on_complete closure
- * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
- * order to correctly unref the RR policy instance upon completion of the pick.
- * See \a wrapped_rr_closure for details. */
-typedef struct pending_pick {
+  // The grpclb instance that created the wrapping. This instance is not
+  // owned; reference counts are untouched. It's used only for logging.
+ glb_lb_policy* glb_policy;
+ // Next pending pick.
struct pending_pick* next;
+};
- /* original pick()'s arguments */
- grpc_lb_policy_pick_args pick_args;
-
- /* output argument where to store the pick()ed connected subchannel, or
- * nullptr upon error. */
- grpc_connected_subchannel** target;
-
- /* args for wrapped_on_complete */
- wrapped_rr_closure_arg wrapped_on_complete_arg;
-} pending_pick;
-
-static void add_pending_pick(pending_pick** root,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context,
- grpc_closure* on_complete) {
- pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
- pp->next = *root;
- pp->pick_args = *pick_args;
- pp->target = target;
- pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
- pp->wrapped_on_complete_arg.target = target;
- pp->wrapped_on_complete_arg.context = context;
- pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
- pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
- pick_args->lb_token_mdelem_storage;
- pp->wrapped_on_complete_arg.free_when_done = pp;
- GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
- wrapped_rr_closure, &pp->wrapped_on_complete_arg,
- grpc_schedule_on_exec_ctx);
- *root = pp;
-}
-
-/* Same as the \a pending_pick struct but for ping operations */
-typedef struct pending_ping {
+/// A linked list of pending pings waiting for the RR policy to be created.
+struct pending_ping {
+ grpc_closure* on_initiate;
+ grpc_closure* on_ack;
struct pending_ping* next;
+};
- /* args for sending the ping */
- wrapped_rr_closure_arg* on_initiate;
- wrapped_rr_closure_arg* on_ack;
-} pending_ping;
-
-static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
- grpc_closure* on_ack) {
- pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
- if (on_initiate != nullptr) {
- pping->on_initiate =
- (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
- pping->on_initiate->wrapped_closure = on_initiate;
- pping->on_initiate->free_when_done = pping->on_initiate;
- GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
- &pping->on_initiate, grpc_schedule_on_exec_ctx);
- }
- if (on_ack != nullptr) {
- pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
- pping->on_ack->wrapped_closure = on_ack;
- pping->on_ack->free_when_done = pping->on_ack;
- GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
- &pping->on_ack, grpc_schedule_on_exec_ctx);
- }
- pping->next = *root;
- *root = pping;
-}
-
-/*
- * glb_lb_policy
- */
-typedef struct rr_connectivity_data rr_connectivity_data;
+} // namespace
-typedef struct glb_lb_policy {
+struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -331,6 +194,9 @@ typedef struct glb_lb_policy {
/** the RR policy to use of the backend servers returned by the LB server */
grpc_lb_policy* rr_policy;
+ grpc_closure on_rr_connectivity_changed;
+ grpc_connectivity_state rr_connectivity_state;
+
bool started_picking;
/** our connectivity state tracker */
@@ -365,11 +231,11 @@ typedef struct glb_lb_policy {
/** are we already watching the LB channel's connectivity? */
bool watching_lb_channel;
- /** is \a lb_call_retry_timer active? */
- bool retry_timer_active;
+ /** is the callback associated with \a lb_call_retry_timer pending? */
+ bool retry_timer_callback_pending;
- /** is \a lb_fallback_timer active? */
- bool fallback_timer_active;
+ /** is the callback associated with \a lb_fallback_timer pending? */
+ bool fallback_timer_callback_pending;
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@@ -428,22 +294,92 @@ typedef struct glb_lb_policy {
/* Interval and timer for next client load report. */
grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
- bool client_load_report_timer_pending;
+ bool client_load_report_timer_callback_pending;
bool last_client_load_report_counters_were_zero;
/* Closure used for either the load report timer or the callback for
* completion of sending the load report. */
grpc_closure client_load_report_closure;
/* Client load report message payload. */
grpc_byte_buffer* client_load_report_payload;
-} glb_lb_policy;
-
-/* Keeps track and reacts to changes in connectivity of the RR instance */
-struct rr_connectivity_data {
- grpc_closure on_change;
- grpc_connectivity_state state;
- glb_lb_policy* glb_policy;
};
+/* add lb_token of selected subchannel (address) to the call's initial
+ * metadata */
+static grpc_error* initial_metadata_add_lb_token(
+ grpc_metadata_batch* initial_metadata,
+ grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
+ GPR_ASSERT(lb_token_mdelem_storage != nullptr);
+ GPR_ASSERT(!GRPC_MDISNULL(lb_token));
+ return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+ lb_token);
+}
+
+static void destroy_client_stats(void* arg) {
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
+}
+
+static void pending_pick_set_metadata_and_context(pending_pick* pp) {
+ /* if connected_subchannel is nullptr, no pick has been made by the RR
+ * policy (e.g., all addresses failed to connect). There won't be any
+ * user_data/token available */
+ if (pp->pick->connected_subchannel != nullptr) {
+ if (!GRPC_MDISNULL(pp->lb_token)) {
+ initial_metadata_add_lb_token(pp->pick->initial_metadata,
+ &pp->pick->lb_token_mdelem_storage,
+ GRPC_MDELEM_REF(pp->lb_token));
+ } else {
+ gpr_log(GPR_ERROR,
+ "[grpclb %p] No LB token for connected subchannel pick %p",
+ pp->glb_policy, pp->pick);
+ abort();
+ }
+ // Pass on client stats via context. Passes ownership of the reference.
+ GPR_ASSERT(pp->client_stats != nullptr);
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
+ pp->client_stats;
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
+ destroy_client_stats;
+ } else {
+ grpc_grpclb_client_stats_unref(pp->client_stats);
+ }
+}
+
+/* The \a on_complete closure passed as part of the pick requires keeping a
+ * reference to its associated round robin instance. We wrap this closure in
+ * order to unref the round robin instance upon its invocation */
+static void pending_pick_complete(void* arg, grpc_error* error) {
+ pending_pick* pp = (pending_pick*)arg;
+ pending_pick_set_metadata_and_context(pp);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
+ gpr_free(pp);
+}
+
+static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
+ grpc_lb_policy_pick_state* pick) {
+ pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
+ pp->pick = pick;
+ pp->glb_policy = glb_policy;
+ GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
+ grpc_schedule_on_exec_ctx);
+ pp->original_on_complete = pick->on_complete;
+ pp->pick->on_complete = &pp->on_complete;
+ return pp;
+}
+
+static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
+ new_pp->next = *root;
+ *root = new_pp;
+}
+
+static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
+ pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
+ pping->on_initiate = on_initiate;
+ pping->on_ack = on_ack;
+ pping->next = *root;
+ *root = pping;
+}
+
static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
bool log) {
if (server->drop) return false;
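[Editor's note] The pending_pick helpers above replace the old wrapped_rr_closure plumbing: each pick's on_complete is intercepted once at creation, and the pick is then either parked until the RR child exists or forwarded immediately. A sketch of the assumed caller wiring, consistent with how this change set's glb pick path uses these helpers:

    pending_pick* pp = pending_pick_create(glb_policy, pick);
    if (glb_policy->rr_policy == nullptr) {
      pending_pick_add(&glb_policy->pending_picks, pp);  // replayed on RR creation
    } else {
      pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
    }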
@@ -555,7 +491,6 @@ static grpc_lb_addresses* process_serverlist_locked(
gpr_free(uri);
user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
}
-
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
false /* is_balancer */,
nullptr /* balancer_name */, user_data);
@@ -596,7 +531,6 @@ static void update_lb_connectivity_status_locked(
grpc_error* rr_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&glb_policy->state_tracker);
-
/* The new connectivity status is a function of the previous one and the new
* input coming from the status of the RR policy.
*
@@ -626,7 +560,6 @@ static void update_lb_connectivity_status_locked(
*
* (*) This function mustn't be called while shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
-
switch (rr_state) {
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN:
@@ -637,7 +570,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_READY:
GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
}
-
if (grpc_lb_glb_trace.enabled()) {
gpr_log(
GPR_INFO,
@@ -655,10 +587,8 @@ static void update_lb_connectivity_status_locked(
* cleanups this callback would otherwise be responsible for.
* If \a force_async is true, then we will manually schedule the
* completion callback even if the pick is available immediately. */
-static bool pick_from_internal_rr_locked(
- glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args,
- bool force_async, grpc_connected_subchannel** target,
- wrapped_rr_closure_arg* wc_arg) {
+static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
+ bool force_async, pending_pick* pp) {
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != nullptr) {
// Look at the index into the serverlist to see if we should drop this call.
@@ -668,57 +598,36 @@ static bool pick_from_internal_rr_locked(
glb_policy->serverlist_index = 0; // Wrap-around.
}
if (server->drop) {
- // Not using the RR policy, so unref it.
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
+ GPR_ASSERT(glb_policy->client_stats != nullptr);
grpc_grpclb_client_stats_add_call_dropped_locked(
- server->load_balance_token, wc_arg->client_stats);
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ server->load_balance_token, glb_policy->client_stats);
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
- gpr_free(wc_arg->free_when_done);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ gpr_free(pp);
return false;
}
- gpr_free(wc_arg->free_when_done);
+ gpr_free(pp);
return true;
}
}
+ // Set client_stats and user_data.
+ pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+ GPR_ASSERT(pp->pick->user_data == nullptr);
+ pp->pick->user_data = (void**)&pp->lb_token;
// Pick via the RR policy.
- const bool pick_done = grpc_lb_policy_pick_locked(
- wc_arg->rr_policy, pick_args, target, wc_arg->context,
- (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
- /* add the load reporting initial metadata */
- initial_metadata_add_lb_token(pick_args->initial_metadata,
- pick_args->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(wc_arg->lb_token));
- // Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+ pending_pick_set_metadata_and_context(pp);
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
- gpr_free(wc_arg->free_when_done);
- return false;
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ pick_done = false;
}
- gpr_free(wc_arg->free_when_done);
+ gpr_free(pp);
}
/* else, the pending pick will be registered and taken care of by the
* pending pick list inside the RR policy (glb_policy->rr_policy).
@@ -760,7 +669,7 @@ static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
gpr_free(args);
}
-static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error);
+static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
static void create_rr_locked(glb_lb_policy* glb_policy,
grpc_lb_policy_args* args) {
GPR_ASSERT(glb_policy->rr_policy == nullptr);
@@ -782,72 +691,46 @@ static void create_rr_locked(glb_lb_policy* glb_policy,
glb_policy->base.request_reresolution = nullptr;
glb_policy->rr_policy = new_rr_policy;
grpc_error* rr_state_error = nullptr;
- const grpc_connectivity_state rr_state =
- grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
- &rr_state_error);
+ glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
+ glb_policy->rr_policy, &rr_state_error);
/* Connectivity state is a function of the RR policy updated/created */
- update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error);
+ update_lb_connectivity_status_locked(
+ glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
/* Add the gRPC LB's interested_parties pollset_set to that of the newly
* created RR policy. This will make the RR policy progress upon activity on
* gRPC LB, which in turn is tied to the application's call */
grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
-
- /* Allocate the data for the tracking of the new RR policy's connectivity.
- * It'll be deallocated in glb_rr_connectivity_changed() */
- rr_connectivity_data* rr_connectivity =
- (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
- GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
- glb_rr_connectivity_changed_locked, rr_connectivity,
+ GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
+ on_rr_connectivity_changed_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- rr_connectivity->glb_policy = glb_policy;
- rr_connectivity->state = rr_state;
-
/* Subscribe to changes to the connectivity of the new RR */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
- grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
+ GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
+ grpc_lb_policy_notify_on_state_change_locked(
+ glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
+ &glb_policy->on_rr_connectivity_changed);
grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
-
- /* Update picks and pings in wait */
+ // Send pending picks to RR policy.
pending_pick* pp;
while ((pp = glb_policy->pending_picks)) {
glb_policy->pending_picks = pp->next;
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
- pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
- pp->wrapped_on_complete_arg.client_stats =
- grpc_grpclb_client_stats_ref(glb_policy->client_stats);
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Pending pick about to (async) PICK from RR %p",
glb_policy, glb_policy->rr_policy);
}
- pick_from_internal_rr_locked(glb_policy, &pp->pick_args,
- true /* force_async */, pp->target,
- &pp->wrapped_on_complete_arg);
+ pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
}
-
+ // Send pending pings to RR policy.
pending_ping* pping;
while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next;
- grpc_closure* on_initiate = nullptr;
- grpc_closure* on_ack = nullptr;
- if (pping->on_initiate != nullptr) {
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
- pping->on_initiate->rr_policy = glb_policy->rr_policy;
- on_initiate = &pping->on_initiate->wrapper_closure;
- }
- if (pping->on_ack != nullptr) {
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
- pping->on_ack->rr_policy = glb_policy->rr_policy;
- on_ack = &pping->on_ack->wrapper_closure;
- }
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
glb_policy, glb_policy->rr_policy);
}
- grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
+ grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
+ pping->on_ack);
gpr_free(pping);
}
}
@@ -873,31 +756,28 @@ static void rr_handover_locked(glb_lb_policy* glb_policy) {
lb_policy_args_destroy(args);
}
-static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
- rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
- glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
+static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (glb_policy->shutting_down) {
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
- gpr_free(rr_connectivity);
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
return;
}
- if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
+ if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
/* An RR policy that has transitioned into the SHUTDOWN connectivity state
* should not be considered for picks or updates: the SHUTDOWN state is a
* sink, policies can't transition back from it. */
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
glb_policy->rr_policy = nullptr;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
- gpr_free(rr_connectivity);
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
return;
}
/* rr state != SHUTDOWN && !glb_policy->shutting_down: business as usual */
- update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state,
- GRPC_ERROR_REF(error));
- /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
- grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
+ update_lb_connectivity_status_locked(
+ glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
+ /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
+ grpc_lb_policy_notify_on_state_change_locked(
+ glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
+ &glb_policy->on_rr_connectivity_changed);
}
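
A note on the lifetime here: the callback keeps a single "glb_rr_connectivity_cb" ref alive across re-subscriptions and releases it only on shutdown or once RR reaches the SHUTDOWN sink. A minimal, self-contained sketch of that pattern (invented names, not the gRPC API):

    #include <atomic>
    #include <cstdio>

    struct Policy {
      std::atomic<int> refs{1};
      bool shutting_down = false;
      int connectivity = 0;  // 3 stands in for GRPC_CHANNEL_SHUTDOWN here
    };

    static void policy_ref(Policy* p) { p->refs.fetch_add(1); }
    static void policy_unref(Policy* p) {
      if (p->refs.fetch_sub(1) == 1) delete p;
    }

    // Runs on every connectivity change; re-arms itself while the state is live.
    static void on_state_changed(Policy* p) {
      if (p->shutting_down || p->connectivity == 3) {
        policy_unref(p);  // drop the subscription ref; do not re-subscribe
        return;
      }
      // ... update trackers, then re-subscribe reusing the same ref ...
    }

    int main() {
      Policy* p = new Policy;
      policy_ref(p);        // one ref taken at subscription time
      p->connectivity = 1;
      on_state_changed(p);  // live state: ref kept for the next notification
      p->connectivity = 3;
      on_state_changed(p);  // sink state: subscription ref released
      policy_unref(p);      // release the creation ref
      std::printf("subscription ref pattern ok\n");
    }
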
static void destroy_balancer_name(void* balancer_name) {
@@ -1005,38 +885,27 @@ static void glb_destroy(grpc_lb_policy* pol) {
gpr_free(glb_policy);
}
-static void glb_shutdown_locked(grpc_lb_policy* pol) {
+static void glb_shutdown_locked(grpc_lb_policy* pol,
+ grpc_lb_policy* new_policy) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
glb_policy->shutting_down = true;
-
- /* We need a copy of the lb_call pointer because we can't cancell the call
- * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
- * the cancel, needs to acquire that same lock */
- grpc_call* lb_call = glb_policy->lb_call;
-
/* glb_policy->lb_call must be consistent at this point because it is only
* assigned in lb_call_init_locked as part of query_for_backends_locked, which
* can only be invoked while glb_policy->shutting_down is false. */
- if (lb_call != nullptr) {
- grpc_call_cancel(lb_call, nullptr);
+ if (glb_policy->lb_call != nullptr) {
+ grpc_call_cancel(glb_policy->lb_call, nullptr);
/* lb_on_server_status_received will pick up the cancel and clean up */
}
- if (glb_policy->retry_timer_active) {
+ if (glb_policy->retry_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
- glb_policy->retry_timer_active = false;
}
- if (glb_policy->fallback_timer_active) {
+ if (glb_policy->fallback_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_fallback_timer);
- glb_policy->fallback_timer_active = false;
}
-
- pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- pending_ping* pping = glb_policy->pending_pings;
- glb_policy->pending_pings = nullptr;
if (glb_policy->rr_policy != nullptr) {
+ grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
} else {
grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
@@ -1051,28 +920,33 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
}
grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "glb_shutdown");
-
+ // Clear pending picks.
+ pending_pick* pp = glb_policy->pending_picks;
+ glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_REF(error));
- gpr_free(pp);
+ if (new_policy != nullptr) {
+ // Hand pick over to new policy.
+ grpc_grpclb_client_stats_unref(pp->client_stats);
+ pp->pick->on_complete = pp->original_on_complete;
+ if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
+ // Synchronous return; schedule callback.
+ GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
+ }
+ gpr_free(pp);
+ } else {
+ pp->pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
+ }
pp = next;
}
-
+ // Clear pending pings.
+ pending_ping* pping = glb_policy->pending_pings;
+ glb_policy->pending_pings = nullptr;
while (pping != nullptr) {
pending_ping* next = pping->next;
- if (pping->on_initiate != nullptr) {
- GRPC_CLOSURE_SCHED(&pping->on_initiate->wrapper_closure,
- GRPC_ERROR_REF(error));
- gpr_free(pping->on_initiate);
- }
- if (pping->on_ack != nullptr) {
- GRPC_CLOSURE_SCHED(&pping->on_ack->wrapper_closure,
- GRPC_ERROR_REF(error));
- gpr_free(pping->on_ack);
- }
+ GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
gpr_free(pping);
pping = next;
}
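
With new_policy threaded through shutdown, each queued pick is either handed to the successor or failed with the shutdown error. A hedged sketch of that drain, with illustrative types in place of the real grpclb ones:

    #include <cstdio>

    struct Pick {
      Pick* next = nullptr;
    };

    struct SuccessorPolicy {
      // Returns true when the pick completes synchronously, mirroring the
      // grpc_lb_policy_pick_locked() contract assumed above.
      bool TryPick(Pick*) { return true; }
    };

    static void drain_pending(Pick*& head, SuccessorPolicy* successor) {
      Pick* pick = head;
      head = nullptr;
      while (pick != nullptr) {
        Pick* next = pick->next;
        if (successor != nullptr) {
          if (successor->TryPick(pick)) {
            std::printf("pick handed off; completed synchronously\n");
          }
        } else {
          std::printf("pick failed with shutdown error\n");
        }
        pick = next;
      }
    }

    int main() {
      Pick a, b;
      a.next = &b;
      Pick* head = &a;
      SuccessorPolicy successor;
      drain_pending(head, &successor);  // hand-off path
      Pick c;
      head = &c;
      drain_pending(head, nullptr);     // shutdown-failure path
    }
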
@@ -1090,16 +964,16 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and set the pick's connected_subchannel to
// nullptr right here.
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_connected_subchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
- if (pp->target == target) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
+ if (pp->pick == pick) {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
@@ -1109,7 +983,7 @@ static void glb_cancel_pick_locked(grpc_lb_policy* pol,
pp = next;
}
if (glb_policy->rr_policy != nullptr) {
- grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target,
+ grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
@@ -1134,9 +1008,9 @@ static void glb_cancel_picks_locked(grpc_lb_policy* pol,
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
pending_pick* next = pp->next;
- if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
+ if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
@@ -1158,14 +1032,15 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy);
static void start_picking_locked(glb_lb_policy* glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
- glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
+ glb_policy->serverlist == nullptr &&
+ !glb_policy->fallback_timer_callback_pending) {
grpc_millis deadline =
grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->fallback_timer_active = true;
+ glb_policy->fallback_timer_callback_pending = true;
grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback);
}
@@ -1183,19 +1058,9 @@ static void glb_exit_idle_locked(grpc_lb_policy* pol) {
}
static int glb_pick_locked(grpc_lb_policy* pol,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete) {
- if (pick_args->lb_token_mdelem_storage == nullptr) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(on_complete,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "No mdelem storage for the LB token. Load reporting "
- "won't work without it. Failing"));
- return 0;
- }
+ grpc_lb_policy_pick_state* pick) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ pending_pick* pp = pending_pick_create(glb_policy, pick);
bool pick_done = false;
if (glb_policy->rr_policy != nullptr) {
const grpc_connectivity_state rr_connectivity_state =
@@ -1203,7 +1068,7 @@ static int glb_pick_locked(grpc_lb_policy* pol,
nullptr);
// The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
// callback registered to capture this event
- // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
+ // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
// need to make sure we aren't trying to pick from a RR policy instance
// that's in shutdown.
if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
@@ -1213,32 +1078,16 @@ static int glb_pick_locked(grpc_lb_policy* pol,
glb_policy, glb_policy->rr_policy,
grpc_connectivity_state_name(rr_connectivity_state));
}
- add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
- on_complete);
+ pending_pick_add(&glb_policy->pending_picks, pp);
pick_done = false;
} else { // RR not in shutdown
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
glb_policy->rr_policy);
}
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg* wc_arg =
- (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
- GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
- grpc_schedule_on_exec_ctx);
- wc_arg->rr_policy = glb_policy->rr_policy;
- wc_arg->target = target;
- wc_arg->context = context;
GPR_ASSERT(glb_policy->client_stats != nullptr);
- wc_arg->client_stats =
- grpc_grpclb_client_stats_ref(glb_policy->client_stats);
- wc_arg->wrapped_closure = on_complete;
- wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
- wc_arg->initial_metadata = pick_args->initial_metadata;
- wc_arg->free_when_done = wc_arg;
- wc_arg->glb_policy = pol;
- pick_done = pick_from_internal_rr_locked(
- glb_policy, pick_args, false /* force_async */, target, wc_arg);
+ pick_done =
+ pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
}
} else { // glb_policy->rr_policy == NULL
if (grpc_lb_glb_trace.enabled()) {
@@ -1246,8 +1095,7 @@ static int glb_pick_locked(grpc_lb_policy* pol,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks",
glb_policy);
}
- add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
- on_complete);
+ pending_pick_add(&glb_policy->pending_picks, pp);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}
@@ -1269,7 +1117,7 @@ static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
} else {
- add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack);
+ pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}
@@ -1286,7 +1134,7 @@ static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- glb_policy->retry_timer_active = false;
+ glb_policy->retry_timer_callback_pending = false;
if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
error == GRPC_ERROR_NONE) {
if (grpc_lb_glb_trace.enabled()) {
@@ -1294,12 +1142,12 @@ static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
}
query_for_backends_locked(glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
}
static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
- if (glb_policy->retry_timer_active) {
+ if (glb_policy->retry_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
}
if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
@@ -1313,23 +1161,23 @@ static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
if (timeout > 0) {
gpr_log(GPR_DEBUG,
- "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+ "[grpclb %p] ... retry LB call after %" PRIuPTR "ms.",
glb_policy, timeout);
} else {
- gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+ gpr_log(GPR_DEBUG, "[grpclb %p] ... retry LB call immediately.",
glb_policy);
}
}
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
lb_call_on_retry_timer_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->retry_timer_active = true;
+ glb_policy->retry_timer_callback_pending = true;
grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry);
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_server_status_received_locked");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "lb_on_server_status_received_locked");
}
static void send_client_load_report_locked(void* arg, grpc_error* error);
@@ -1351,8 +1199,8 @@ static void client_load_report_done_locked(void* arg, grpc_error* error) {
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = nullptr;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
- glb_policy->client_load_report_timer_pending = false;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
+ glb_policy->client_load_report_timer_callback_pending = false;
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(glb_policy);
}
@@ -1392,8 +1240,8 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
static void send_client_load_report_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
- glb_policy->client_load_report_timer_pending = false;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
+ glb_policy->client_load_report_timer_callback_pending = false;
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(glb_policy);
}
@@ -1503,7 +1351,7 @@ static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(glb_policy->lb_call_status_details);
- if (glb_policy->client_load_report_timer_pending) {
+ if (glb_policy->client_load_report_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->client_load_report_timer);
}
}
@@ -1546,10 +1394,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
+ /* take a ref to be released in lb_on_sent_initial_request_locked() */
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_sent_initial_request);
@@ -1565,10 +1411,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_server_status_received_locked");
+ /* take a ref to be released in lb_on_server_status_received_locked() */
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_server_status_received);
@@ -1580,9 +1424,8 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* take another weak ref to be unref'd/reused in
- * lb_on_response_received_locked */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
+ /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received);
@@ -1597,8 +1440,7 @@ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
if (glb_policy->client_load_report_payload != nullptr) {
do_send_client_load_report_locked(glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
}
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
@@ -1630,11 +1472,9 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
"client load reporting interval = %" PRIdPTR " milliseconds",
glb_policy, glb_policy->client_stats_report_interval);
}
- /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
- * strong ref count goes to zero) to be unref'd in
- * send_client_load_report_locked() */
- glb_policy->client_load_report_timer_pending = true;
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
+ /* take a ref to be unref'd in send_client_load_report_locked() */
+ glb_policy->client_load_report_timer_callback_pending = true;
+ GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
schedule_next_client_load_report(glb_policy);
} else if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
@@ -1682,9 +1522,8 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses = nullptr;
- if (glb_policy->fallback_timer_active) {
+ if (glb_policy->fallback_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_fallback_timer);
- glb_policy->fallback_timer_active = false;
}
}
/* and update the copy in the glb_lb_policy instance. This
@@ -1717,27 +1556,27 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
op->flags = 0;
op->reserved = nullptr;
op++;
- /* reuse the "lb_on_response_received_locked" weak ref taken in
+ /* reuse the "lb_on_response_received_locked" ref taken in
* query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_response_received_locked_shutdown");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "lb_on_response_received_locked_shutdown");
}
} else { /* empty payload: call cancelled. */
- /* dispose of the "lb_on_response_received_locked" weak ref taken in
+ /* dispose of the "lb_on_response_received_locked" ref taken in
* query_for_backends_locked() and reused in every reception loop */
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "lb_on_response_received_locked_empty_payload");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "lb_on_response_received_locked_empty_payload");
}
}
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- glb_policy->fallback_timer_active = false;
+ glb_policy->fallback_timer_callback_pending = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
if (glb_policy->serverlist == nullptr) {
@@ -1751,7 +1590,7 @@ static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
rr_handover_locked(glb_policy);
}
}
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
@@ -1772,7 +1611,7 @@ static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
// If the load report timer is still pending, we wait for it to be
// called before restarting the call. Otherwise, we restart the call
// here.
- if (!glb_policy->client_load_report_timer_pending) {
+ if (!glb_policy->client_load_report_timer_callback_pending) {
maybe_restart_lb_call(glb_policy);
}
}
@@ -1835,7 +1674,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
glb_policy->watching_lb_channel = true;
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
+ GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
@@ -1882,9 +1721,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
// lb_on_server_status_received() will pick up the cancel and reinit
// lb_call.
} else if (glb_policy->started_picking) {
- if (glb_policy->retry_timer_active) {
+ if (glb_policy->retry_timer_callback_pending) {
grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
- glb_policy->retry_timer_active = false;
}
start_picking_locked(glb_policy);
}
@@ -1892,8 +1730,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
case GRPC_CHANNEL_SHUTDOWN:
done:
glb_policy->watching_lb_channel = false;
- GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
- "watch_lb_channel_connectivity_cb_shutdown");
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
+ "watch_lb_channel_connectivity_cb_shutdown");
break;
}
}
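
Every GRPC_LB_POLICY_WEAK_REF in this file becomes a plain GRPC_LB_POLICY_REF: shutdown is now an explicit call (with an optional successor for pending work) instead of a side effect of the strong count reaching zero. A rough sketch of the resulting contract, with assumed semantics rather than the actual macros:

    #include <atomic>

    struct LbPolicy {
      std::atomic<int> refs{1};
      bool shut_down = false;
    };

    // One refcount; callbacks, timers, and watches each hold a plain ref.
    static void lb_policy_ref(LbPolicy* p, const char* /*reason*/) {
      p->refs.fetch_add(1, std::memory_order_relaxed);
    }
    static void lb_policy_unref(LbPolicy* p, const char* /*reason*/) {
      if (p->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete p;
    }

    // Shutdown no longer races with the last unref: the owner invokes it
    // explicitly, optionally naming a successor policy for pending picks.
    static void lb_policy_shutdown(LbPolicy* p, LbPolicy* /*new_policy*/) {
      p->shut_down = true;
    }

    int main() {
      LbPolicy* p = new LbPolicy;
      lb_policy_ref(p, "pending_callback");
      lb_policy_shutdown(p, nullptr);       // explicit, not count-triggered
      lb_policy_unref(p, "pending_callback");
      lb_policy_unref(p, "creation");       // last unref frees
    }
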
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 0861261359..60385272cf 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -31,13 +31,6 @@
grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
-typedef struct pending_pick {
- struct pending_pick* next;
- uint32_t initial_metadata_flags;
- grpc_connected_subchannel** target;
- grpc_closure* on_complete;
-} pending_pick;
-
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
@@ -52,7 +45,7 @@ typedef struct {
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
- pending_pick* pending_picks;
+ grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
@@ -70,19 +63,27 @@ static void pf_destroy(grpc_lb_policy* pol) {
}
}
-static void pf_shutdown_locked(grpc_lb_policy* pol) {
+static void pf_shutdown_locked(grpc_lb_policy* pol,
+ grpc_lb_policy* new_policy) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
- pending_pick* pp;
- while ((pp = p->pending_picks) != nullptr) {
- p->pending_picks = pp->next;
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
- gpr_free(pp);
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks) != nullptr) {
+ p->pending_picks = pick->next;
+ if (new_policy != nullptr) {
+ // Hand off to new LB policy.
+ if (grpc_lb_policy_pick_locked(new_policy, pick)) {
+ // Synchronous return, schedule closure.
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+ }
+ } else {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
+ }
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
@@ -102,19 +103,18 @@ static void pf_shutdown_locked(grpc_lb_policy* pol) {
}
static void pf_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_connected_subchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
- pending_pick* next = pp->next;
- if (pp->target == target) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ grpc_lb_policy_pick_state* next = pp->next;
+ if (pp == pick) {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
- gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@@ -129,21 +129,20 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
+ while (pick != nullptr) {
+ grpc_lb_policy_pick_state* next = pick->next;
+ if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
- gpr_free(pp);
} else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
}
- pp = next;
+ pick = next;
}
GRPC_ERROR_UNREF(error);
}
@@ -173,27 +172,20 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
}
static int pf_pick_locked(grpc_lb_policy* pol,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete) {
+ grpc_lb_policy_pick_state* pick) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) {
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
- "picked");
+ pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ p->selected->connected_subchannel, "picked");
return 1;
}
// No subchannel selected yet, so handle asynchronously.
if (!p->started_picking) {
start_picking_locked(p);
}
- pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->on_complete = on_complete;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
return 0;
}
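
pick_first no longer heap-allocates a pending_pick wrapper: the caller-owned grpc_lb_policy_pick_state carries its own next pointer. A minimal sketch of the intrusive queue this enables (illustrative names):

    struct PickState {
      unsigned initial_metadata_flags = 0;
      PickState* next = nullptr;  // intrusive link in caller-owned state
    };

    struct PickFirst {
      PickState* pending_picks = nullptr;
    };

    // Queuing becomes a pointer push: no gpr_malloc at pick time and nothing
    // to gpr_free when the pick completes or is cancelled.
    static void queue_pick(PickFirst* p, PickState* pick) {
      pick->next = p->pending_picks;
      p->pending_picks = pick;
    }

    int main() {
      PickFirst p;
      PickState pick1, pick2;
      queue_pick(&p, &pick1);
      queue_pick(&p, &pick2);
      return p.pending_picks == &pick2 ? 0 : 1;  // LIFO order, as above
    }
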
@@ -479,18 +471,17 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(p);
// Update any calls that were waiting for a pick.
- pending_pick* pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks)) {
+ p->pending_picks = pick->next;
+ pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
(void*)p->selected);
}
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
// Renew notification.
grpc_lb_subchannel_data_start_connectivity_watch(sd);
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index b0c84017df..92c7d5bd5d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -41,29 +41,6 @@
grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
-/** List of entities waiting for a pick.
- *
- * Once a pick is available, \a target is updated and \a on_complete called. */
-typedef struct pending_pick {
- struct pending_pick* next;
-
- /* output argument where to store the pick()ed user_data. It'll be NULL if no
- * such data is present or there's an error (the definite test for errors is
- * \a target being NULL). */
- void** user_data;
-
- /* bitmask passed to pick() and used for selective cancelling. See
- * grpc_lb_policy_cancel_picks() */
- uint32_t initial_metadata_flags;
-
- /* output argument where to store the pick()ed connected subchannel, or NULL
- * upon error. */
- grpc_connected_subchannel** target;
-
- /* to be invoked once the pick() has completed (regardless of success) */
- grpc_closure* on_complete;
-} pending_pick;
-
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -75,7 +52,7 @@ typedef struct round_robin_lb_policy {
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
- pending_pick* pending_picks;
+ grpc_lb_policy_pick_state* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@@ -167,19 +144,27 @@ static void rr_destroy(grpc_lb_policy* pol) {
gpr_free(p);
}
-static void rr_shutdown_locked(grpc_lb_policy* pol) {
+static void rr_shutdown_locked(grpc_lb_policy* pol,
+ grpc_lb_policy* new_policy) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
- pending_pick* pp;
- while ((pp = p->pending_picks) != nullptr) {
- p->pending_picks = pp->next;
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
- gpr_free(pp);
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks) != nullptr) {
+ p->pending_picks = pick->next;
+ if (new_policy != nullptr) {
+ // Hand off to new LB policy.
+ if (grpc_lb_policy_pick_locked(new_policy, pick)) {
+ // Synchronous return; schedule callback.
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
+ }
+ } else {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
+ }
}
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
@@ -199,19 +184,18 @@ static void rr_shutdown_locked(grpc_lb_policy* pol) {
}
static void rr_cancel_pick_locked(grpc_lb_policy* pol,
- grpc_connected_subchannel** target,
+ grpc_lb_policy_pick_state* pick,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
- pending_pick* next = pp->next;
- if (pp->target == target) {
- *target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ grpc_lb_policy_pick_state* next = pp->next;
+ if (pp == pick) {
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
- gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
@@ -226,22 +210,21 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
- pending_pick* pp = p->pending_picks;
+ grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
- while (pp != nullptr) {
- pending_pick* next = pp->next;
- if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
+ while (pick != nullptr) {
+ grpc_lb_policy_pick_state* next = pick->next;
+ if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- *pp->target = nullptr;
- GRPC_CLOSURE_SCHED(pp->on_complete,
+ pick->connected_subchannel = nullptr;
+ GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
- gpr_free(pp);
} else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
}
- pp = next;
+ pick = next;
}
GRPC_ERROR_UNREF(error);
}
@@ -266,13 +249,10 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
}
static int rr_pick_locked(grpc_lb_policy* pol,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context, void** user_data,
- grpc_closure* on_complete) {
+ grpc_lb_policy_pick_state* pick) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
+ gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
@@ -282,18 +262,18 @@ static int rr_pick_locked(grpc_lb_policy* pol,
/* readily available, report right away */
grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
- *target =
+ pick->connected_subchannel =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
- if (user_data != nullptr) {
- *user_data = sd->user_data;
+ if (pick->user_data != nullptr) {
+ *pick->user_data = sd->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
- "index %lu)",
- (void*)p, (void*)sd->subchannel, (void*)*target,
- (void*)sd->subchannel_list, (unsigned long)next_ready_index);
+ "index %" PRIuPTR ")",
+ p, sd->subchannel, pick->connected_subchannel, sd->subchannel_list,
+ next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -304,13 +284,8 @@ static int rr_pick_locked(grpc_lb_policy* pol,
if (!p->started_picking) {
start_picking_locked(p);
}
- pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->on_complete = on_complete;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->user_data = user_data;
- p->pending_picks = pp;
+ pick->next = p->pending_picks;
+ p->pending_picks = pick;
return 0;
}
@@ -493,13 +468,13 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
- pending_pick* pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_lb_policy_pick_state* pick;
+ while ((pick = p->pending_picks)) {
+ p->pending_picks = pick->next;
+ pick->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_picked");
- if (pp->user_data != nullptr) {
- *pp->user_data = selected->user_data;
+ if (pick->user_data != nullptr) {
+ *pick->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
@@ -508,8 +483,7 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
(void*)p, (void*)selected->subchannel,
(void*)p->subchannel_list, (unsigned long)next_ready_index);
}
- GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
}
// Renew notification.
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index a3b4c8e524..5ce1298afc 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -213,13 +213,13 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
- GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
+ GRPC_LB_POLICY_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
- GRPC_LB_POLICY_WEAK_UNREF(subchannel_list->policy, reason);
+ GRPC_LB_POLICY_UNREF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(subchannel_list, reason);
}
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index f07394d29b..a604c55c58 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -59,11 +59,13 @@
((grpc_connected_subchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
-typedef struct {
+namespace {
+struct state_watcher {
grpc_closure closure;
grpc_subchannel* subchannel;
grpc_connectivity_state connectivity_state;
-} state_watcher;
+};
+} // namespace
typedef struct external_state_watcher {
grpc_subchannel* subchannel;
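
The typedef-to-anonymous-namespace rewrite here repeats in the filters below. Now that these files build as C++, identically named types such as call_data or channel_data defined in several translation units would collide under the one-definition rule; an anonymous namespace gives each file its own distinct type. In standalone form:

    // some_filter.cc
    namespace {
    struct call_data {  // internal linkage: distinct from any other call_data
      int filter_specific_field;
    };
    }  // namespace

    static int read_field(call_data* cd) { return cd->filter_specific_field; }

    int main() {
      call_data cd{42};
      return read_field(&cd) == 42 ? 0 : 1;
    }
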
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index a1fb10f5b8..6dbd8c2a6d 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -35,7 +35,8 @@
/* default maximum size of payload eligible for GET request */
static const size_t kMaxPayloadSizeForGet = 2048;
-typedef struct call_data {
+namespace {
+struct call_data {
grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
@@ -60,13 +61,14 @@ typedef struct call_data {
grpc_closure on_send_message_next_done;
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
grpc_mdelem static_scheme;
grpc_mdelem user_agent;
size_t max_payload_size_for_get;
-} channel_data;
+};
+} // namespace
static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 9ae13d2ed2..92d1716200 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -35,16 +35,17 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
-typedef enum {
+namespace {
+enum initial_metadata_state {
// Initial metadata not yet seen.
INITIAL_METADATA_UNSEEN = 0,
// Initial metadata seen; compression algorithm set.
HAS_COMPRESSION_ALGORITHM,
// Initial metadata seen; no compression algorithm set.
NO_COMPRESSION_ALGORITHM,
-} initial_metadata_state;
+};
-typedef struct call_data {
+struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
@@ -62,9 +63,9 @@ typedef struct call_data {
grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
/** Bitset of enabled algorithms */
@@ -78,7 +79,8 @@ typedef struct channel_data {
uint32_t enabled_stream_compression_algorithms_bitset;
/** Supported stream compression algorithms */
uint32_t supported_stream_compression_algorithms;
-} channel_data;
+};
+} // namespace
static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index b872dc98f5..508a3bf9fc 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -31,7 +31,8 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
-typedef struct call_data {
+namespace {
+struct call_data {
grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
@@ -60,11 +61,12 @@ typedef struct call_data {
grpc_closure hs_on_recv;
grpc_closure hs_on_complete;
grpc_closure hs_recv_message_ready;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
uint8_t unused;
-} channel_data;
+};
+} // namespace
static grpc_error* server_filter_outgoing_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index f50a928fcd..a414229768 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -31,7 +31,8 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/static_metadata.h"
-typedef struct call_data {
+namespace {
+struct call_data {
intptr_t id; /**< an id unique to the call */
bool have_trailing_md_string;
grpc_slice trailing_md_string;
@@ -48,11 +49,12 @@ typedef struct call_data {
/* to get notified of the availability of the incoming initial metadata. */
grpc_closure on_initial_md_ready;
grpc_metadata_batch* recv_initial_metadata;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
intptr_t id; /**< an id unique to the channel */
-} channel_data;
+};
+} // namespace
static void on_initial_md_ready(void* user_data, grpc_error* err) {
grpc_call_element* elem = (grpc_call_element*)user_data;
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index 0499c6ecfc..7b86e4cd6c 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -37,7 +37,8 @@
#define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \
{ DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
-typedef struct channel_data {
+namespace {
+struct channel_data {
/* We take a reference to the channel stack for the timer callback */
grpc_channel_stack* channel_stack;
/* Guards access to max_age_timer, max_age_timer_pending, max_age_grace_timer
@@ -84,7 +85,8 @@ typedef struct channel_data {
grpc_connectivity_state connectivity_state;
/* Number of active calls */
gpr_atm call_count;
-} channel_data;
+};
+} // namespace
/* Increase the number of active calls. Before the increase, if there are no
calls, the max_idle_timer should be cancelled. */
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index f8487f9a9e..3cb7b136c0 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -86,7 +86,8 @@ static void* refcounted_message_size_limits_create_from_json(
return value;
}
-typedef struct call_data {
+namespace {
+struct call_data {
grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
@@ -97,13 +98,14 @@ typedef struct call_data {
grpc_byte_stream** recv_message;
// Original recv_message_ready callback, invoked after our own.
grpc_closure* next_recv_message_ready;
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
message_size_limits limits;
// Maps path names to refcounted_message_size_limits structs.
grpc_slice_hash_table* method_limit_table;
-} channel_data;
+};
+} // namespace
// Callback invoked when we receive a message. Here we check the max
// receive message size.
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
index 555a9134a2..88bb8c71cc 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@@ -25,7 +25,8 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/metadata.h"
-typedef struct call_data {
+namespace {
+struct call_data {
// Receive closures are chained: we inject this closure as the
// recv_initial_metadata_ready up-call on transport_stream_op, and remember to
// call our next_recv_initial_metadata_ready member after handling it.
@@ -37,7 +38,8 @@ typedef struct call_data {
// Marks whether the workaround is active
bool workaround_active;
-} call_data;
+};
+} // namespace
// Find the user agent metadata element in the batch
static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index 043ca9bb83..9a6f5e9bcf 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -138,10 +138,11 @@ static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
const char* staller) {
gpr_log(
GPR_DEBUG,
- "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
+ "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR
+ ":pending-compressed=%" PRIdPTR ":flowed=%" PRId64
":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
- s->flow_controlled_bytes_flowed,
+ s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control->remote_window(),
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 5fa02017f0..08edff5159 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -71,6 +71,7 @@ struct grpc_fd {
int shutdown;
int closed;
int released;
+ gpr_atm pollhup;
grpc_error* shutdown_error;
/* The watcher list.
@@ -335,6 +336,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
r->on_done_closure = nullptr;
r->closed = 0;
r->released = 0;
+ gpr_atm_no_barrier_store(&r->pollhup, 0);
r->read_notifier_pollset = nullptr;
char* name2;
@@ -462,7 +464,7 @@ static grpc_error* fd_shutdown_error(grpc_fd* fd) {
static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
grpc_closure* closure) {
- if (fd->shutdown) {
+ if (fd->shutdown || gpr_atm_no_barrier_load(&fd->pollhup)) {
GRPC_CLOSURE_SCHED(closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
@@ -950,7 +952,8 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[0].events = POLLIN;
pfds[0].revents = 0;
for (i = 0; i < pollset->fd_count; i++) {
- if (fd_is_orphaned(pollset->fds[i])) {
+ if (fd_is_orphaned(pollset->fds[i]) ||
+ gpr_atm_no_barrier_load(&pollset->fds[i]->pollhup) == 1) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
} else {
pollset->fds[fd_count++] = pollset->fds[i];
@@ -1017,6 +1020,12 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
+ /* This is a mitigation to prevent poll() from spinning on a POLLHUP.
+ * See https://github.com/grpc/grpc/pull/13665 */
+ if (pfds[i].revents & POLLHUP) {
+ gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1);
+ }
fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
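
The new pollhup flag exists because a hung-up descriptor makes poll() return immediately on every call, spinning the poller. A small standalone POSIX demo (Linux pipe semantics assumed) of observing POLLHUP once and dropping the fd from later poll sets:

    #include <poll.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fds[2];
      if (pipe(fds) != 0) return 1;
      close(fds[1]);  // writer gone: the read end now reports POLLHUP
      bool exclude_fd = false;
      for (int iter = 0; iter < 3; ++iter) {
        if (exclude_fd) break;  // the mitigation: stop polling a hung-up fd
        struct pollfd pfd;
        pfd.fd = fds[0];
        pfd.events = POLLIN;
        pfd.revents = 0;
        if (poll(&pfd, 1, 100) > 0 && (pfd.revents & POLLHUP)) {
          std::printf("POLLHUP on iteration %d; excluding fd\n", iter);
          exclude_fd = true;  // analogous to storing fd->pollhup = 1
        }
      }
      close(fds[0]);
      return 0;
    }
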
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index 816acf2a23..d47a077251 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -63,7 +63,8 @@ typedef size_t msg_iovlen_type;
grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
-typedef struct {
+namespace {
+struct grpc_tcp {
grpc_endpoint base;
grpc_fd* em_fd;
int fd;
@@ -96,12 +97,13 @@ typedef struct {
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
-} grpc_tcp;
+};
-typedef struct backup_poller {
+struct backup_poller {
gpr_mu* pollset_mu;
grpc_closure run_poller;
-} backup_poller;
+};
+} // namespace
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
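
BACKUP_POLLER_POLLSET depends on the backup_poller struct being co-allocated with its pollset storage immediately behind it, so (b) + 1 points at the trailing region. The idiom in isolation:

    #include <cstdlib>
    #include <new>

    struct Header { int id; };

    int main() {
      const std::size_t payload_size = 64;
      // One allocation: Header first, payload bytes directly after it.
      void* mem = std::malloc(sizeof(Header) + payload_size);
      if (mem == nullptr) return 1;
      Header* h = new (mem) Header{7};
      void* payload = static_cast<void*>(h + 1);  // same trick as (b) + 1
      (void)payload;
      std::free(mem);
      return 0;
    }
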
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index 2c26b60511..baa49d5cc5 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -65,6 +65,17 @@ typedef struct {
grpc_pollset* pollset;
} grpc_tcp;
+static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
+ return grpc_error_set_str(
+ grpc_error_set_int(
+ src_error,
+ /* All tcp errors are marked with UNAVAILABLE so that the application may
+ * choose to retry. */
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+ GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(tcp->peer_string));
+}
+
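
tcp_annotate_error attaches GRPC_STATUS_UNAVAILABLE and the peer address to every transport error so that callers can make a retry decision. A hedged sketch of the caller-side logic this enables (invented names, not the gRPC error API):

    enum class StatusCode { kOk, kUnavailable, kInternal };

    struct AnnotatedError {
      StatusCode status;
      const char* target_address;  // analogous to GRPC_ERROR_STR_TARGET_ADDRESS
    };

    // UNAVAILABLE marks a transient transport failure worth retrying.
    static bool should_retry(const AnnotatedError& e) {
      return e.status == StatusCode::kUnavailable;
    }

    int main() {
      AnnotatedError e{StatusCode::kUnavailable, "ipv4:127.0.0.1:443"};
      return should_retry(e) ? 0 : 1;
    }
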
static void tcp_free(grpc_tcp* tcp) {
grpc_resource_user_unref(tcp->resource_user);
gpr_free(tcp->handle);
@@ -162,7 +173,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
+ error =
+ tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
} else if (nread > 0) {
// Successful read
@@ -177,7 +189,8 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
}
} else {
// nread < 0: Error
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
+ error = tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
}
call_read_cb(tcp, error);
@@ -194,7 +207,9 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
status =
uv_read_start((uv_stream_t*)tcp->handle, alloc_uv_buf, read_callback);
if (status != 0) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start");
+ error = tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start"),
+ tcp);
error = grpc_error_set_str(
error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status)));
@@ -235,7 +250,8 @@ static void write_callback(uv_write_t* req, int status) {
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
+ error = tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed"), tcp);
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
@@ -268,8 +284,10 @@ static void uv_endpoint_write(grpc_endpoint* ep,
}
if (tcp->shutting_down) {
- GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "TCP socket is shutting down"));
+ GRPC_CLOSURE_SCHED(cb,
+ tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "TCP socket is shutting down"),
+ tcp));
return;
}
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index cd3c2e3f19..6a3641f112 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -37,8 +37,9 @@
#define MAX_CREDENTIALS_METADATA_COUNT 4
+namespace {
/* We can have a per-call credentials. */
-typedef struct {
+struct call_data {
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_call_credentials* creds;
@@ -57,13 +58,14 @@ typedef struct {
grpc_closure async_result_closure;
grpc_closure check_call_host_cancel_closure;
grpc_closure get_request_metadata_cancel_closure;
-} call_data;
+};
/* We can have a per-channel credentials. */
-typedef struct {
+struct channel_data {
grpc_channel_security_connector* security_connector;
grpc_auth_context* auth_context;
-} channel_data;
+};
+} // namespace
void grpc_auth_metadata_context_reset(
grpc_auth_metadata_context* auth_md_context) {
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index 73653f2a66..f82971dc56 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -26,13 +26,14 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
-typedef enum {
+namespace {
+enum async_state {
STATE_INIT = 0,
STATE_DONE,
STATE_CANCELLED,
-} async_state;
+};
-typedef struct call_data {
+struct call_data {
grpc_call_combiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
@@ -44,12 +45,13 @@ typedef struct call_data {
grpc_auth_context* auth_context;
grpc_closure cancel_closure;
gpr_atm state; // async_state
-} call_data;
+};
-typedef struct channel_data {
+struct channel_data {
grpc_auth_context* auth_context;
grpc_server_credentials* creds;
-} channel_data;
+};
+} // namespace
static grpc_metadata_array metadata_batch_to_md_array(
const grpc_metadata_batch* batch) {
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index a457aaa7a2..d677576c14 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -1851,8 +1851,9 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
{
grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
- override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Error from server send status");
+ override_error =
+ error_from_status(op->data.send_status_from_server.status,
+ "Returned non-ok status");
}
if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index f1d428f0a1..ee98cf2693 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -44,24 +44,23 @@
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
-typedef struct listener {
+grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
+
+namespace {
+struct listener {
void* arg;
void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
size_t pollset_count);
void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
struct listener* next;
grpc_closure destroy_done;
-} listener;
+};
-typedef struct call_data call_data;
-typedef struct channel_data channel_data;
-typedef struct registered_method registered_method;
+enum requested_call_type { BATCH_CALL, REGISTERED_CALL };
-typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
+struct registered_method;
-grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
-
-typedef struct requested_call {
+struct requested_call {
gpr_mpscq_node request_link; /* must be first */
requested_call_type type;
size_t cq_idx;
@@ -81,15 +80,15 @@ typedef struct requested_call {
grpc_byte_buffer** optional_payload;
} registered;
} data;
-} requested_call;
+};
-typedef struct channel_registered_method {
+struct channel_registered_method {
registered_method* server_registered_method;
uint32_t flags;
bool has_host;
grpc_slice method;
grpc_slice host;
-} channel_registered_method;
+};
struct channel_data {
grpc_server* server;
@@ -176,6 +175,7 @@ typedef struct {
grpc_channel** channels;
size_t num_channels;
} channel_broadcaster;
+} // namespace
struct grpc_server {
grpc_channel_args* channel_args;
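
Note that grpc_server_channel_trace is hoisted above the new anonymous namespace: unlike the file-local structs, the trace flag needs external linkage (it is presumably declared extern in a header so --grpc_trace can find it). The resulting split:

    grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");  // external linkage

    namespace {
    struct listener;  // internal linkage: visible only to this translation unit
    }  // namespace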
diff --git a/src/core/tsi/ssl_transport_security.cc b/src/core/tsi/ssl_transport_security.cc
index f35caef640..229f7efd37 100644
--- a/src/core/tsi/ssl_transport_security.cc
+++ b/src/core/tsi/ssl_transport_security.cc
@@ -116,6 +116,9 @@ typedef struct {
static gpr_once init_openssl_once = GPR_ONCE_INIT;
static gpr_mu* openssl_mutexes = nullptr;
+static void openssl_locking_cb(int mode, int type, const char* file,
+ int line) GRPC_UNUSED;
+static unsigned long openssl_thread_id_cb(void) GRPC_UNUSED;
static void openssl_locking_cb(int mode, int type, const char* file, int line) {
if (mode & CRYPTO_LOCK) {
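
The two forward declarations exist only to attach GRPC_UNUSED, silencing -Wunused-function builds where the OpenSSL locking callbacks are compiled but never installed (newer OpenSSL manages locking internally). Judging from the port_platform.h change in this same commit, GRPC_UNUSED is presumably defined along these lines:

    /* Hedged reconstruction of the port_platform.h addition: */
    #if defined(__GNUC__) && !defined(__MINGW32__)
    #define GRPC_UNUSED __attribute__((unused))
    #else
    #define GRPC_UNUSED
    #endif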
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
index bbcbd95be5..5bbff38948 100755
--- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
@@ -15,12 +15,12 @@
<PackageTags>gRPC RPC Protocol HTTP/2 Auth OAuth2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <IncludeSymbols>true</IncludeSymbols>
- <IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
+ <Import Project="..\Grpc.Core\SourceLink.csproj.include" />
+
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
index 4d6767fa98..40840d4da3 100755
--- a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
+++ b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
@@ -15,12 +15,12 @@
<PackageTags>gRPC test testing</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <IncludeSymbols>true</IncludeSymbols>
- <IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
+ <Import Project="..\Grpc.Core\SourceLink.csproj.include" />
+
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index d9950b2f20..6d44be7ddd 100755
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -14,12 +14,12 @@
<PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <IncludeSymbols>true</IncludeSymbols>
- <IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
+ <Import Project="SourceLink.csproj.include" />
+
<ItemGroup>
<EmbeddedResource Include="..\..\..\etc\roots.pem" />
<Content Include="..\nativelibs\csharp_ext_macos_x64\libgrpc_csharp_ext.dylib">
diff --git a/src/csharp/Grpc.Core/SourceLink.csproj.include b/src/csharp/Grpc.Core/SourceLink.csproj.include
new file mode 100755
index 0000000000..02ae79fb89
--- /dev/null
+++ b/src/csharp/Grpc.Core/SourceLink.csproj.include
@@ -0,0 +1,19 @@
+<!-- Ensure that debugging of the resulting NuGet packages works (we're using SourceLink). -->
+<Project>
+
+ <ItemGroup Label="dotnet pack instructions">
+ <Content Include="$(OutputPath)netstandard1.5\$(PackageId).pdb">
+ <Pack>true</Pack>
+ <PackagePath>lib/netstandard1.5</PackagePath>
+ </Content>
+ <Content Include="$(OutputPath)net45\$(PackageId).pdb">
+ <Pack>true</Pack>
+ <PackagePath>lib/net45</PackagePath>
+ </Content>
+ </ItemGroup>
+
+ <ItemGroup>
+ <PackageReference Include="SourceLink.Embed.AllSourceFiles" Version="2.7.3" PrivateAssets="all" />
+ </ItemGroup>
+
+</Project>
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
index 681719d124..da61253455 100755
--- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
@@ -14,12 +14,12 @@
<PackageTags>gRPC health check</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <IncludeSymbols>true</IncludeSymbols>
- <IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
+ <Import Project="..\Grpc.Core\SourceLink.csproj.include" />
+
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
index 704eea5c17..862ecda5fd 100755
--- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
+++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
@@ -14,12 +14,12 @@
<PackageTags>gRPC reflection</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <IncludeSymbols>true</IncludeSymbols>
- <IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
+ <Import Project="..\Grpc.Core\SourceLink.csproj.include" />
+
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/test/core/end2end/end2end_nosec_tests.cc b/test/core/end2end/end2end_nosec_tests.cc
index 3236feea56..6318550ad8 100644
--- a/test/core/end2end/end2end_nosec_tests.cc
+++ b/test/core/end2end/end2end_nosec_tests.cc
@@ -68,6 +68,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
+extern void filter_status_code(grpc_end2end_test_config config);
+extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@@ -170,6 +172,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
+ filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@@ -237,6 +240,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
+ filter_status_code(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@@ -356,6 +360,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config);
continue;
}
+ if (0 == strcmp("filter_status_code", argv[i])) {
+ filter_status_code(config);
+ continue;
+ }
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;
diff --git a/test/core/end2end/end2end_tests.cc b/test/core/end2end/end2end_tests.cc
index ca9443b642..9d8dfd6723 100644
--- a/test/core/end2end/end2end_tests.cc
+++ b/test/core/end2end/end2end_tests.cc
@@ -70,6 +70,8 @@ extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
+extern void filter_status_code(grpc_end2end_test_config config);
+extern void filter_status_code_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@@ -173,6 +175,7 @@ void grpc_end2end_tests_pre_init(void) {
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
+ filter_status_code_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@@ -241,6 +244,7 @@ void grpc_end2end_tests(int argc, char **argv,
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
+ filter_status_code(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@@ -364,6 +368,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_latency(config);
continue;
}
+ if (0 == strcmp("filter_status_code", argv[i])) {
+ filter_status_code(config);
+ continue;
+ }
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;
diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py
index 7c8e7f420a..e7cf97b2d0 100755
--- a/test/core/end2end/gen_build_yaml.py
+++ b/test/core/end2end/gen_build_yaml.py
@@ -101,6 +101,7 @@ END2END_TESTS = {
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
+ 'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False,
diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl
index b9a42bdb88..1d759e1ecb 100755
--- a/test/core/end2end/generate_tests.bzl
+++ b/test/core/end2end/generate_tests.bzl
@@ -146,6 +146,7 @@ END2END_TESTS = {
'trailing_metadata': test_options(),
'authority_not_supported': test_options(),
'filter_latency': test_options(),
+ 'filter_status_code': test_options(),
'workaround_cronet_compression': test_options(),
'write_buffering': test_options(needs_write_buffering=True),
'write_buffering_at_end': test_options(needs_write_buffering=True),
diff --git a/test/core/end2end/tests/filter_status_code.cc b/test/core/end2end/tests/filter_status_code.cc
new file mode 100644
index 0000000000..261ddd93ec
--- /dev/null
+++ b/test/core/end2end/tests/filter_status_code.cc
@@ -0,0 +1,353 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/end2end/end2end_tests.h"
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "test/core/end2end/cq_verifier.h"
+
+static bool g_enable_filter = false;
+static gpr_mu g_mu;
+static bool g_client_code_recv;
+static bool g_server_code_recv;
+static gpr_cv g_client_code_cv;
+static gpr_cv g_server_code_cv;
+static grpc_status_code g_client_status_code;
+static grpc_status_code g_server_status_code;
+
+static void* tag(intptr_t t) { return (void*)t; }
+
+static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
+ const char* test_name,
+ grpc_channel_args* client_args,
+ grpc_channel_args* server_args) {
+ grpc_end2end_test_fixture f;
+ gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
+ f = config.create_fixture(client_args, server_args);
+ config.init_server(&f, server_args);
+ config.init_client(&f, client_args);
+ return f;
+}
+
+static gpr_timespec n_seconds_from_now(int n) {
+ return grpc_timeout_seconds_to_deadline(n);
+}
+
+static gpr_timespec five_seconds_from_now(void) {
+ return n_seconds_from_now(5);
+}
+
+static void drain_cq(grpc_completion_queue* cq) {
+ grpc_event ev;
+ do {
+ ev = grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr);
+ } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+}
+
+static void shutdown_server(grpc_end2end_test_fixture* f) {
+ if (!f->server) return;
+ grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
+ GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
+ grpc_timeout_seconds_to_deadline(5),
+ nullptr)
+ .type == GRPC_OP_COMPLETE);
+ grpc_server_destroy(f->server);
+ f->server = nullptr;
+}
+
+static void shutdown_client(grpc_end2end_test_fixture* f) {
+ if (!f->client) return;
+ grpc_channel_destroy(f->client);
+ f->client = nullptr;
+}
+
+static void end_test(grpc_end2end_test_fixture* f) {
+ shutdown_server(f);
+ shutdown_client(f);
+
+ grpc_completion_queue_shutdown(f->cq);
+ drain_cq(f->cq);
+ grpc_completion_queue_destroy(f->cq);
+ grpc_completion_queue_destroy(f->shutdown_cq);
+}
+
+// Simple request; a client filter and a server filter each record the
+// status code reported for the call.
+static void test_request(grpc_end2end_test_config config) {
+ grpc_call* c;
+ grpc_call* s;
+ grpc_end2end_test_fixture f =
+ begin_test(config, "filter_status_code", nullptr, nullptr);
+ cq_verifier* cqv = cq_verifier_create(f.cq);
+ grpc_op ops[6];
+ grpc_op* op;
+ grpc_metadata_array initial_metadata_recv;
+ grpc_metadata_array trailing_metadata_recv;
+ grpc_metadata_array request_metadata_recv;
+ grpc_call_details call_details;
+ grpc_status_code status;
+ grpc_call_error error;
+ grpc_slice details;
+ int was_cancelled = 2;
+
+ gpr_mu_lock(&g_mu);
+ g_client_status_code = GRPC_STATUS_OK;
+ g_server_status_code = GRPC_STATUS_OK;
+ gpr_mu_unlock(&g_mu);
+
+ gpr_timespec deadline = five_seconds_from_now();
+ c = grpc_channel_create_call(
+ f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
+ grpc_slice_from_static_string("/foo"),
+ get_host_override_slice("foo.test.google.fr", config), deadline, nullptr);
+ GPR_ASSERT(c);
+
+ grpc_metadata_array_init(&initial_metadata_recv);
+ grpc_metadata_array_init(&trailing_metadata_recv);
+ grpc_metadata_array_init(&request_metadata_recv);
+ grpc_call_details_init(&call_details);
+
+ memset(ops, 0, sizeof(ops));
+ op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->data.send_initial_metadata.metadata = nullptr;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+ op->data.recv_status_on_client.status = &status;
+ op->data.recv_status_on_client.status_details = &details;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), nullptr);
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ error =
+ grpc_server_request_call(f.server, &s, &call_details,
+ &request_metadata_recv, f.cq, f.cq, tag(101));
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+ cq_verify(cqv);
+
+ memset(ops, 0, sizeof(ops));
+ op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+ op->data.send_status_from_server.trailing_metadata_count = 0;
+ op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
+ grpc_slice status_string = grpc_slice_from_static_string("xyz");
+ op->data.send_status_from_server.status_details = &status_string;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+ op->data.recv_close_on_server.cancelled = &was_cancelled;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), nullptr);
+ GPR_ASSERT(GRPC_CALL_OK == error);
+
+ CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
+ CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+ cq_verify(cqv);
+
+ GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
+ GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
+
+ grpc_slice_unref(details);
+ grpc_metadata_array_destroy(&initial_metadata_recv);
+ grpc_metadata_array_destroy(&trailing_metadata_recv);
+ grpc_metadata_array_destroy(&request_metadata_recv);
+ grpc_call_details_destroy(&call_details);
+
+ grpc_call_unref(s);
+ grpc_call_unref(c);
+
+ cq_verifier_destroy(cqv);
+
+ end_test(&f);
+ config.tear_down_data(&f);
+
+ // Perform checks after test tear-down
+ // Guards against the case that there's outstanding channel-related work on a
+ // call prior to verification
+ // TODO(https://github.com/grpc/grpc/issues/13915) enable this for windows
+#ifndef GPR_WINDOWS
+ gpr_mu_lock(&g_mu);
+  if (!g_client_code_recv) {
+    GPR_ASSERT(gpr_cv_wait(&g_client_code_cv, &g_mu,
+                           grpc_timeout_seconds_to_deadline(3)) == 0);
+  }
+  if (!g_server_code_recv) {
+    GPR_ASSERT(gpr_cv_wait(&g_server_code_cv, &g_mu,
+                           grpc_timeout_seconds_to_deadline(3)) == 0);
+  }
+ GPR_ASSERT(g_client_status_code == GRPC_STATUS_UNIMPLEMENTED);
+ GPR_ASSERT(g_server_status_code == GRPC_STATUS_UNIMPLEMENTED);
+ gpr_mu_unlock(&g_mu);
+#endif // GPR_WINDOWS
+}
+
+/*******************************************************************************
+ * Test status_code filter
+ */
+
+static grpc_error* init_call_elem(grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ return GRPC_ERROR_NONE;
+}
+
+static void client_destroy_call_elem(grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ gpr_mu_lock(&g_mu);
+ g_client_status_code = final_info->final_status;
+ g_client_code_recv = true;
+ gpr_cv_signal(&g_client_code_cv);
+ gpr_mu_unlock(&g_mu);
+}
+
+static void server_destroy_call_elem(grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ gpr_mu_lock(&g_mu);
+ g_server_status_code = final_info->final_status;
+ g_server_code_recv = true;
+ gpr_cv_signal(&g_server_code_cv);
+ gpr_mu_unlock(&g_mu);
+}
+
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ return GRPC_ERROR_NONE;
+}
+
+static void destroy_channel_elem(grpc_channel_element* elem) {}
+
+static const grpc_channel_filter test_client_filter = {
+ grpc_call_next_op,
+ grpc_channel_next_op,
+ 0,
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ client_destroy_call_elem,
+ 0,
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_channel_next_get_info,
+ "client_filter_status_code"};
+
+static const grpc_channel_filter test_server_filter = {
+ grpc_call_next_op,
+ grpc_channel_next_op,
+ 0,
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ server_destroy_call_elem,
+ 0,
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_channel_next_get_info,
+ "server_filter_status_code"};
+
+/*******************************************************************************
+ * Registration
+ */
+
+static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
+ grpc_channel_filter* filter = (grpc_channel_filter*)arg;
+ if (g_enable_filter) {
+ // Want to add the filter as close to the end as possible, to make
+ // sure that all of the filters work well together. However, we
+ // can't add it at the very end, because the
+ // connected_channel/client_channel filter must be the last one.
+ // So we add it right before the last one.
+ grpc_channel_stack_builder_iterator* it =
+ grpc_channel_stack_builder_create_iterator_at_last(builder);
+ GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
+ const bool retval = grpc_channel_stack_builder_add_filter_before(
+ it, filter, nullptr, nullptr);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return retval;
+ } else {
+ return true;
+ }
+}
+
+static void init_plugin(void) {
+ gpr_mu_init(&g_mu);
+ gpr_cv_init(&g_client_code_cv);
+ gpr_cv_init(&g_server_code_cv);
+ g_client_code_recv = false;
+ g_server_code_recv = false;
+
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
+ maybe_add_filter,
+ (void*)&test_client_filter);
+ grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+ maybe_add_filter,
+ (void*)&test_client_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_add_filter,
+ (void*)&test_server_filter);
+}
+
+static void destroy_plugin(void) {
+ gpr_cv_destroy(&g_client_code_cv);
+ gpr_cv_destroy(&g_server_code_cv);
+ gpr_mu_destroy(&g_mu);
+}
+
+void filter_status_code(grpc_end2end_test_config config) {
+ g_enable_filter = true;
+ test_request(config);
+ g_enable_filter = false;
+}
+
+void filter_status_code_pre_init(void) {
+ grpc_register_plugin(init_plugin, destroy_plugin);
+}
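
The test leans on grpc_call_final_info: by the time destroy_call_elem runs for a filter element, the call stack has already recorded the call's final status, which is what the client and server filters above copy into the globals. A rough sketch of the relevant struct from channel_stack.h (fields beyond final_status hedged):

    typedef struct {
      grpc_call_stats stats;          /* transport/stream statistics */
      grpc_status_code final_status;  /* what the filters read in destroy_call_elem */
    } grpc_call_final_info;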
diff --git a/test/core/transport/chttp2/settings_timeout_test.cc b/test/core/transport/chttp2/settings_timeout_test.cc
index 08473c72b6..d7d6ee7508 100644
--- a/test/core/transport/chttp2/settings_timeout_test.cc
+++ b/test/core/transport/chttp2/settings_timeout_test.cc
@@ -21,6 +21,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <functional>
#include <memory>
#include <thread>
diff --git a/third_party/rake-compiler-dock/Dockerfile b/third_party/rake-compiler-dock/Dockerfile
index b4a5158535..06c721c39b 100644
--- a/third_party/rake-compiler-dock/Dockerfile
+++ b/third_party/rake-compiler-dock/Dockerfile
@@ -1,182 +1,4 @@
-FROM ubuntu:17.04
-
-RUN apt-get -y update && \
- apt-get install -y curl git-core xz-utils build-essential wget unzip sudo gpg dirmngr
-
-# Add "rvm" as system group, to avoid conflicts with host GIDs typically starting with 1000
-RUN groupadd -r rvm && useradd -r -g rvm -G sudo -p "" --create-home rvm && \
- echo "source /etc/profile.d/rvm.sh" >> /etc/rubybashrc
-
-USER root
-RUN apt-get -y update && \
- apt-get install -y gcc-mingw-w64-x86-64 gcc-mingw-w64-i686 g++-mingw-w64-x86-64 g++-mingw-w64-i686 \
- gcc-multilib moreutils
-USER rvm
-
-# install rvm, RVM 1.26.0+ has signed releases, source rvm for usage outside of package scripts
-RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3 && \
- (curl -L http://get.rvm.io | sudo bash -s stable) && \
- bash -c " \
- source /etc/rubybashrc && \
- rvmsudo rvm cleanup all "
-
-# Regenerate the following using build.sh if the build folder changes.
-RUN echo \\
-[~70 lines of base64-encoded tarball payload (the build/ directory) elided; the same helper files are deleted in plaintext below]
-| base64 -d | tar xzC /tmp
-
-# Import patch files for ruby and gems
-RUN cp -r /tmp/build/patches /home/rvm/patches/
-ENV BASH_ENV /etc/rubybashrc
-
-# install rubies and fix permissions on
-RUN bash -c " \
- export CFLAGS='-s -O3 -fno-fast-math -fPIC' && \
- echo 'about to install patches for ruby 2.4.0 from:' && \
- ls -r ~/patches && \
- for v in 2.4.0 ; do \
- rvm install \$v --patch \$(echo ~/patches/ruby-\$v/* | tr ' ' ','); \
- done && \
- rvm cleanup all && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-# Install rake-compiler and typical gems in all Rubies
-# do not generate documentation for gems
-RUN echo "gem: --no-ri --no-rdoc" >> ~/.gemrc && \
- bash -c " \
- rvm all do gem install bundler rake-compiler hoe mini_portile rubygems-tasks && \
- rvm 2.4.0 do gem install mini_portile2 && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-RUN bash -c "gem env"
-RUN bash -c "gem list rake-compiler"
-
-# Install rake-compiler's cross rubies in global dir instead of /root
-RUN sudo mkdir -p /usr/local/rake-compiler && \
- sudo chown rvm.rvm /usr/local/rake-compiler && \
- ln -s /usr/local/rake-compiler ~/.rake-compiler
-
-# Patch rake-compiler to avoid build of ruby extensions
-RUN cd /usr/local/rvm/gems/ruby-2.4.0/gems/rake-compiler-0.9.5 && git apply /home/rvm/patches/rake-compiler-0.9.5/*.diff ; \
- true
-
-RUN bash -c "rvm use 2.4.0 --default && \
- export MAKE=\"make -j`nproc`\" CFLAGS='-s -O1 -fno-omit-frame-pointer -fno-fast-math' && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=x86_64-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=i686-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=x86_64-w64-mingw32 && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=x86_64-linux-gnu && \
- rm -rf ~/.rake-compiler/tmp/builds ~/.rake-compiler/sources && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-RUN bash -c "rvm use 2.4.0 --default && \
- export MAKE=\"make -j`nproc`\" CFLAGS='-m32 -s -O1 -fno-omit-frame-pointer -fno-fast-math' LDFLAGS='-m32' && \
- rake-compiler cross-ruby VERSION=2.4.0 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.3.0 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.2.2 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.1.5 HOST=i686-linux-gnu && \
- rake-compiler cross-ruby VERSION=2.0.0-p645 HOST=i686-linux-gnu && \
- rm -rf ~/.rake-compiler/tmp/builds ~/.rake-compiler/sources && \
- find /usr/local/rvm -type d -print0 | sudo xargs -0 chmod g+sw "
-
-RUN bash -c " \
- rvm alias create 2.4 2.4.0 "
-
-USER root
-
-# Fix paths in rake-compiler/config.yml and add rvm and mingw-tools to the global bashrc
-RUN sed -i -- "s:/root/.rake-compiler:/usr/local/rake-compiler:g" /usr/local/rake-compiler/config.yml && \
- echo "source /etc/profile.d/rvm.sh" >> /etc/bash.bashrc && \
- echo "export PATH=\$PATH:/opt/mingw/mingw32/bin" >> /etc/bash.bashrc && \
- echo "export PATH=\$PATH:/opt/mingw/mingw64/bin" >> /etc/bash.bashrc
-
-# Install wrappers for strip commands as a workaround for "Protocol error" in boot2docker.
-RUN cp /tmp/build/strip_wrapper /root/
-RUN sudo chmod +rx /root/strip_wrapper
-RUN mv /usr/bin/i686-w64-mingw32-strip /usr/bin/i686-w64-mingw32-strip.bin && \
- mv /usr/bin/x86_64-w64-mingw32-strip /usr/bin/x86_64-w64-mingw32-strip.bin && \
- ln /root/strip_wrapper /usr/bin/i686-w64-mingw32-strip && \
- ln /root/strip_wrapper /usr/bin/x86_64-w64-mingw32-strip
+FROM larskanis/rake-compiler-dock:0.6.2
RUN find / -name rbconfig.rb | while read f ; do sed -i 's/0x0501/0x0600/' $f ; done
RUN find / -name win32.h | while read f ; do sed -i 's/gettimeofday/rb_gettimeofday/' $f ; done
@@ -184,26 +6,6 @@ RUN sed -i 's/defined.__MINGW64__.$/1/' /usr/local/rake-compiler/ruby/i686-w64-m
RUN find / -name libwinpthread.dll.a | xargs rm
RUN find / -name libwinpthread-1.dll | xargs rm
RUN find / -name *msvcrt-ruby*.dll.a | while read f ; do n=`echo $f | sed s/.dll//` ; mv $f $n ; done
-RUN find /usr/local/rake-compiler/ruby -name libruby.so | xargs rm
-RUN find /usr/local/rake-compiler/ruby -name libruby-static.a | while read f ; do ar t $f | xargs ar d $f ; done
-RUN find /usr/local/rake-compiler/ruby -name libruby-static.a | while read f ; do mv $f `echo $f | sed s/-static//` ; done
-
-# Install SIGINT forwarder
-RUN cp /tmp/build/sigfw.c /root/
-RUN gcc $HOME/sigfw.c -o /usr/local/bin/sigfw
-
-# Install user mapper
-RUN cp /tmp/build/runas /usr/local/bin/
-
-# Install sudoers configuration
-RUN cp /tmp/build/sudoers /etc/sudoers.d/rake-compiler-dock
-
-# Fixup Ruby 2.4 'static' compilation issue.
-RUN echo '!<arch>' > /usr/local/rake-compiler/ruby/x86_64-linux-gnu/ruby-2.4.0/lib/libruby.a
-RUN echo '!<arch>' > /usr/local/rake-compiler/ruby/i686-linux-gnu/ruby-2.4.0/lib/libruby.a
-
-ENV RUBY_CC_VERSION 2.4.0:2.3.0:2.2.2:2.1.5:2.0.0
-
RUN apt-get install -y g++-multilib
CMD bash
diff --git a/third_party/rake-compiler-dock/build.sh b/third_party/rake-compiler-dock/build.sh
deleted file mode 100755
index ca01fa6d4d..0000000000
--- a/third_party/rake-compiler-dock/build.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Run this to produce the snipplet of data to insert in the Dockerfile.
-
-echo 'RUN echo \\'
-tar cz build | base64 | sed 's/$/\\/'
-echo '| base64 -d | tar xzC /tmp'
diff --git a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff b/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff
deleted file mode 100644
index ea22bd928e..0000000000
--- a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/compat-with-bundler.diff
+++ /dev/null
@@ -1,105 +0,0 @@
-From 41f834449fc4323b2f995e8715aa5842d9fd9334 Mon Sep 17 00:00:00 2001
-From: Lars Kanis <lars@greiz-reinsdorf.de>
-Date: Sat, 30 Jan 2016 08:08:07 +0100
-Subject: [PATCH] Change the fake mechanism to be compatible with bundler.
-
-The previous fake mechanism worked by hooking onto the
-"require 'rbconfig'" call.
-This is problematic because bundler internally requires rbconfig, but doesn't
-work corretly in a faked environment.
-It then fails to load gems that are also part of the standard library, like
-json and rdoc.
-This results in issues like https://github.com/rake-compiler/rake-compiler-dock/issues/8
-
-The fake mechanism is now changed to hook onto the "require 'mkrb'" call,
-which is typically part of the extconf file, and it is where the faked platform
-values are actually needed.
-That way it is loaded after bundler/setup, so that the library paths are
-set according to the Gemfile.lock, to the native Linux libraries, before
-the fake environment is active.
-
-Please note, that the build directory of a given gem needs to be cleared,
-in order to get updated fake files. So do a "rm tmp pkg -rf".
----
- lib/rake/extensiontask.rb | 35 ++++++++++++++---------------------
- 1 file changed, 14 insertions(+), 21 deletions(-)
-
-diff --git a/lib/rake/extensiontask.rb b/lib/rake/extensiontask.rb
-index 030af96..f914919 100644
---- a/lib/rake/extensiontask.rb
-+++ b/lib/rake/extensiontask.rb
-@@ -169,8 +169,8 @@ Java extension should be preferred.
- # now add the extconf script
- cmd << abs_extconf.relative_path_from(abs_tmp_path)
-
-- # rbconfig.rb will be present if we are cross compiling
-- if t.prerequisites.include?("#{tmp_path}/rbconfig.rb") then
-+ # fake.rb will be present if we are cross compiling
-+ if t.prerequisites.include?("#{tmp_path}/fake.rb") then
- options.push(*cross_config_options(platf))
- end
-
-@@ -365,39 +365,30 @@ Java extension should be preferred.
- # define compilation tasks for cross platform!
- define_compile_tasks(for_platform, ruby_ver)
-
-- # chain fake.rb, rbconfig.rb and mkmf.rb to Makefile generation
-+ # chain fake.rb and mkmf.rb to Makefile generation
- file "#{tmp_path}/Makefile" => ["#{tmp_path}/fake.rb",
-- "#{tmp_path}/rbconfig.rb",
- "#{tmp_path}/mkmf.rb"]
-
-- # copy the file from the cross-ruby location
-- file "#{tmp_path}/rbconfig.rb" => [rbconfig_file] do |t|
-+ # copy the rbconfig from the cross-ruby location and
-+ # genearte fake.rb for different ruby versions
-+ file "#{tmp_path}/fake.rb" => [rbconfig_file] do |t|
- File.open(t.name, 'w') do |f|
-- f.write "require 'fake.rb'\n\n"
-+ f.write fake_rb(for_platform, ruby_ver)
- f.write File.read(t.prerequisites.first)
- end
- end
-
- # copy mkmf from cross-ruby location
- file "#{tmp_path}/mkmf.rb" => [mkmf_file] do |t|
-- cp t.prerequisites.first, t.name
-- if ruby_ver < "1.9" && "1.9" <= RUBY_VERSION
-- File.open(t.name, 'r+t') do |f|
-- content = f.read
-+ File.open(t.name, 'w') do |f|
-+ content = File.read(t.prerequisites.first)
-+ content.sub!(/^(require ')rbconfig(')$/, '\\1fake\\2')
-+ if ruby_ver < "1.9" && "1.9" <= RUBY_VERSION
- content.sub!(/^( break )\*(defaults)$/, '\\1\\2.first')
- content.sub!(/^( return )\*(defaults)$/, '\\1\\2.first')
- content.sub!(/^( mfile\.)print( configuration\(srcprefix\))$/, '\\1puts\\2')
-- f.rewind
-- f.write content
-- f.truncate(f.tell)
- end
-- end
-- end
--
-- # genearte fake.rb for different ruby versions
-- file "#{tmp_path}/fake.rb" do |t|
-- File.open(t.name, 'w') do |f|
-- f.write fake_rb(for_platform, ruby_ver)
-+ f.write content
- end
- end
-
-@@ -495,8 +486,10 @@ Java extension should be preferred.
- # "cannot load such file -- win32/resolv" when it is required later on.
- # See also: https://github.com/tjschuck/rake-compiler-dev-box/issues/5
- require 'resolv'
-+ require 'rbconfig'
-
- class Object
-+ remove_const :RbConfig
- remove_const :RUBY_PLATFORM
- remove_const :RUBY_VERSION
- remove_const :RUBY_DESCRIPTION if defined?(RUBY_DESCRIPTION)
---
-2.5.0.windows.1
-
diff --git a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff b/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff
deleted file mode 100644
index 07739d33ec..0000000000
--- a/third_party/rake-compiler-dock/build/patches/rake-compiler-0.9.5/without-exts.diff
+++ /dev/null
@@ -1,14 +0,0 @@
-diff --git a/tasks/bin/cross-ruby.rake b/tasks/bin/cross-ruby.rake
-index 6acc816..6aa2a49 100644
---- a/tasks/bin/cross-ruby.rake
-+++ b/tasks/bin/cross-ruby.rake
-@@ -135,8 +135,7 @@ file "#{USER_HOME}/builds/#{MINGW_HOST}/#{RUBY_CC_VERSION}/Makefile" => ["#{USER
- "--build=#{RUBY_BUILD}",
- '--enable-shared',
- '--disable-install-doc',
-- '--without-tk',
-- '--without-tcl'
-+ '--with-ext='
- ]
-
- # Force Winsock2 for Ruby 1.8, 1.9 defaults to it
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch b/third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch
deleted file mode 100644
index fac8525da6..0000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-1.8.7-p374/nop.patch
+++ /dev/null
@@ -1,2 +0,0 @@
-diff --git a/configure b/configure
-index 55157af..6630eba 100755
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch b/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch
deleted file mode 100644
index d8f339e814..0000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/no_sendfile.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/configure b/configure
-index 898730c..cfe6253 100755
---- a/configure
-+++ b/configure
-@@ -14695,7 +14695,7 @@ for ac_func in fmod killpg wait4 waitpid fork spawnv syscall __syscall chroot ge
- setsid telldir seekdir fchmod cosh sinh tanh log2 round\
- setuid setgid daemon select_large_fdset setenv unsetenv\
- mktime timegm gmtime_r clock_gettime gettimeofday poll ppoll\
-- pread sendfile shutdown sigaltstack dl_iterate_phdr
-+ pread shutdown sigaltstack dl_iterate_phdr
- do :
- as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
- ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch b/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch
deleted file mode 100644
index fac8525da6..0000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-1.9.3/nop.patch
+++ /dev/null
@@ -1,2 +0,0 @@
-diff --git a/configure b/configure
-index 55157af..6630eba 100755
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch b/third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch
deleted file mode 100644
index 915fc7b790..0000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-2.3.0/no_sendfile.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/configure b/configure
-index ebe3d8c..a336b73 100755
---- a/configure
-+++ b/configure
-@@ -18943,7 +18943,6 @@ do :
- ac_fn_c_check_func "$LINENO" "sendfile" "ac_cv_func_sendfile"
- if test "x$ac_cv_func_sendfile" = xyes; then :
- cat >>confdefs.h <<_ACEOF
--#define HAVE_SENDFILE 1
- _ACEOF
-
- fi
diff --git a/third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch b/third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch
deleted file mode 100644
index 915fc7b790..0000000000
--- a/third_party/rake-compiler-dock/build/patches/ruby-2.4.0/no_sendfile.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/configure b/configure
-index ebe3d8c..a336b73 100755
---- a/configure
-+++ b/configure
-@@ -18943,7 +18943,6 @@ do :
- ac_fn_c_check_func "$LINENO" "sendfile" "ac_cv_func_sendfile"
- if test "x$ac_cv_func_sendfile" = xyes; then :
- cat >>confdefs.h <<_ACEOF
--#define HAVE_SENDFILE 1
- _ACEOF
-
- fi
diff --git a/third_party/rake-compiler-dock/build/runas b/third_party/rake-compiler-dock/build/runas
deleted file mode 100755
index b29ce31fcc..0000000000
--- a/third_party/rake-compiler-dock/build/runas
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-groupadd -g "$GID" "$GROUP"
-mkdir -p /tmp/home
-useradd -g "$GID" -u "$UID" -G rvm,sudo -p "" -b /tmp/home -m "$USER"
-
-HOME=$(bash <<< "echo ~$USER")
-ln -s /usr/local/rake-compiler "$HOME"/.rake-compiler
-
-sudo -u "$USER" --set-home \
- BASH_ENV=/etc/rubybashrc \
- -- "$@"
diff --git a/third_party/rake-compiler-dock/build/sigfw.c b/third_party/rake-compiler-dock/build/sigfw.c
deleted file mode 100644
index 291d76cec8..0000000000
--- a/third_party/rake-compiler-dock/build/sigfw.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This program handles SIGINT and forwards it to another process.
- * It is intended to be run as PID 1.
- *
- * Docker starts processes with "docker run" as PID 1.
- * On Linux, the default signal handler for PID 1 ignores any signals.
- * Therefore Ctrl-C aka SIGINT is ignored per default.
- */
-
-#include <signal.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-int pid = 0;
-
-void
-handle_sigint (int signum)
-{
- if(pid)
- kill(pid, SIGINT);
-}
-
-int main(int argc, char *argv[]){
- struct sigaction new_action;
- int status = -1;
-
- /* Set up the structure to specify the new action. */
- new_action.sa_handler = handle_sigint;
- sigemptyset (&new_action.sa_mask);
- new_action.sa_flags = 0;
-
- sigaction (SIGINT, &new_action, (void*)0);
-
- pid = fork();
- if(pid){
- wait(&status);
- return WEXITSTATUS(status);
- }else{
- status = execvp(argv[1], &argv[1]);
- perror("exec");
- return status;
- }
-}
diff --git a/third_party/rake-compiler-dock/build/strip_wrapper b/third_party/rake-compiler-dock/build/strip_wrapper
deleted file mode 100755
index 7f8a1346a1..0000000000
--- a/third_party/rake-compiler-dock/build/strip_wrapper
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env ruby
-
-# Strip file on local folder instead of a Virtualbox shared folder
-# to work around this bug: https://www.virtualbox.org/ticket/8463
-
-require 'tempfile'
-require 'fileutils'
-
-strip = "#{File.basename($0)}.bin"
-
-files = ARGV.reject{|f| f=~/^-/ }.map do |arg|
- tmp = Tempfile.new 'strip'
- tmp.close
- FileUtils.cp arg, tmp.path
- [tmp, arg]
-end
-
-options = ARGV.select{|f| f=~/^-/ } + files.map{|t,o| t.path }
-
-unless system( strip, *options )
- exit 127
-end
-code = $?.exitstatus
-
-files.each do |tmp, orig|
- FileUtils.rm orig
- FileUtils.cp tmp.path, orig
-end
-
-exit code
diff --git a/third_party/rake-compiler-dock/build/sudoers b/third_party/rake-compiler-dock/build/sudoers
deleted file mode 100644
index f9f9b97c95..0000000000
--- a/third_party/rake-compiler-dock/build/sudoers
+++ /dev/null
@@ -1 +0,0 @@
-Defaults env_keep += "http_proxy https_proxy ftp_proxy RCD_HOST_RUBY_PLATFORM RCD_HOST_RUBY_VERSION RCD_IMAGE RUBY_CC_VERSION"
diff --git a/tools/bazel.rc b/tools/bazel.rc
index c554f03971..8af2fc981d 100644
--- a/tools/bazel.rc
+++ b/tools/bazel.rc
@@ -1,4 +1,5 @@
build --client_env=CC=clang
+build --copt -DGRPC_BAZEL_BUILD
build:asan --strip=never
build:asan --copt -fsanitize-coverage=edge
diff --git a/tools/distrib/build_ruby_environment_macos.sh b/tools/distrib/build_ruby_environment_macos.sh
index fe0c5a4d70..af36740255 100644
--- a/tools/distrib/build_ruby_environment_macos.sh
+++ b/tools/distrib/build_ruby_environment_macos.sh
@@ -47,7 +47,7 @@ EOF
MAKE="make -j8"
-for v in 2.4.0 2.3.0 2.2.2 2.1.5 2.0.0-p645 ; do
+for v in 2.5.0 2.4.0 2.3.0 2.2.2 2.1.6 2.0.0-p645 ; do
ccache -c
rake -f $CROSS_RUBY cross-ruby VERSION=$v HOST=x86_64-darwin11
done
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index 8f782e07c2..0eb2cbe1a2 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -48,6 +48,7 @@ LICENSE_PREFIX = {
'.cc': r'\s*(?://|\*)\s*',
'.h': r'\s*(?://|\*)\s*',
'.m': r'\s*\*\s*',
+ '.mm': r'\s*\*\s*',
'.php': r'\s*\*\s*',
'.js': r'\s*\*\s*',
'.py': r'#\s*',
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 0fc5a25afd..d432bd0e53 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -7616,6 +7616,7 @@
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_latency.cc",
+ "test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
"test/core/end2end/tests/high_initial_seqno.cc",
"test/core/end2end/tests/hpack_size.cc",
@@ -7697,6 +7698,7 @@
"test/core/end2end/tests/filter_call_init_fails.cc",
"test/core/end2end/tests/filter_causes_close.cc",
"test/core/end2end/tests/filter_latency.cc",
+ "test/core/end2end/tests/filter_status_code.cc",
"test/core/end2end/tests/graceful_server_shutdown.cc",
"test/core/end2end/tests/high_initial_seqno.cc",
"test/core/end2end/tests/hpack_size.cc",
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 5cf371190c..98517cba2e 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -6799,6 +6799,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_census_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -8137,6 +8160,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_compress_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -9432,6 +9478,28 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fakesec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -10638,6 +10706,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fd_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -11905,6 +11996,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -13161,6 +13275,25 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+pipe_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -14345,6 +14478,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+trace_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -15637,6 +15793,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+workarounds_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -16993,6 +17172,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_http_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -18391,6 +18594,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_load_reporting_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -19747,6 +19973,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_oauth2_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -21067,6 +21317,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -22171,6 +22445,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -23395,6 +23693,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair+trace_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -24579,6 +24901,32 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "msan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_1byte_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -25925,6 +26273,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -27209,6 +27580,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_ssl_proxy_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -28366,6 +28761,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -29562,6 +29980,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "inproc_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"high_initial_seqno"
],
"ci_platforms": [
@@ -30599,6 +31040,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_census_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -31914,6 +32378,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_compress_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -33112,6 +33599,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_fd_nosec_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -34356,6 +34866,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -35593,6 +36126,25 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+pipe_nosec_test",
+ "platforms": [
+ "linux"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -36754,6 +37306,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+trace_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -38023,6 +38598,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_full+workarounds_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -39355,6 +39953,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_http_proxy_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -40730,6 +41352,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_load_reporting_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -41990,6 +42635,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_proxy_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -43070,6 +43739,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -44270,6 +44963,30 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair+trace_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -45428,6 +46145,32 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "msan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_sockpair_1byte_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -46726,6 +47469,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "h2_uds_nosec_test",
+ "platforms": [
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"graceful_server_shutdown"
],
"ci_platforms": [
@@ -47899,6 +48665,29 @@
},
{
"args": [
+ "filter_status_code"
+ ],
+ "ci_platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [],
+ "exclude_iomgrs": [],
+ "flaky": false,
+ "language": "c",
+ "name": "inproc_nosec_test",
+ "platforms": [
+ "windows",
+ "linux",
+ "mac",
+ "posix"
+ ]
+ },
+ {
+ "args": [
"high_initial_seqno"
],
"ci_platforms": [