author    Yong Ni <yongni@google.com>  2017-07-05 14:56:13 -0700
committer Yong Ni <yongni@google.com>  2017-07-05 14:56:13 -0700
commit    ef698b60cd0dc121f0d8d50f972bbcca6743905f (patch)
tree      b103d0b3c6fd128e141b7900f42bb094e52b191f /src
parent    5f32c517b1f9ac4854039af8d0e1bb2da04f5821 (diff)
parent    0c009ba2b590618fdeade42bb7ebd3034f6fd045 (diff)
Merge branch 'master' of github.com:grpc/grpc into matrix
Diffstat (limited to 'src')
-rw-r--r-- src/boringssl/err_data.c | 536
-rw-r--r-- src/compiler/objective_c_generator_helpers.h | 4
-rw-r--r-- src/compiler/objective_c_plugin.cc | 3
-rw-r--r-- src/compiler/php_generator.cc | 40
-rw-r--r-- src/compiler/php_generator_helpers.h | 14
-rw-r--r-- src/compiler/python_generator.cc | 1
-rw-r--r-- src/core/ext/census/grpc_filter.c | 2
-rw-r--r-- src/core/ext/filters/client_channel/channel_connectivity.c | 6
-rw-r--r-- src/core/ext/filters/client_channel/client_channel.c | 868
-rw-r--r-- src/core/ext/filters/client_channel/client_channel_factory.c | 9
-rw-r--r-- src/core/ext/filters/client_channel/client_channel_plugin.c | 9
-rw-r--r-- src/core/ext/filters/client_channel/http_connect_handshaker.c | 10
-rw-r--r-- src/core/ext/filters/client_channel/http_proxy.c | 7
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy.c | 26
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy.h | 7
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c | 8
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c | 65
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c | 61
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c | 54
-rw-r--r-- src/core/ext/filters/client_channel/lb_policy_factory.c | 8
-rw-r--r-- src/core/ext/filters/client_channel/resolver.c | 35
-rw-r--r-- src/core/ext/filters/client_channel/resolver.h | 14
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c | 49
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h | 6
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c | 29
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c | 331
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h | 23
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c | 59
-rw-r--r-- src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c | 12
-rw-r--r-- src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c | 30
-rw-r--r-- src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c | 6
-rw-r--r-- src/core/ext/filters/client_channel/subchannel.c | 45
-rw-r--r-- src/core/ext/filters/client_channel/subchannel.h | 2
-rw-r--r-- src/core/ext/filters/deadline/deadline_filter.c | 14
-rw-r--r-- src/core/ext/filters/http/client/http_client_filter.c | 14
-rw-r--r-- src/core/ext/filters/http/message_compress/message_compress_filter.c | 39
-rw-r--r-- src/core/ext/filters/http/server/http_server_filter.c | 14
-rw-r--r-- src/core/ext/filters/load_reporting/load_reporting.c | 6
-rw-r--r-- src/core/ext/filters/load_reporting/load_reporting_filter.c | 2
-rw-r--r-- src/core/ext/filters/max_age/max_age_filter.c | 18
-rw-r--r-- src/core/ext/filters/message_size/message_size_filter.c | 4
-rw-r--r-- src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c | 4
-rw-r--r-- src/core/ext/transport/chttp2/client/chttp2_connector.c | 6
-rw-r--r-- src/core/ext/transport/chttp2/client/insecure/channel_create.c | 8
-rw-r--r-- src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c | 6
-rw-r--r-- src/core/ext/transport/chttp2/client/secure/secure_channel_create.c | 8
-rw-r--r-- src/core/ext/transport/chttp2/server/chttp2_server.c | 2
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_plugin.c | 3
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 288
-rw-r--r-- src/core/ext/transport/chttp2/transport/chttp2_transport.h | 4
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_data.c | 11
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_ping.c | 2
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 2
-rw-r--r-- src/core/ext/transport/chttp2/transport/frame_window_update.c | 3
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_encoder.c | 14
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_encoder.h | 2
-rw-r--r-- src/core/ext/transport/chttp2/transport/hpack_parser.c | 10
-rw-r--r-- src/core/ext/transport/chttp2/transport/internal.h | 11
-rw-r--r-- src/core/ext/transport/chttp2/transport/parsing.c | 14
-rw-r--r-- src/core/ext/transport/chttp2/transport/writing.c | 92
-rw-r--r-- src/core/ext/transport/cronet/transport/cronet_transport.c | 86
-rw-r--r-- src/core/lib/channel/channel_args.c | 26
-rw-r--r-- src/core/lib/channel/channel_args.h | 6
-rw-r--r-- src/core/lib/channel/channel_stack.h | 4
-rw-r--r-- src/core/lib/channel/handshaker.c | 8
-rw-r--r-- src/core/lib/http/httpcli.c | 19
-rw-r--r-- src/core/lib/http/httpcli_security_connector.c | 12
-rw-r--r-- src/core/lib/iomgr/closure.c | 52
-rw-r--r-- src/core/lib/iomgr/closure.h | 72
-rw-r--r-- src/core/lib/iomgr/combiner.c | 261
-rw-r--r-- src/core/lib/iomgr/combiner.h | 11
-rw-r--r-- src/core/lib/iomgr/endpoint.c | 4
-rw-r--r-- src/core/lib/iomgr/endpoint.h | 4
-rw-r--r-- src/core/lib/iomgr/error.c | 55
-rw-r--r-- src/core/lib/iomgr/error.h | 20
-rw-r--r-- src/core/lib/iomgr/ev_epoll1_linux.c | 110
-rw-r--r-- src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c | 257
-rw-r--r-- src/core/lib/iomgr/ev_epoll_thread_pool_linux.c | 194
-rw-r--r-- src/core/lib/iomgr/ev_epollex_linux.c | 326
-rw-r--r-- src/core/lib/iomgr/ev_epollsig_linux.c | 357
-rw-r--r-- src/core/lib/iomgr/ev_poll_posix.c | 82
-rw-r--r-- src/core/lib/iomgr/ev_posix.c | 32
-rw-r--r-- src/core/lib/iomgr/ev_posix.h | 16
-rw-r--r-- src/core/lib/iomgr/exec_ctx.c | 12
-rw-r--r-- src/core/lib/iomgr/executor.c | 244
-rw-r--r-- src/core/lib/iomgr/executor.h | 9
-rw-r--r-- src/core/lib/iomgr/iomgr.c | 7
-rw-r--r-- src/core/lib/iomgr/iomgr.h | 4
-rw-r--r-- src/core/lib/iomgr/lockfree_event.c | 8
-rw-r--r-- src/core/lib/iomgr/pollset.h | 4
-rw-r--r-- src/core/lib/iomgr/pollset_uv.c | 8
-rw-r--r-- src/core/lib/iomgr/pollset_windows.c | 8
-rw-r--r-- src/core/lib/iomgr/resolve_address_posix.c | 6
-rw-r--r-- src/core/lib/iomgr/resolve_address_uv.c | 6
-rw-r--r-- src/core/lib/iomgr/resolve_address_windows.c | 6
-rw-r--r-- src/core/lib/iomgr/resource_quota.c | 81
-rw-r--r-- src/core/lib/iomgr/socket_factory_posix.c | 9
-rw-r--r-- src/core/lib/iomgr/socket_mutator.c | 10
-rw-r--r-- src/core/lib/iomgr/socket_windows.c | 4
-rw-r--r-- src/core/lib/iomgr/tcp_client_posix.c | 14
-rw-r--r-- src/core/lib/iomgr/tcp_client_uv.c | 4
-rw-r--r-- src/core/lib/iomgr/tcp_client_windows.c | 8
-rw-r--r-- src/core/lib/iomgr/tcp_posix.c | 52
-rw-r--r-- src/core/lib/iomgr/tcp_server_posix.c | 10
-rw-r--r-- src/core/lib/iomgr/tcp_server_uv.c | 4
-rw-r--r-- src/core/lib/iomgr/tcp_server_windows.c | 8
-rw-r--r-- src/core/lib/iomgr/tcp_uv.c | 40
-rw-r--r-- src/core/lib/iomgr/tcp_windows.c | 57
-rw-r--r-- src/core/lib/iomgr/timer_generic.c | 8
-rw-r--r-- src/core/lib/iomgr/timer_uv.c | 6
-rw-r--r-- src/core/lib/iomgr/udp_server.c | 12
-rw-r--r-- src/core/lib/iomgr/workqueue.h | 72
-rw-r--r-- src/core/lib/iomgr/workqueue_uv.c | 50
-rw-r--r-- src/core/lib/iomgr/workqueue_uv.h | 22
-rw-r--r-- src/core/lib/iomgr/workqueue_windows.c | 48
-rw-r--r-- src/core/lib/iomgr/workqueue_windows.h | 22
-rw-r--r-- src/core/lib/security/context/security_context.c | 37
-rw-r--r-- src/core/lib/security/context/security_context.h | 6
-rw-r--r-- src/core/lib/security/credentials/credentials.c | 17
-rw-r--r-- src/core/lib/security/credentials/fake/fake_credentials.c | 11
-rw-r--r-- src/core/lib/security/credentials/google_default/google_default_credentials.c | 4
-rw-r--r-- src/core/lib/security/credentials/jwt/jwt_verifier.c | 6
-rw-r--r-- src/core/lib/security/credentials/oauth2/oauth2_credentials.c | 4
-rw-r--r-- src/core/lib/security/credentials/ssl/ssl_credentials.c | 10
-rw-r--r-- src/core/lib/security/transport/client_auth_filter.c | 36
-rw-r--r-- src/core/lib/security/transport/lb_targets_info.c | 9
-rw-r--r-- src/core/lib/security/transport/secure_endpoint.c | 34
-rw-r--r-- src/core/lib/security/transport/security_connector.c | 43
-rw-r--r-- src/core/lib/security/transport/security_connector.h | 6
-rw-r--r-- src/core/lib/security/transport/security_handshaker.c | 12
-rw-r--r-- src/core/lib/security/transport/server_auth_filter.c | 8
-rw-r--r-- src/core/lib/support/stack_lockfree.c | 137
-rw-r--r-- src/core/lib/support/stack_lockfree.h | 38
-rw-r--r-- src/core/lib/support/time_precise.c | 16
-rw-r--r-- src/core/lib/surface/alarm.c | 2
-rw-r--r-- src/core/lib/surface/call.c | 80
-rw-r--r-- src/core/lib/surface/call.h | 2
-rw-r--r-- src/core/lib/surface/channel.c | 2
-rw-r--r-- src/core/lib/surface/channel.h | 2
-rw-r--r-- src/core/lib/surface/channel_ping.c | 2
-rw-r--r-- src/core/lib/surface/completion_queue.c | 31
-rw-r--r-- src/core/lib/surface/completion_queue.h | 6
-rw-r--r-- src/core/lib/surface/init.c | 24
-rw-r--r-- src/core/lib/surface/init_secure.c | 10
-rw-r--r-- src/core/lib/surface/lame_client.cc | 8
-rw-r--r-- src/core/lib/surface/server.c | 40
-rw-r--r-- src/core/lib/transport/connectivity_state.c | 10
-rw-r--r-- src/core/lib/transport/metadata.c | 136
-rw-r--r-- src/core/lib/transport/metadata.h | 8
-rw-r--r-- src/core/lib/transport/transport.c | 57
-rw-r--r-- src/core/lib/transport/transport.h | 26
-rw-r--r-- src/core/lib/transport/transport_op_string.c | 3
-rw-r--r-- src/cpp/common/channel_filter.h | 54
-rw-r--r-- src/csharp/Grpc.Core.Tests/ServerTest.cs | 16
-rw-r--r-- src/csharp/Grpc.Core/Server.cs | 21
-rw-r--r-- src/csharp/Grpc.IntegrationTesting/Control.cs | 260
-rw-r--r-- src/csharp/Grpc.IntegrationTesting/CustomErrorDetailsTest.cs | 112
-rw-r--r-- src/csharp/Grpc.IntegrationTesting/EchoMessages.cs | 1354
-rw-r--r-- src/csharp/Grpc.IntegrationTesting/Services.cs | 34
-rw-r--r-- src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs | 266
-rw-r--r-- src/csharp/Grpc.IntegrationTesting/Stats.cs | 93
-rw-r--r-- src/csharp/ext/grpc_csharp_ext.c | 14
-rwxr-xr-x src/csharp/generate_proto_csharp.sh | 2
-rw-r--r-- src/csharp/tests.json | 1
-rw-r--r-- src/node/src/grpc_extension.js | 7
-rw-r--r-- src/node/test/surface_test.js | 47
-rw-r--r-- src/objective-c/GRPCClient/GRPCCall.h | 6
-rw-r--r-- src/objective-c/GRPCClient/GRPCCall.m | 2
-rw-r--r-- src/objective-c/GRPCClient/private/GRPCChannel.h | 1
-rw-r--r-- src/objective-c/GRPCClient/private/GRPCChannel.m | 23
-rw-r--r-- src/objective-c/GRPCClient/private/GRPCHost.h | 1
-rw-r--r-- src/objective-c/GRPCClient/private/GRPCHost.m | 3
-rw-r--r-- src/objective-c/GRPCClient/private/GRPCWrappedCall.h | 1
-rw-r--r-- src/objective-c/GRPCClient/private/GRPCWrappedCall.m | 9
-rw-r--r-- src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m | 4
-rw-r--r-- src/objective-c/tests/PluginTest/imported-with-dash.proto | 22
-rw-r--r-- src/objective-c/tests/PluginTest/test-dash-filename.proto | 27
-rwxr-xr-x src/objective-c/tests/build_tests.sh | 1
-rwxr-xr-x src/objective-c/tests/run_tests.sh | 32
-rw-r--r-- src/php/ext/grpc/version.h | 2
-rw-r--r-- src/proto/grpc/testing/control.proto | 6
-rw-r--r-- src/python/grpcio/grpc/_channel.py | 5
-rw-r--r-- src/python/grpcio/grpc/_server.py | 3
-rw-r--r-- src/python/grpcio/grpc_core_dependencies.py | 26
-rw-r--r-- src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py | 6
-rw-r--r-- src/ruby/lib/grpc/errors.rb | 11
-rw-r--r-- src/ruby/spec/channel_credentials_spec.rb | 2
187 files changed, 5417 insertions, 3797 deletions
diff --git a/src/boringssl/err_data.c b/src/boringssl/err_data.c
index c1257cdc78..88462d1376 100644
--- a/src/boringssl/err_data.c
+++ b/src/boringssl/err_data.c
@@ -178,42 +178,42 @@ const uint32_t kOpenSSLReasonValues[] = {
0x28340c19,
0x283480ac,
0x283500ea,
- 0x2c3228ca,
- 0x2c32a8d8,
- 0x2c3328ea,
- 0x2c33a8fc,
- 0x2c342910,
- 0x2c34a922,
- 0x2c35293d,
- 0x2c35a94f,
- 0x2c362962,
+ 0x2c3229b1,
+ 0x2c32a9bf,
+ 0x2c3329d1,
+ 0x2c33a9e3,
+ 0x2c3429f7,
+ 0x2c34aa09,
+ 0x2c352a24,
+ 0x2c35aa36,
+ 0x2c362a49,
0x2c36832d,
- 0x2c37296f,
- 0x2c37a981,
- 0x2c382994,
- 0x2c38a9ab,
- 0x2c3929b9,
- 0x2c39a9c9,
- 0x2c3a29db,
- 0x2c3aa9ef,
- 0x2c3b2a00,
- 0x2c3baa1f,
- 0x2c3c2a33,
- 0x2c3caa49,
- 0x2c3d2a62,
- 0x2c3daa7f,
- 0x2c3e2a90,
- 0x2c3eaa9e,
- 0x2c3f2ab6,
- 0x2c3faace,
- 0x2c402adb,
+ 0x2c372a56,
+ 0x2c37aa68,
+ 0x2c382a7b,
+ 0x2c38aa92,
+ 0x2c392aa0,
+ 0x2c39aab0,
+ 0x2c3a2ac2,
+ 0x2c3aaad6,
+ 0x2c3b2ae7,
+ 0x2c3bab06,
+ 0x2c3c2b1a,
+ 0x2c3cab30,
+ 0x2c3d2b49,
+ 0x2c3dab66,
+ 0x2c3e2b77,
+ 0x2c3eab85,
+ 0x2c3f2b9d,
+ 0x2c3fabb5,
+ 0x2c402bc2,
0x2c4090e7,
- 0x2c412aec,
- 0x2c41aaff,
+ 0x2c412bd3,
+ 0x2c41abe6,
0x2c4210c0,
- 0x2c42ab10,
+ 0x2c42abf7,
0x2c430720,
- 0x2c43aa11,
+ 0x2c43aaf8,
0x30320000,
0x30328015,
0x3033001f,
@@ -366,180 +366,189 @@ const uint32_t kOpenSSLReasonValues[] = {
0x403b9861,
0x403c0064,
0x403c8083,
- 0x403d18aa,
- 0x403d98c0,
- 0x403e18cf,
- 0x403e98e2,
- 0x403f18fc,
- 0x403f990a,
- 0x4040191f,
- 0x40409933,
- 0x40411950,
- 0x4041996b,
- 0x40421984,
- 0x40429997,
- 0x404319ab,
- 0x404399c3,
- 0x404419da,
+ 0x403d18c1,
+ 0x403d98d7,
+ 0x403e18e6,
+ 0x403e98f9,
+ 0x403f1913,
+ 0x403f9921,
+ 0x40401936,
+ 0x4040994a,
+ 0x40411967,
+ 0x40419982,
+ 0x4042199b,
+ 0x404299ae,
+ 0x404319c2,
+ 0x404399da,
+ 0x404419f1,
0x404480ac,
- 0x404519ef,
- 0x40459a01,
- 0x40461a25,
- 0x40469a45,
- 0x40471a53,
- 0x40479a7a,
- 0x40481ab7,
- 0x40489ad0,
- 0x40491ae7,
- 0x40499b01,
- 0x404a1b18,
- 0x404a9b36,
- 0x404b1b4e,
- 0x404b9b65,
- 0x404c1b7b,
- 0x404c9b8d,
- 0x404d1bae,
- 0x404d9bd0,
- 0x404e1be4,
- 0x404e9bf1,
- 0x404f1c1e,
- 0x404f9c47,
- 0x40501c71,
- 0x40509c85,
- 0x40511ca0,
- 0x40519cb0,
- 0x40521cc7,
- 0x40529ceb,
- 0x40531d03,
- 0x40539d16,
- 0x40541d2b,
- 0x40549d4e,
- 0x40551d5c,
- 0x40559d79,
- 0x40561d86,
- 0x40569d9f,
- 0x40571db7,
- 0x40579dca,
- 0x40581ddf,
- 0x40589e06,
- 0x40591e35,
- 0x40599e62,
- 0x405a1e76,
- 0x405a9e86,
- 0x405b1e9e,
- 0x405b9eaf,
- 0x405c1ec2,
- 0x405c9ed3,
- 0x405d1ee0,
- 0x405d9ef7,
- 0x405e1f17,
+ 0x40451a06,
+ 0x40459a18,
+ 0x40461a3c,
+ 0x40469a5c,
+ 0x40471a6a,
+ 0x40479a91,
+ 0x40481ace,
+ 0x40489ae7,
+ 0x40491afe,
+ 0x40499b18,
+ 0x404a1b2f,
+ 0x404a9b4d,
+ 0x404b1b65,
+ 0x404b9b7c,
+ 0x404c1b92,
+ 0x404c9ba4,
+ 0x404d1bc5,
+ 0x404d9be7,
+ 0x404e1bfb,
+ 0x404e9c08,
+ 0x404f1c35,
+ 0x404f9c5e,
+ 0x40501c99,
+ 0x40509cad,
+ 0x40511cc8,
+ 0x40519cd8,
+ 0x40521cef,
+ 0x40529d13,
+ 0x40531d2b,
+ 0x40539d3e,
+ 0x40541d53,
+ 0x40549d76,
+ 0x40551d84,
+ 0x40559da1,
+ 0x40561dae,
+ 0x40569dc7,
+ 0x40571ddf,
+ 0x40579df2,
+ 0x40581e07,
+ 0x40589e2e,
+ 0x40591e5d,
+ 0x40599e8a,
+ 0x405a1e9e,
+ 0x405a9eae,
+ 0x405b1ec6,
+ 0x405b9ed7,
+ 0x405c1eea,
+ 0x405c9f0b,
+ 0x405d1f18,
+ 0x405d9f2f,
+ 0x405e1f6d,
0x405e8a95,
- 0x405f1f38,
- 0x405f9f45,
- 0x40601f53,
- 0x40609f75,
- 0x40611f9d,
- 0x40619fb2,
- 0x40621fc9,
- 0x40629fda,
- 0x40631feb,
- 0x4063a000,
- 0x40642017,
- 0x4064a043,
- 0x4065205e,
- 0x4065a075,
- 0x4066208d,
- 0x4066a0b7,
- 0x406720e2,
- 0x4067a103,
- 0x40682116,
- 0x4068a137,
- 0x40692169,
- 0x4069a197,
- 0x406a21b8,
- 0x406aa1d8,
- 0x406b2360,
- 0x406ba383,
- 0x406c2399,
- 0x406ca5c5,
- 0x406d25f4,
- 0x406da61c,
- 0x406e264a,
- 0x406ea662,
- 0x406f2681,
- 0x406fa696,
- 0x407026a9,
- 0x4070a6c6,
+ 0x405f1f8e,
+ 0x405f9f9b,
+ 0x40601fa9,
+ 0x40609fcb,
+ 0x4061200f,
+ 0x4061a047,
+ 0x4062205e,
+ 0x4062a06f,
+ 0x40632080,
+ 0x4063a095,
+ 0x406420ac,
+ 0x4064a0d8,
+ 0x406520f3,
+ 0x4065a10a,
+ 0x40662122,
+ 0x4066a14c,
+ 0x40672177,
+ 0x4067a198,
+ 0x406821ab,
+ 0x4068a1cc,
+ 0x406921fe,
+ 0x4069a22c,
+ 0x406a224d,
+ 0x406aa26d,
+ 0x406b23f5,
+ 0x406ba418,
+ 0x406c242e,
+ 0x406ca690,
+ 0x406d26bf,
+ 0x406da6e7,
+ 0x406e2715,
+ 0x406ea749,
+ 0x406f2768,
+ 0x406fa77d,
+ 0x40702790,
+ 0x4070a7ad,
0x40710800,
- 0x4071a6d8,
- 0x407226eb,
- 0x4072a704,
- 0x4073271c,
+ 0x4071a7bf,
+ 0x407227d2,
+ 0x4072a7eb,
+ 0x40732803,
0x4073936d,
- 0x40742730,
- 0x4074a74a,
- 0x4075275b,
- 0x4075a76f,
- 0x4076277d,
+ 0x40742817,
+ 0x4074a831,
+ 0x40752842,
+ 0x4075a856,
+ 0x40762864,
0x407691aa,
- 0x407727a2,
- 0x4077a7c4,
- 0x407827df,
- 0x4078a818,
- 0x4079282f,
- 0x4079a845,
- 0x407a2851,
- 0x407aa864,
- 0x407b2879,
- 0x407ba88b,
- 0x407c28a0,
- 0x407ca8a9,
- 0x407d2152,
- 0x407d9c57,
- 0x407e27f4,
- 0x407e9e16,
- 0x407f1a67,
+ 0x40772889,
+ 0x4077a8ab,
+ 0x407828c6,
+ 0x4078a8ff,
+ 0x40792916,
+ 0x4079a92c,
+ 0x407a2938,
+ 0x407aa94b,
+ 0x407b2960,
+ 0x407ba972,
+ 0x407c2987,
+ 0x407ca990,
+ 0x407d21e7,
+ 0x407d9c6e,
+ 0x407e28db,
+ 0x407e9e3e,
+ 0x407f1a7e,
0x407f9887,
- 0x40801c2e,
- 0x40809a8f,
- 0x40811cd9,
- 0x40819c08,
- 0x40822635,
+ 0x40801c45,
+ 0x40809aa6,
+ 0x40811d01,
+ 0x40819c1f,
+ 0x40822700,
0x4082986d,
- 0x40831df1,
- 0x4083a028,
- 0x40841aa3,
- 0x40849e4e,
- 0x41f4228b,
- 0x41f9231d,
- 0x41fe2210,
- 0x41fea3ec,
- 0x41ff24dd,
- 0x420322a4,
- 0x420822c6,
- 0x4208a302,
- 0x420921f4,
- 0x4209a33c,
- 0x420a224b,
- 0x420aa22b,
- 0x420b226b,
- 0x420ba2e4,
- 0x420c24f9,
- 0x420ca3b9,
- 0x420d23d3,
- 0x420da40a,
- 0x42122424,
- 0x421724c0,
- 0x4217a466,
- 0x421c2488,
- 0x421f2443,
- 0x42212510,
- 0x422624a3,
- 0x422b25a9,
- 0x422ba572,
- 0x422c2591,
- 0x422ca54c,
- 0x422d252b,
+ 0x40831e19,
+ 0x4083a0bd,
+ 0x40841aba,
+ 0x40849e76,
+ 0x40851efb,
+ 0x40859ff3,
+ 0x40861f4f,
+ 0x40869c88,
+ 0x4087272d,
+ 0x4087a024,
+ 0x408818aa,
+ 0x41f42320,
+ 0x41f923b2,
+ 0x41fe22a5,
+ 0x41fea481,
+ 0x41ff2572,
+ 0x42032339,
+ 0x4208235b,
+ 0x4208a397,
+ 0x42092289,
+ 0x4209a3d1,
+ 0x420a22e0,
+ 0x420aa2c0,
+ 0x420b2300,
+ 0x420ba379,
+ 0x420c258e,
+ 0x420ca44e,
+ 0x420d2468,
+ 0x420da49f,
+ 0x421224b9,
+ 0x42172555,
+ 0x4217a4fb,
+ 0x421c251d,
+ 0x421f24d8,
+ 0x422125a5,
+ 0x42262538,
+ 0x422b2674,
+ 0x422ba622,
+ 0x422c265c,
+ 0x422ca5e1,
+ 0x422d25c0,
+ 0x422da641,
+ 0x422e2607,
0x4432072b,
0x4432873a,
0x44330746,
@@ -582,69 +591,69 @@ const uint32_t kOpenSSLReasonValues[] = {
0x4c3d136d,
0x4c3d937c,
0x4c3e1389,
- 0x50322b22,
- 0x5032ab31,
- 0x50332b3c,
- 0x5033ab4c,
- 0x50342b65,
- 0x5034ab7f,
- 0x50352b8d,
- 0x5035aba3,
- 0x50362bb5,
- 0x5036abcb,
- 0x50372be4,
- 0x5037abf7,
- 0x50382c0f,
- 0x5038ac20,
- 0x50392c35,
- 0x5039ac49,
- 0x503a2c69,
- 0x503aac7f,
- 0x503b2c97,
- 0x503baca9,
- 0x503c2cc5,
- 0x503cacdc,
- 0x503d2cf5,
- 0x503dad0b,
- 0x503e2d18,
- 0x503ead2e,
- 0x503f2d40,
+ 0x50322c09,
+ 0x5032ac18,
+ 0x50332c23,
+ 0x5033ac33,
+ 0x50342c4c,
+ 0x5034ac66,
+ 0x50352c74,
+ 0x5035ac8a,
+ 0x50362c9c,
+ 0x5036acb2,
+ 0x50372ccb,
+ 0x5037acde,
+ 0x50382cf6,
+ 0x5038ad07,
+ 0x50392d1c,
+ 0x5039ad30,
+ 0x503a2d50,
+ 0x503aad66,
+ 0x503b2d7e,
+ 0x503bad90,
+ 0x503c2dac,
+ 0x503cadc3,
+ 0x503d2ddc,
+ 0x503dadf2,
+ 0x503e2dff,
+ 0x503eae15,
+ 0x503f2e27,
0x503f8382,
- 0x50402d53,
- 0x5040ad63,
- 0x50412d7d,
- 0x5041ad8c,
- 0x50422da6,
- 0x5042adc3,
- 0x50432dd3,
- 0x5043ade3,
- 0x50442df2,
+ 0x50402e3a,
+ 0x5040ae4a,
+ 0x50412e64,
+ 0x5041ae73,
+ 0x50422e8d,
+ 0x5042aeaa,
+ 0x50432eba,
+ 0x5043aeca,
+ 0x50442ed9,
0x5044843f,
- 0x50452e06,
- 0x5045ae24,
- 0x50462e37,
- 0x5046ae4d,
- 0x50472e5f,
- 0x5047ae74,
- 0x50482e9a,
- 0x5048aea8,
- 0x50492ebb,
- 0x5049aed0,
- 0x504a2ee6,
- 0x504aaef6,
- 0x504b2f16,
- 0x504baf29,
- 0x504c2f4c,
- 0x504caf7a,
- 0x504d2f8c,
- 0x504dafa9,
- 0x504e2fc4,
- 0x504eafe0,
- 0x504f2ff2,
- 0x504fb009,
- 0x50503018,
+ 0x50452eed,
+ 0x5045af0b,
+ 0x50462f1e,
+ 0x5046af34,
+ 0x50472f46,
+ 0x5047af5b,
+ 0x50482f81,
+ 0x5048af8f,
+ 0x50492fa2,
+ 0x5049afb7,
+ 0x504a2fcd,
+ 0x504aafdd,
+ 0x504b2ffd,
+ 0x504bb010,
+ 0x504c3033,
+ 0x504cb061,
+ 0x504d3073,
+ 0x504db090,
+ 0x504e30ab,
+ 0x504eb0c7,
+ 0x504f30d9,
+ 0x504fb0f0,
+ 0x505030ff,
0x505086ef,
- 0x5051302b,
+ 0x50513112,
0x58320ec9,
0x68320e8b,
0x68328c25,
@@ -1007,6 +1016,7 @@ const char kOpenSSLReasonStringData[] =
"BIO_NOT_SET\0"
"BLOCK_CIPHER_PAD_IS_WRONG\0"
"BUFFERED_MESSAGES_ON_CIPHER_CHANGE\0"
+ "CANNOT_PARSE_LEAF_CERT\0"
"CA_DN_LENGTH_MISMATCH\0"
"CA_DN_TOO_LONG\0"
"CCS_RECEIVED_EARLY\0"
@@ -1050,6 +1060,7 @@ const char kOpenSSLReasonStringData[] =
"INVALID_COMPRESSION_LIST\0"
"INVALID_MESSAGE\0"
"INVALID_OUTER_RECORD_TYPE\0"
+ "INVALID_SCT_LIST\0"
"INVALID_SSL_SESSION\0"
"INVALID_TICKET_KEYS_LENGTH\0"
"LENGTH_MISMATCH\0"
@@ -1079,15 +1090,19 @@ const char kOpenSSLReasonStringData[] =
"NO_RENEGOTIATION\0"
"NO_REQUIRED_DIGEST\0"
"NO_SHARED_CIPHER\0"
+ "NO_SHARED_GROUP\0"
"NULL_SSL_CTX\0"
"NULL_SSL_METHOD_PASSED\0"
"OLD_SESSION_CIPHER_NOT_RETURNED\0"
+ "OLD_SESSION_PRF_HASH_MISMATCH\0"
"OLD_SESSION_VERSION_NOT_RETURNED\0"
"PARSE_TLSEXT\0"
"PATH_TOO_LONG\0"
"PEER_DID_NOT_RETURN_A_CERTIFICATE\0"
"PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE\0"
+ "PRE_SHARED_KEY_MUST_BE_LAST\0"
"PROTOCOL_IS_SHUTDOWN\0"
+ "PSK_IDENTITY_BINDER_COUNT_MISMATCH\0"
"PSK_IDENTITY_NOT_FOUND\0"
"PSK_NO_CLIENT_CB\0"
"PSK_NO_SERVER_CB\0"
@@ -1139,7 +1154,9 @@ const char kOpenSSLReasonStringData[] =
"TLSV1_ALERT_USER_CANCELLED\0"
"TLSV1_BAD_CERTIFICATE_HASH_VALUE\0"
"TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE\0"
+ "TLSV1_CERTIFICATE_REQUIRED\0"
"TLSV1_CERTIFICATE_UNOBTAINABLE\0"
+ "TLSV1_UNKNOWN_PSK_IDENTITY\0"
"TLSV1_UNRECOGNIZED_NAME\0"
"TLSV1_UNSUPPORTED_EXTENSION\0"
"TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\0"
@@ -1147,6 +1164,7 @@ const char kOpenSSLReasonStringData[] =
"TOO_MANY_EMPTY_FRAGMENTS\0"
"TOO_MANY_KEY_UPDATES\0"
"TOO_MANY_WARNING_ALERTS\0"
+ "TOO_MUCH_SKIPPED_EARLY_DATA\0"
"UNABLE_TO_FIND_ECDH_PARAMETERS\0"
"UNEXPECTED_EXTENSION\0"
"UNEXPECTED_MESSAGE\0"
diff --git a/src/compiler/objective_c_generator_helpers.h b/src/compiler/objective_c_generator_helpers.h
index 6f6220c6b4..9c9589e1db 100644
--- a/src/compiler/objective_c_generator_helpers.h
+++ b/src/compiler/objective_c_generator_helpers.h
@@ -23,6 +23,8 @@
#include "src/compiler/config.h"
#include "src/compiler/generator_helpers.h"
+#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
+
namespace grpc_objective_c_generator {
using ::grpc::protobuf::FileDescriptor;
@@ -30,7 +32,7 @@ using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
inline string MessageHeaderName(const FileDescriptor *file) {
- return grpc_generator::FileNameInUpperCamel(file) + ".pbobjc.h";
+ return google::protobuf::compiler::objectivec::FilePath(file) + ".pbobjc.h";
}
inline string ServiceClassName(const ServiceDescriptor *service) {
diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc
index bd9bc7482c..96a3375e96 100644
--- a/src/compiler/objective_c_plugin.cc
+++ b/src/compiler/objective_c_plugin.cc
@@ -44,7 +44,8 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
return true;
}
- ::grpc::string file_name = grpc_generator::FileNameInUpperCamel(file);
+ ::grpc::string file_name =
+ google::protobuf::compiler::objectivec::FilePath(file);
::grpc::string prefix = file->options().objc_class_prefix();
{
diff --git a/src/compiler/php_generator.cc b/src/compiler/php_generator.cc
index a7387d7223..6d34761fdf 100644
--- a/src/compiler/php_generator.cc
+++ b/src/compiler/php_generator.cc
@@ -52,14 +52,16 @@ void PrintMethod(const MethodDescriptor *method, Printer *out) {
vars["input_type_id"] = MessageIdentifierName(input_type->full_name());
vars["output_type_id"] = MessageIdentifierName(output_type->full_name());
- out->Print(GetPHPComments(method, " //").c_str());
+ out->Print("/**\n");
+ out->Print(GetPHPComments(method, " *").c_str());
if (method->client_streaming()) {
out->Print(vars,
- " // @param array $$metadata metadata\n"
- " // @param array $$options call options\n"
+ " * @param array $$metadata metadata\n"
+ " * @param array $$options call options\n */\n"
"public function $name$($$metadata = [], "
"$$options = []) {\n");
out->Indent();
+ out->Indent();
if (method->server_streaming()) {
out->Print("return $$this->_bidiRequest(");
} else {
@@ -71,12 +73,13 @@ void PrintMethod(const MethodDescriptor *method, Printer *out) {
"$$metadata, $$options);\n");
} else {
out->Print(vars,
- " // @param \\$input_type_id$ $$argument input argument\n"
- " // @param array $$metadata metadata\n"
- " // @param array $$options call options\n"
+ " * @param \\$input_type_id$ $$argument input argument\n"
+ " * @param array $$metadata metadata\n"
+ " * @param array $$options call options\n */\n"
"public function $name$(\\$input_type_id$ $$argument,\n"
" $$metadata = [], $$options = []) {\n");
out->Indent();
+ out->Indent();
if (method->server_streaming()) {
out->Print("return $$this->_serverStreamRequest(");
} else {
@@ -89,26 +92,32 @@ void PrintMethod(const MethodDescriptor *method, Printer *out) {
"$$metadata, $$options);\n");
}
out->Outdent();
+ out->Outdent();
out->Print("}\n\n");
}
// Prints out the service descriptor object
void PrintService(const ServiceDescriptor *service, Printer *out) {
map<grpc::string, grpc::string> vars;
- out->Print(GetPHPComments(service, "//").c_str());
+ out->Print("/**\n");
+ out->Print(GetPHPComments(service, " *").c_str());
+ out->Print(" */\n");
vars["name"] = service->name();
out->Print(vars, "class $name$Client extends \\Grpc\\BaseStub {\n\n");
out->Indent();
+ out->Indent();
out->Print(
- " // @param string $$hostname hostname\n"
- " // @param array $$opts channel options\n"
- " // @param \\Grpc\\Channel $$channel (optional) re-use channel "
- "object\n"
+ "/**\n * @param string $$hostname hostname\n"
+ " * @param array $$opts channel options\n"
+ " * @param \\Grpc\\Channel $$channel (optional) re-use channel "
+ "object\n */\n"
"public function __construct($$hostname, $$opts, "
"$$channel = null) {\n");
out->Indent();
+ out->Indent();
out->Print("parent::__construct($$hostname, $$opts, $$channel);\n");
out->Outdent();
+ out->Outdent();
out->Print("}\n\n");
for (int i = 0; i < service->method_count(); i++) {
grpc::string method_name =
@@ -116,7 +125,8 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
PrintMethod(service->method(i), out);
}
out->Outdent();
- out->Print("}\n\n");
+ out->Outdent();
+ out->Print("}\n");
}
}
@@ -138,13 +148,9 @@ grpc::string GenerateFile(const FileDescriptor *file,
map<grpc::string, grpc::string> vars;
vars["package"] = MessageIdentifierName(file->package());
- out.Print(vars, "namespace $package$ {\n\n");
- out.Indent();
+ out.Print(vars, "namespace $package$;\n\n");
PrintService(service, &out);
-
- out.Outdent();
- out.Print("}\n");
}
return output;
}
diff --git a/src/compiler/php_generator_helpers.h b/src/compiler/php_generator_helpers.h
index 8e35809357..3a5c08b3e6 100644
--- a/src/compiler/php_generator_helpers.h
+++ b/src/compiler/php_generator_helpers.h
@@ -39,12 +39,24 @@ inline grpc::string GetPHPServiceFilename(
return oss.str() + "/" + service->name() + "Client.php";
}
+// ReplaceAll replaces all instances of search with replace in s.
+inline grpc::string ReplaceAll(grpc::string s, const grpc::string &search,
+ const grpc::string &replace) {
+ size_t pos = 0;
+ while ((pos = s.find(search, pos)) != grpc::string::npos) {
+ s.replace(pos, search.length(), replace);
+ pos += replace.length();
+ }
+ return s;
+}
+
// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in front of leading comments.
template <typename DescriptorType>
inline grpc::string GetPHPComments(const DescriptorType *desc,
grpc::string prefix) {
- return grpc_generator::GetPrefixedComments(desc, true, prefix);
+ return ReplaceAll(grpc_generator::GetPrefixedComments(desc, true, prefix),
+ "*/", "&#42;/");
}
} // namespace grpc_php_generator
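
Note: the ReplaceAll helper added above exists so that GetPHPComments can escape any "*/" occurring inside a protobuf comment, which would otherwise terminate the generated PHPDoc block early. The real helper operates on C++ std::string; a rough C analogue of the same find-and-replace loop, shown only to illustrate the technique, might look like:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns a newly malloc'd copy of s with every occurrence of `search`
     * (assumed non-empty) replaced by `replace`; the caller frees it. */
    static char *replace_all(const char *s, const char *search,
                             const char *replace) {
      const size_t slen = strlen(search);
      const size_t rlen = strlen(replace);
      size_t count = 0;
      for (const char *q = s; (q = strstr(q, search)) != NULL; q += slen) ++count;
      char *out = malloc(strlen(s) - count * slen + count * rlen + 1);
      if (out == NULL) return NULL;
      char *dst = out;
      const char *p = s;
      const char *hit;
      while ((hit = strstr(p, search)) != NULL) {
        memcpy(dst, p, (size_t)(hit - p));
        dst += hit - p;
        memcpy(dst, replace, rlen);
        dst += rlen;
        p = hit + slen;
      }
      strcpy(dst, p); /* copy the tail, including the '\0' */
      return out;
    }

    int main(void) {
      /* Escape the comment terminator, as GetPHPComments now does. */
      char *escaped = replace_all("returns a Foo */ handle", "*/", "&#42;/");
      if (escaped != NULL) printf("%s\n", escaped); /* returns a Foo &#42;/ handle */
      free(escaped);
      return 0;
    }
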
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 1a06d0f21e..31b177c28e 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -756,6 +756,7 @@ bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
file->name().find_last_of(".proto") == file->name().size() - 1) {
grpc::string base =
file->name().substr(0, file->name().size() - proto_suffix_length);
+ std::replace(base.begin(), base.end(), '-', '_');
pb2_file_name = base + "_pb2.py";
pb2_grpc_file_name = base + "_pb2_grpc.py";
} else {
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 1e805e33e0..13fe2e6b1c 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -141,7 +141,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
- grpc_closure_init(&d->finish_recv, server_on_done_recv, elem,
+ GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
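
Note: this grpc_closure_init to GRPC_CLOSURE_INIT change is the first of many mechanical renames running through the rest of this merge. The pattern the macro manages is a callback bundled with its argument and a scheduler. A stripped-down sketch of that pattern, using simplified stand-in types rather than gRPC's real grpc_closure/grpc_exec_ctx machinery:

    #include <stdio.h>

    /* Simplified stand-ins: a callback, its argument, and an error code
     * bundled together so they can be scheduled and run later. */
    typedef struct closure {
      void (*cb)(void *arg, int error);
      void *arg;
    } closure;

    static void closure_init(closure *c, void (*cb)(void *, int), void *arg) {
      c->cb = cb;
      c->arg = arg;
    }

    static void closure_run(closure *c, int error) {
      /* The real code routes this through a scheduler (exec_ctx or a
       * combiner); here we simply invoke it inline. */
      c->cb(c->arg, error);
    }

    static void on_done_recv(void *arg, int error) {
      printf("on_done_recv(%s, error=%d)\n", (const char *)arg, error);
    }

    int main(void) {
      closure done;
      closure_init(&done, on_done_recv, (void *)"call-element");
      closure_run(&done, 0);
      return 0;
    }
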
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c
index 2e99257cd9..c3dca14305 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.c
@@ -211,9 +211,9 @@ void grpc_channel_watch_connectivity_state(
grpc_cq_begin_op(cq, tag);
gpr_mu_init(&w->mu);
- grpc_closure_init(&w->on_complete, watch_complete, w,
+ GRPC_CLOSURE_INIT(&w->on_complete, watch_complete, w,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&w->on_timeout, timeout_complete, w,
+ GRPC_CLOSURE_INIT(&w->on_timeout, timeout_complete, w,
grpc_schedule_on_exec_ctx);
w->phase = WAITING;
w->state = last_observed_state;
@@ -225,7 +225,7 @@ void grpc_channel_watch_connectivity_state(
watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
- grpc_closure_init(&w->watcher_timer_init, watcher_timer_init, wa,
+ GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
grpc_schedule_on_exec_ctx);
if (client_channel_elem->filter == &grpc_client_channel_filter) {
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index 013096d46b..de516ab4c9 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -178,8 +178,8 @@ typedef struct client_channel_channel_data {
grpc_slice_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args *resolver_result;
- /** a list of closures that are all waiting for config to come in */
- grpc_closure_list waiting_for_config_closures;
+ /** a list of closures that are all waiting for resolver result to come in */
+ grpc_closure_list waiting_for_resolver_result_closures;
/** resolver callback */
grpc_closure on_resolver_result_changed;
/** connectivity state being tracked */
@@ -275,8 +275,8 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
- grpc_closure_init(&w->on_changed, on_lb_policy_state_changed_locked, w,
- grpc_combiner_scheduler(chand->combiner, false));
+ GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
+ grpc_combiner_scheduler(chand->combiner));
w->state = current_state;
w->lb_policy = lb_policy;
grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
@@ -342,49 +342,15 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
}
}
-// Wrap a closure associated with \a lb_policy. The associated callback (\a
-// wrapped_on_pick_closure_cb) is responsible for unref'ing \a lb_policy after
-// scheduling \a wrapped_closure.
-typedef struct wrapped_on_pick_closure_arg {
- /* the closure instance using this struct as argument */
- grpc_closure wrapper_closure;
-
- /* the original closure. Usually a on_complete/notify cb for pick() and ping()
- * calls against the internal RR instance, respectively. */
- grpc_closure *wrapped_closure;
-
- /* The policy instance related to the closure */
- grpc_lb_policy *lb_policy;
-} wrapped_on_pick_closure_arg;
-
-// Invoke \a arg->wrapped_closure, unref \a arg->lb_policy and free \a arg.
-static void wrapped_on_pick_closure_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- wrapped_on_pick_closure_arg *wc_arg = arg;
- GPR_ASSERT(wc_arg != NULL);
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- GPR_ASSERT(wc_arg->lb_policy != NULL);
- grpc_closure_run(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->lb_policy, "pick_subchannel_wrapping");
- gpr_free(wc_arg);
-}
-
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = arg;
+ // Extract the following fields from the resolver result, if non-NULL.
char *lb_policy_name = NULL;
- grpc_lb_policy *lb_policy = NULL;
- grpc_lb_policy *old_lb_policy = NULL;
- grpc_slice_hash_table *method_params_table = NULL;
- grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- bool exit_idle = false;
- grpc_error *state_error =
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
+ grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
- service_config_parsing_state parsing_state;
- memset(&parsing_state, 0, sizeof(parsing_state));
-
- bool lb_policy_updated = false;
+ grpc_server_retry_throttle_data *retry_throttle_data = NULL;
+ grpc_slice_hash_table *method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
const grpc_arg *channel_arg =
@@ -419,32 +385,29 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (lb_policy_name == NULL) lb_policy_name = "pick_first";
- // Instantiate LB policy.
grpc_lb_policy_args lb_policy_args;
lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.combiner = chand->combiner;
-
+ // Check to see if we're already using the right LB policy.
+ // Note: It's safe to use chand->info_lb_policy_name here without
+ // taking a lock on chand->info_mu, because this function is the
+ // only thing that modifies its value, and it can only be invoked
+ // once at any given time.
const bool lb_policy_type_changed =
- (chand->info_lb_policy_name == NULL) ||
- (strcmp(chand->info_lb_policy_name, lb_policy_name) != 0);
+ chand->info_lb_policy_name == NULL ||
+ strcmp(chand->info_lb_policy_name, lb_policy_name) != 0;
if (chand->lb_policy != NULL && !lb_policy_type_changed) {
- // update
- lb_policy_updated = true;
+ // Continue using the same LB policy. Update with new addresses.
grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
} else {
- lb_policy =
+ // Instantiate new LB policy.
+ new_lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
- if (lb_policy != NULL) {
- GRPC_LB_POLICY_REF(lb_policy, "config_change");
- GRPC_ERROR_UNREF(state_error);
- state = grpc_lb_policy_check_connectivity_locked(exec_ctx, lb_policy,
- &state_error);
- old_lb_policy = chand->lb_policy;
- chand->lb_policy = lb_policy;
+ if (new_lb_policy == NULL) {
+ gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
}
}
-
// Find service config.
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
@@ -461,12 +424,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_uri *uri =
grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
+ service_config_parsing_state parsing_state;
+ memset(&parsing_state, 0, sizeof(parsing_state));
parsing_state.server_name =
uri->path[0] == '/' ? uri->path + 1 : uri->path;
grpc_service_config_parse_global_params(
service_config, parse_retry_throttle_params, &parsing_state);
- parsing_state.server_name = NULL;
grpc_uri_destroy(uri);
+ retry_throttle_data = parsing_state.retry_throttle_data;
method_params_table = grpc_service_config_create_method_config_table(
exec_ctx, service_config, method_parameters_create_from_json,
method_parameters_free);
@@ -480,12 +445,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
-
- if (lb_policy != NULL) {
- grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
- chand->interested_parties);
- }
-
+ // Now swap out fields in chand. Note that the new values may still
+ // be NULL if (e.g.) the resolver failed to return results or the
+ // results did not contain the necessary data.
+ //
+ // First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
if (lb_policy_name != NULL) {
gpr_free(chand->info_lb_policy_name);
@@ -496,75 +460,77 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
chand->info_service_config_json = service_config_json;
}
gpr_mu_unlock(&chand->info_mu);
-
+ // Swap out the retry throttle data.
if (chand->retry_throttle_data != NULL) {
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
}
- chand->retry_throttle_data = parsing_state.retry_throttle_data;
+ chand->retry_throttle_data = retry_throttle_data;
+ // Swap out the method params table.
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
chand->method_params_table = method_params_table;
- if (lb_policy != NULL) {
- grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
- } else if (chand->resolver == NULL /* disconnected */) {
- grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Channel disconnected", &error, 1));
- grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
- }
- if (!lb_policy_updated && lb_policy != NULL &&
- chand->exit_idle_when_lb_policy_arrives) {
- GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
- exit_idle = true;
- chand->exit_idle_when_lb_policy_arrives = false;
- }
-
- if (error == GRPC_ERROR_NONE && chand->resolver) {
- if (!lb_policy_updated) {
- set_channel_connectivity_state_locked(exec_ctx, chand, state,
- GRPC_ERROR_REF(state_error),
- "new_lb+resolver");
- if (lb_policy != NULL) {
- watch_lb_policy_locked(exec_ctx, chand, lb_policy, state);
- }
+ // If we have a new LB policy or are shutting down (in which case
+ // new_lb_policy will be NULL), swap out the LB policy, unreffing the
+ // old one and removing its fds from chand->interested_parties.
+ // Note that we do NOT do this if either (a) we updated the existing
+ // LB policy above or (b) we failed to create the new LB policy (in
+ // which case we want to continue using the most recent one we had).
+ if (new_lb_policy != NULL || error != GRPC_ERROR_NONE ||
+ chand->resolver == NULL) {
+ if (chand->lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ chand->lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next_locked(exec_ctx, chand->resolver,
- &chand->resolver_result,
- &chand->on_resolver_result_changed);
- } else {
+ chand->lb_policy = new_lb_policy;
+ }
+ // Now that we've swapped out the relevant fields of chand, check for
+ // error or shutdown.
+ if (error != GRPC_ERROR_NONE || chand->resolver == NULL) {
if (chand->resolver != NULL) {
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
}
- grpc_error *refs[] = {error, state_error};
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Got config after disconnection", refs, GPR_ARRAY_SIZE(refs)),
+ "Got resolver result after disconnection", &error, 1),
"resolver_gone");
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
+ grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Channel disconnected", &error, 1));
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx,
+ &chand->waiting_for_resolver_result_closures);
+ } else { // Not shutting down.
+ grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+ grpc_error *state_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
+ if (new_lb_policy != NULL) {
+ GRPC_ERROR_UNREF(state_error);
+ state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy,
+ &state_error);
+ grpc_pollset_set_add_pollset_set(exec_ctx,
+ new_lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx,
+ &chand->waiting_for_resolver_result_closures);
+ if (chand->exit_idle_when_lb_policy_arrives) {
+ grpc_lb_policy_exit_idle_locked(exec_ctx, new_lb_policy);
+ chand->exit_idle_when_lb_policy_arrives = false;
+ }
+ watch_lb_policy_locked(exec_ctx, chand, new_lb_policy, state);
+ }
+ set_channel_connectivity_state_locked(
+ exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
+ GRPC_ERROR_UNREF(state_error);
}
-
- if (!lb_policy_updated && lb_policy != NULL && exit_idle) {
- grpc_lb_policy_exit_idle_locked(exec_ctx, lb_policy);
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
- }
-
- if (old_lb_policy != NULL) {
- grpc_pollset_set_del_pollset_set(
- exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
- GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
- old_lb_policy = NULL;
- }
-
- if (!lb_policy_updated && lb_policy != NULL) {
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
- }
-
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
- GRPC_ERROR_UNREF(state_error);
}
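
Note: the restructured on_resolver_result_changed_locked above reduces to one swap rule: install the new LB policy only when one was actually created, or when an error or shutdown forces the old one out; a failed create keeps the existing policy. A condensed restatement with stand-in types (not the real gRPC structs, which also unref the old policy and move pollset fds):

    #include <stdio.h>

    typedef struct {
      int shutting_down; /* stands in for chand->resolver == NULL */
      const char *lb_policy;
    } channel;

    static void swap_lb_policy(channel *ch, const char *new_policy, int error) {
      if (new_policy != NULL || error != 0 || ch->shutting_down) {
        ch->lb_policy = new_policy;
      } /* else: keep the existing, possibly just-updated, policy */
    }

    int main(void) {
      channel ch = {0, "pick_first"};
      swap_lb_policy(&ch, NULL, 0);          /* failed create: keep old */
      printf("%s\n", ch.lb_policy);          /* pick_first */
      swap_lb_policy(&ch, "round_robin", 0); /* new policy: swap it in */
      printf("%s\n", ch.lb_policy);          /* round_robin */
      return 0;
    }
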
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -583,7 +549,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, op->send_ping,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
@@ -602,9 +568,10 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
if (!chand->started_resolving) {
- grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
+ grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
GRPC_ERROR_REF(op->disconnect_with_error));
- grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx,
+ &chand->waiting_for_resolver_result_closures);
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
@@ -618,7 +585,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");
- grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -634,10 +601,10 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
op->handler_private.extra_arg = elem;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
- grpc_closure_init(&op->handler_private.closure, start_transport_op_locked,
- op, grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
+ op, grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
@@ -668,7 +635,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
- chand->combiner = grpc_combiner_create(NULL);
+ chand->combiner = grpc_combiner_create();
gpr_mu_init(&chand->info_mu);
gpr_mu_init(&chand->external_connectivity_watcher_list_mu);
@@ -677,9 +644,9 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
chand->owning_stack = args->channel_stack;
- grpc_closure_init(&chand->on_resolver_result_changed,
+ GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed,
on_resolver_result_changed_locked, chand,
- grpc_combiner_scheduler(chand->combiner, false));
+ grpc_combiner_scheduler(chand->combiner));
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
@@ -737,10 +704,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_create(shutdown_resolver_locked, chand->resolver,
- grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
@@ -771,10 +737,15 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
* PER-CALL FUNCTIONS
*/
-#define GET_CALL(call_data) \
- ((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))
-
-#define CANCELLED_CALL ((grpc_subchannel_call *)1)
+// Max number of batches that can be pending on a call at any given
+// time. This includes:
+// recv_initial_metadata
+// send_initial_metadata
+// recv_message
+// send_message
+// recv_trailing_metadata
+// send_trailing_metadata
+#define MAX_WAITING_BATCHES 6
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -796,11 +767,9 @@ typedef struct client_channel_call_data {
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
- grpc_error *cancel_error;
-
- /** either 0 for no call, 1 for cancelled, or a pointer to a
- grpc_subchannel_call */
- gpr_atm subchannel_call;
+ /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
+ bit is 0), or a pointer to an error (if the lowest bit is 1) */
+ gpr_atm subchannel_call_or_error;
gpr_arena *arena;
bool pick_pending;
@@ -808,11 +777,10 @@ typedef struct client_channel_call_data {
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
grpc_polling_entity *pollent;
- grpc_transport_stream_op_batch **waiting_ops;
- size_t waiting_ops_count;
- size_t waiting_ops_capacity;
+ grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
+ size_t waiting_for_pick_batches_count;
- grpc_closure next_step;
+ grpc_transport_stream_op_batch_payload *initial_metadata_payload;
grpc_call_stack *owning_call;
@@ -822,63 +790,83 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete;
} call_data;
+typedef struct {
+ grpc_subchannel_call *subchannel_call;
+ grpc_error *error;
+} call_or_error;
+
+static call_or_error get_call_or_error(call_data *p) {
+ gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
+ if (c == 0)
+ return (call_or_error){NULL, NULL};
+ else if (c & 1)
+ return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
+ else
+ return (call_or_error){(grpc_subchannel_call *)c, NULL};
+}
+
+static bool set_call_or_error(call_data *p, call_or_error coe) {
+ // this should always be under a lock
+ call_or_error existing = get_call_or_error(p);
+ if (existing.error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(coe.error);
+ return false;
+ }
+ GPR_ASSERT(existing.subchannel_call == NULL);
+ if (coe.error != GRPC_ERROR_NONE) {
+ GPR_ASSERT(coe.subchannel_call == NULL);
+ gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+ } else {
+ GPR_ASSERT(coe.subchannel_call != NULL);
+ gpr_atm_rel_store(&p->subchannel_call_or_error,
+ (gpr_atm)coe.subchannel_call);
+ }
+ return true;
+}
+
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *call_elem) {
- grpc_subchannel_call *scc = GET_CALL((call_data *)call_elem->call_data);
- return scc == CANCELLED_CALL ? NULL : scc;
+ return get_call_or_error(call_elem->call_data).subchannel_call;
}
-static void add_waiting_locked(call_data *calld,
- grpc_transport_stream_op_batch *op) {
- GPR_TIMER_BEGIN("add_waiting_locked", 0);
- if (calld->waiting_ops_count == calld->waiting_ops_capacity) {
- calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity);
- calld->waiting_ops =
- gpr_realloc(calld->waiting_ops,
- calld->waiting_ops_capacity * sizeof(*calld->waiting_ops));
- }
- calld->waiting_ops[calld->waiting_ops_count++] = op;
- GPR_TIMER_END("add_waiting_locked", 0);
+static void waiting_for_pick_batches_add_locked(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+ batch;
}
-static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
- grpc_error *error) {
- size_t i;
- for (i = 0; i < calld->waiting_ops_count; i++) {
+static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
+ call_data *calld,
+ grpc_error *error) {
+ for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->waiting_ops[i], GRPC_ERROR_REF(error));
+ exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
}
- calld->waiting_ops_count = 0;
+ calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error);
}
-static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
- if (calld->waiting_ops_count == 0) {
+static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
+ call_data *calld) {
+ if (calld->waiting_for_pick_batches_count == 0) return;
+ call_or_error coe = get_call_or_error(calld);
+ if (coe.error != GRPC_ERROR_NONE) {
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld,
+ GRPC_ERROR_REF(coe.error));
return;
}
-
- grpc_subchannel_call *call = GET_CALL(calld);
- grpc_transport_stream_op_batch **ops = calld->waiting_ops;
- size_t nops = calld->waiting_ops_count;
- if (call == CANCELLED_CALL) {
- fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
- return;
- }
- calld->waiting_ops = NULL;
- calld->waiting_ops_count = 0;
- calld->waiting_ops_capacity = 0;
- for (size_t i = 0; i < nops; i++) {
- grpc_subchannel_call_process_op(exec_ctx, call, ops[i]);
+ for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+ grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
+ calld->waiting_for_pick_batches[i]);
}
- gpr_free(ops);
+ calld->waiting_for_pick_batches_count = 0;
}
-// Sets calld->method_params and calld->retry_throttle_data.
-// If the method params specify a timeout, populates
-// *per_method_deadline and returns true.
-static bool set_call_method_params_from_service_config_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- gpr_timespec *per_method_deadline) {
+// Applies service config to the call. Must be invoked once we know
+// that the resolver has returned results to the channel.
+static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (chand->retry_throttle_data != NULL) {
@@ -890,134 +878,124 @@ static bool set_call_method_params_from_service_config_locked(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
- if (gpr_time_cmp(calld->method_params->timeout,
+ // If the deadline from the service config is shorter than the one
+ // from the client API, reset the deadline timer.
+ if (chand->deadline_checking_enabled &&
+ gpr_time_cmp(calld->method_params->timeout,
gpr_time_0(GPR_TIMESPAN)) != 0) {
- *per_method_deadline =
+ const gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- return true;
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+ }
}
}
}
- return false;
}
-static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- /* apply service-config level configuration to the call (now that we're
- * certain it exists) */
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- gpr_timespec per_method_deadline;
- if (set_call_method_params_from_service_config_locked(exec_ctx, elem,
- &per_method_deadline)) {
- // If the deadline from the service config is shorter than the one
- // from the client API, reset the deadline timer.
- if (chand->deadline_checking_enabled &&
- gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
- calld->deadline = per_method_deadline;
- grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
- }
+static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
+ call_data *calld, grpc_error *error) {
+ grpc_subchannel_call *subchannel_call = NULL;
+ const grpc_connected_subchannel_call_args call_args = {
+ .pollent = calld->pollent,
+ .path = calld->path,
+ .start_time = calld->call_start_time,
+ .deadline = calld->deadline,
+ .arena = calld->arena,
+ .context = calld->subchannel_call_context};
+ grpc_error *new_error = grpc_connected_subchannel_create_call(
+ exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+ GPR_ASSERT(set_call_or_error(
+ calld, (call_or_error){.subchannel_call = subchannel_call}));
+ if (new_error != GRPC_ERROR_NONE) {
+ new_error = grpc_error_add_child(new_error, error);
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, new_error);
+ } else {
+ waiting_for_pick_batches_resume_locked(exec_ctx, calld);
}
+ GRPC_ERROR_UNREF(error);
}
-static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
+static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
grpc_error *error) {
- grpc_call_element *elem = arg;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GPR_ASSERT(calld->pick_pending);
calld->pick_pending = false;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
+ call_or_error coe = get_call_or_error(calld);
if (calld->connected_subchannel == NULL) {
- gpr_atm_no_barrier_store(&calld->subchannel_call, (gpr_atm)CANCELLED_CALL);
- fail_locked(exec_ctx, calld,
- error == GRPC_ERROR_NONE
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy")
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1));
- } else if (GET_CALL(calld) == CANCELLED_CALL) {
+ // Failed to create subchannel.
+ grpc_error *failure =
+ error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1);
+ set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, failure);
+ } else if (coe.error != GRPC_ERROR_NONE) {
/* already cancelled before subchannel became ready */
+ grpc_error *child_errors[] = {error, coe.error};
grpc_error *cancellation_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Cancelled before creating subchannel", &error, 1);
+ "Cancelled before creating subchannel", child_errors,
+ GPR_ARRAY_SIZE(child_errors));
/* if due to deadline, attach the deadline exceeded status to the error */
if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
cancellation_error =
grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_DEADLINE_EXCEEDED);
}
- fail_locked(exec_ctx, calld, cancellation_error);
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, cancellation_error);
} else {
/* Create call on subchannel. */
- grpc_subchannel_call *subchannel_call = NULL;
- const grpc_connected_subchannel_call_args call_args = {
- .pollent = calld->pollent,
- .path = calld->path,
- .start_time = calld->call_start_time,
- .deadline = calld->deadline,
- .arena = calld->arena,
- .context = calld->subchannel_call_context};
- grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
- gpr_atm_rel_store(&calld->subchannel_call,
- (gpr_atm)(uintptr_t)subchannel_call);
- if (new_error != GRPC_ERROR_NONE) {
- new_error = grpc_error_add_child(new_error, error);
- fail_locked(exec_ctx, calld, new_error);
- } else {
- retry_waiting_locked(exec_ctx, calld);
- }
+ create_subchannel_call_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+ GRPC_ERROR_UNREF(error);
}
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call = GET_CALL(calld);
- if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
+ grpc_subchannel_call *subchannel_call =
+ get_call_or_error(calld).subchannel_call;
+ if (subchannel_call == NULL) {
return NULL;
} else {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
}
}
+/** Returns true if the subchannel is available immediately (in which case
+    subchannel_ready_locked() should not be called), or false otherwise (in
+    which case subchannel_ready_locked() should be called once the subchannel
+    becomes available). */
+static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
+
typedef struct {
- grpc_metadata_batch *initial_metadata;
- uint32_t initial_metadata_flags;
- grpc_connected_subchannel **connected_subchannel;
- grpc_call_context_element *subchannel_call_context;
- grpc_closure *on_ready;
grpc_call_element *elem;
+ bool cancelled;
grpc_closure closure;
-} continue_picking_args;
+} pick_after_resolver_result_args;
-/** Return true if subchannel is available immediately (in which case on_ready
- should not be called), or false otherwise (in which case on_ready should be
- called when the subchannel is available). */
-static bool pick_subchannel_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_call_context_element *subchannel_call_context, grpc_closure *on_ready);
-
-static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- continue_picking_args *cpa = arg;
- if (cpa->connected_subchannel == NULL) {
+static void continue_picking_after_resolver_result_locked(
+ grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ pick_after_resolver_result_args *args = arg;
+ if (args->cancelled) {
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
- grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
+ subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
} else {
- if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
- cpa->initial_metadata_flags,
- cpa->connected_subchannel,
- cpa->subchannel_call_context, cpa->on_ready)) {
- grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
+ if (pick_subchannel_locked(exec_ctx, args->elem)) {
+ subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
}
}
- gpr_free(cpa);
+ gpr_free(args);
}
static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
@@ -1029,39 +1007,85 @@ static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
&calld->connected_subchannel,
GRPC_ERROR_REF(error));
}
- for (grpc_closure *closure = chand->waiting_for_config_closures.head;
+ // If we don't yet have a resolver result, then a closure for
+ // continue_picking_after_resolver_result_locked() will have been added to
+ // chand->waiting_for_resolver_result_closures, and it may not be invoked
+ // until after this call has been destroyed. We mark the operation as
+ // cancelled, so that when continue_picking_after_resolver_result_locked()
+ // is called, it will be a no-op. We also immediately invoke
+ // subchannel_ready_locked() to propagate the error back to the caller.
+ for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
closure != NULL; closure = closure->next_data.next) {
- continue_picking_args *cpa = closure->cb_arg;
- if (cpa->connected_subchannel == &calld->connected_subchannel) {
- cpa->connected_subchannel = NULL;
- grpc_closure_sched(exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ pick_after_resolver_result_args *args = closure->cb_arg;
+ if (!args->cancelled && args->elem == elem) {
+ args->cancelled = true;
+ subchannel_ready_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
}
}
GRPC_ERROR_UNREF(error);
}
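One invariant worth spelling out: a queued pick_after_resolver_result_args is always freed by its own closure, never by the canceller. The cancel path above only flips args->cancelled and reports completion through subchannel_ready_locked(); the queued closure still runs exactly once later and finds nothing to do. A condensed sketch of that no-op pass (hypothetical function name; the real logic is continue_picking_after_resolver_result_locked() above):

static void queued_pick_run(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  pick_after_resolver_result_args *args = arg;
  if (!args->cancelled) {
    /* only the non-cancelled case touches args->elem */
  }
  gpr_free(args); /* freed here in both cases, exactly once */
}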
-static bool pick_subchannel_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_call_context_element *subchannel_call_context,
- grpc_closure *on_ready) {
- GPR_TIMER_BEGIN("pick_subchannel", 0);
+// State for pick callback that holds a reference to the LB policy
+// from which the pick was requested.
+typedef struct {
+ grpc_lb_policy *lb_policy;
+ grpc_call_element *elem;
+ grpc_closure closure;
+} pick_callback_args;
+
+// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
+// Unrefs the LB policy after invoking subchannel_ready_locked().
+static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ pick_callback_args *args = arg;
+ GPR_ASSERT(args != NULL);
+ GPR_ASSERT(args->lb_policy != NULL);
+ subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
+ GRPC_LB_POLICY_UNREF(exec_ctx, args->lb_policy, "pick_subchannel");
+ gpr_free(args);
+}
+// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
+// If the pick was completed synchronously, unrefs the LB policy and
+// returns true.
+static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_lb_policy_pick_args *inputs) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
+ pick_callback_args *pick_args = gpr_zalloc(sizeof(*pick_args));
+ GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
+ pick_args->lb_policy = chand->lb_policy;
+ pick_args->elem = elem;
+ GRPC_CLOSURE_INIT(&pick_args->closure, pick_callback_done_locked, pick_args,
+ grpc_combiner_scheduler(chand->combiner));
+ const bool pick_done = grpc_lb_policy_pick_locked(
+ exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
+ calld->subchannel_call_context, NULL, &pick_args->closure);
+ if (pick_done) {
+ /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "pick_subchannel");
+ gpr_free(pick_args);
+ }
+ return pick_done;
+}
- GPR_ASSERT(connected_subchannel);
-
+static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ GPR_TIMER_BEGIN("pick_subchannel", 0);
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ bool pick_done = false;
if (chand->lb_policy != NULL) {
- apply_final_configuration_locked(exec_ctx, elem);
- grpc_lb_policy *lb_policy = chand->lb_policy;
- GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
+ apply_service_config_to_call_locked(exec_ctx, elem);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
+ uint32_t initial_metadata_flags =
+ calld->initial_metadata_payload->send_initial_metadata
+ .send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
@@ -1077,169 +1101,105 @@ static bool pick_subchannel_locked(
}
}
const grpc_lb_policy_pick_args inputs = {
- initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem};
-
- // Wrap the user-provided callback in order to hold a strong reference to
- // the LB policy for the duration of the pick.
- wrapped_on_pick_closure_arg *w_on_pick_arg =
- gpr_zalloc(sizeof(*w_on_pick_arg));
- grpc_closure_init(&w_on_pick_arg->wrapper_closure,
- wrapped_on_pick_closure_cb, w_on_pick_arg,
- grpc_schedule_on_exec_ctx);
- w_on_pick_arg->wrapped_closure = on_ready;
- GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel_wrapping");
- w_on_pick_arg->lb_policy = lb_policy;
- const bool pick_done = grpc_lb_policy_pick_locked(
- exec_ctx, lb_policy, &inputs, connected_subchannel,
- subchannel_call_context, NULL, &w_on_pick_arg->wrapper_closure);
- if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
- GRPC_LB_POLICY_UNREF(exec_ctx, w_on_pick_arg->lb_policy,
- "pick_subchannel_wrapping");
- gpr_free(w_on_pick_arg);
+ calld->initial_metadata_payload->send_initial_metadata
+ .send_initial_metadata,
+ initial_metadata_flags, &calld->lb_token_mdelem};
+ pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
+ } else if (chand->resolver != NULL) {
+ if (!chand->started_resolving) {
+ chand->started_resolving = true;
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
}
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
- GPR_TIMER_END("pick_subchannel", 0);
- return pick_done;
- }
- if (chand->resolver != NULL && !chand->started_resolving) {
- chand->started_resolving = true;
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next_locked(exec_ctx, chand->resolver,
- &chand->resolver_result,
- &chand->on_resolver_result_changed);
- }
- if (chand->resolver != NULL) {
- continue_picking_args *cpa = gpr_malloc(sizeof(*cpa));
- cpa->initial_metadata = initial_metadata;
- cpa->initial_metadata_flags = initial_metadata_flags;
- cpa->connected_subchannel = connected_subchannel;
- cpa->subchannel_call_context = subchannel_call_context;
- cpa->on_ready = on_ready;
- cpa->elem = elem;
- grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
- grpc_combiner_scheduler(chand->combiner, true));
- grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
- GRPC_ERROR_NONE);
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+ args->elem = elem;
+ GRPC_CLOSURE_INIT(&args->closure,
+ continue_picking_after_resolver_result_locked, args,
+ grpc_combiner_scheduler(chand->combiner));
+ grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
+ &args->closure, GRPC_ERROR_NONE);
} else {
- grpc_closure_sched(exec_ctx, on_ready,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+ subchannel_ready_locked(
+ exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
-
GPR_TIMER_END("pick_subchannel", 0);
- return false;
+ return pick_done;
}
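The hunk elided at @@ -1077 is where the precedence described in the comment above is resolved: an explicit per-call wait_for_ready setting wins over the service config. A sketch of that resolution, assuming the WAIT_FOR_READY_UNSET/WAIT_FOR_READY_TRUE values of the method_params enum (the enum names are assumptions):

/* Sketch: API-supplied wait_for_ready beats the service config. */
const bool wait_for_ready_set_from_service_config =
    calld->method_params != NULL &&
    calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
  if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
    initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
  } else {
    initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
  }
}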
-static void start_transport_stream_op_batch_locked_inner(
- grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
+static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
+ grpc_transport_stream_op_batch *op = arg;
+ grpc_call_element *elem = op->handler_private.extra_arg;
call_data *calld = elem->call_data;
- grpc_subchannel_call *call;
-
+ channel_data *chand = elem->channel_data;
/* need to recheck that another thread hasn't set the call */
- call = GET_CALL(calld);
- if (call == CANCELLED_CALL) {
+ call_or_error coe = get_call_or_error(calld);
+ if (coe.error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
- /* early out */
- return;
+ exec_ctx, op, GRPC_ERROR_REF(coe.error));
+ goto done;
}
- if (call != NULL) {
- grpc_subchannel_call_process_op(exec_ctx, call, op);
- /* early out */
- return;
+ if (coe.subchannel_call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
+ goto done;
}
+ // Add to waiting-for-pick list. If we succeed in getting a
+ // subchannel call below, we'll handle this batch (along with any
+ // other waiting batches) in waiting_for_pick_batches_resume_locked().
+ waiting_for_pick_batches_add_locked(calld, op);
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_stream) {
- if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
- (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
- /* recurse to retry */
- start_transport_stream_op_batch_locked_inner(exec_ctx, op, elem);
- /* early out */
- return;
- } else {
- /* Stash a copy of cancel_error in our call data, so that we can use
- it for subsequent operations. This ensures that if the call is
- cancelled before any ops are passed down (e.g., if the deadline
- is in the past when the call starts), we can return the right
- error to the caller when the first op does get passed down. */
- calld->cancel_error =
- GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error);
- if (calld->pick_pending) {
- cancel_pick_locked(
- exec_ctx, elem,
- GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
- } else {
- fail_locked(exec_ctx, calld,
- GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
- }
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op,
- GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
- /* early out */
- return;
+ grpc_error *error = op->payload->cancel_stream.cancel_error;
+ /* Stash a copy of cancel_error in our call data, so that we can use
+ it for subsequent operations. This ensures that if the call is
+ cancelled before any ops are passed down (e.g., if the deadline
+ is in the past when the call starts), we can return the right
+ error to the caller when the first op does get passed down. */
+ set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
+ if (calld->pick_pending) {
+ cancel_pick_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld,
+ GRPC_ERROR_REF(error));
+ goto done;
}
/* if we don't have a subchannel, try to get one */
if (!calld->pick_pending && calld->connected_subchannel == NULL &&
op->send_initial_metadata) {
+ calld->initial_metadata_payload = op->payload;
calld->pick_pending = true;
- grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
- grpc_combiner_scheduler(chand->combiner, true));
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel_locked(
- exec_ctx, elem,
- op->payload->send_initial_metadata.send_initial_metadata,
- op->payload->send_initial_metadata.send_initial_metadata_flags,
- &calld->connected_subchannel, calld->subchannel_call_context,
- &calld->next_step)) {
+ if (pick_subchannel_locked(exec_ctx, elem)) {
+ // Pick was returned synchronously.
calld->pick_pending = false;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
if (calld->connected_subchannel == NULL) {
- gpr_atm_no_barrier_store(&calld->subchannel_call,
- (gpr_atm)CANCELLED_CALL);
grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
- fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return; // Early out.
+ set_call_or_error(calld,
+ (call_or_error){.error = GRPC_ERROR_REF(error)});
+ waiting_for_pick_batches_fail_locked(exec_ctx, calld, error);
+ } else {
+ // Create subchannel call.
+ create_subchannel_call_locked(exec_ctx, calld, GRPC_ERROR_NONE);
}
} else {
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
}
}
- /* if we've got a subchannel, then let's ask it to create a call */
- if (!calld->pick_pending && calld->connected_subchannel != NULL) {
- grpc_subchannel_call *subchannel_call = NULL;
- const grpc_connected_subchannel_call_args call_args = {
- .pollent = calld->pollent,
- .path = calld->path,
- .start_time = calld->call_start_time,
- .deadline = calld->deadline,
- .arena = calld->arena,
- .context = calld->subchannel_call_context};
- grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
- gpr_atm_rel_store(&calld->subchannel_call,
- (gpr_atm)(uintptr_t)subchannel_call);
- if (error != GRPC_ERROR_NONE) {
- fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- } else {
- retry_waiting_locked(exec_ctx, calld);
- /* recurse to retry */
- start_transport_stream_op_batch_locked_inner(exec_ctx, op, elem);
- }
- /* early out */
- return;
- }
- /* nothing to be done but wait */
- add_waiting_locked(calld, op);
+done:
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "start_transport_stream_op_batch");
+ GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
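waiting_for_pick_batches_resume_locked() and _fail_locked() drain the per-call queue filled by waiting_for_pick_batches_add_locked() above. A minimal sketch of the resume side, assuming call_data keeps a plain batch array plus a count (the field names are assumptions based on the accessors visible in this diff):

static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
                                                   call_data *calld) {
  if (calld->waiting_for_pick_batches_count == 0) return;
  call_or_error coe = get_call_or_error(calld);
  GPR_ASSERT(coe.subchannel_call != NULL);
  /* replay, in order, every batch that arrived while the pick was pending */
  for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
    grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
                                    calld->waiting_for_pick_batches[i]);
  }
  calld->waiting_for_pick_batches_count = 0;
}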
@@ -1258,34 +1218,10 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
calld->retry_throttle_data);
}
}
- grpc_closure_run(exec_ctx, calld->original_on_complete,
+ GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete,
GRPC_ERROR_REF(error));
}
-static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
-
- grpc_transport_stream_op_batch *op = arg;
- grpc_call_element *elem = op->handler_private.extra_arg;
- call_data *calld = elem->call_data;
-
- if (op->recv_trailing_metadata) {
- GPR_ASSERT(op->on_complete != NULL);
- calld->original_on_complete = op->on_complete;
- grpc_closure_init(&calld->on_complete, on_complete, elem,
- grpc_schedule_on_exec_ctx);
- op->on_complete = &calld->on_complete;
- }
-
- start_transport_stream_op_batch_locked_inner(exec_ctx, op, elem);
-
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "start_transport_stream_op_batch");
- GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
-}
-
/* The logic here is fairly complicated, due to (a) the fact that we
need to handle the case where we receive the send op before the
initial metadata op, and (b) the need for efficiency, especially in
@@ -1304,18 +1240,27 @@ static void cc_start_transport_stream_op_batch(
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
op);
}
+ // Intercept on_complete for recv_trailing_metadata so that we can
+ // check retry throttle status.
+ if (op->recv_trailing_metadata) {
+ GPR_ASSERT(op->on_complete != NULL);
+ calld->original_on_complete = op->on_complete;
+ GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
+ grpc_schedule_on_exec_ctx);
+ op->on_complete = &calld->on_complete;
+ }
/* try to (atomically) get the call */
- grpc_subchannel_call *call = GET_CALL(calld);
+ call_or_error coe = get_call_or_error(calld);
GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
- if (call == CANCELLED_CALL) {
+ if (coe.error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
+ exec_ctx, op, GRPC_ERROR_REF(coe.error));
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
/* early out */
return;
}
- if (call != NULL) {
- grpc_subchannel_call_process_op(exec_ctx, call, op);
+ if (coe.subchannel_call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, op);
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
/* early out */
return;
@@ -1323,11 +1268,10 @@ static void cc_start_transport_stream_op_batch(
/* we failed; lock and figure out what to do */
GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
op->handler_private.extra_arg = elem;
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_init(&op->handler_private.closure,
- start_transport_stream_op_batch_locked, op,
- grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, GRPC_CLOSURE_INIT(&op->handler_private.closure,
+ start_transport_stream_op_batch_locked, op,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
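Two mechanical renames run through the rest of this diff: grpc_combiner_scheduler() loses its covered-by-poller bool, and the grpc_closure_init/sched/run functions become GRPC_CLOSURE_INIT/SCHED/RUN macros so that debug builds can record where a closure was created. A sketch of the macro shape (the real definition lives in src/core/lib/iomgr/closure.h; treat the debug-build signature as an assumption):

#ifndef NDEBUG
#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
  grpc_closure_init(__FILE__, __LINE__, closure, cb, cb_arg, scheduler)
#else
#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
  grpc_closure_init(closure, cb, cb_arg, scheduler)
#endif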
@@ -1364,15 +1308,17 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
- GRPC_ERROR_UNREF(calld->cancel_error);
- grpc_subchannel_call *call = GET_CALL(calld);
- if (call != NULL && call != CANCELLED_CALL) {
- grpc_subchannel_call_set_cleanup_closure(call, then_schedule_closure);
+ call_or_error coe = get_call_or_error(calld);
+ GRPC_ERROR_UNREF(coe.error);
+ if (coe.subchannel_call != NULL) {
+ grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
+ then_schedule_closure);
then_schedule_closure = NULL;
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
+ "client_channel_destroy_call");
}
GPR_ASSERT(!calld->pick_pending);
- GPR_ASSERT(calld->waiting_ops_count == 0);
+ GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
if (calld->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
"picked");
@@ -1383,8 +1329,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
calld->subchannel_call_context[i].value);
}
}
- gpr_free(calld->waiting_ops);
- grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -1438,10 +1383,9 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_create(try_to_connect_locked, chand,
- grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
return out;
@@ -1530,7 +1474,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
"external_connectivity_watcher");
external_connectivity_watcher_list_remove(w->chand, w);
gpr_free(w);
- grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1539,8 +1483,8 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
external_connectivity_watcher *found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
- grpc_closure_run(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
- grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+ GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
grpc_schedule_on_exec_ctx);
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
@@ -1575,9 +1519,9 @@ void grpc_client_channel_watch_connectivity_state(
chand->interested_parties);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
"external_connectivity_watcher");
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
- grpc_closure_init(&w->my_closure, watch_connectivity_state_locked, w,
- grpc_combiner_scheduler(chand->combiner, true)),
+ GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.c b/src/core/ext/filters/client_channel/client_channel_factory.c
index 04bb4d5a2d..7220a8639e 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.c
+++ b/src/core/ext/filters/client_channel/client_channel_factory.c
@@ -17,6 +17,7 @@
*/
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
+#include "src/core/lib/channel/channel_args.h"
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) {
factory->vtable->ref(factory);
@@ -63,10 +64,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = {
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_CLIENT_CHANNEL_FACTORY;
- arg.value.pointer.p = factory;
- arg.value.pointer.vtable = &factory_arg_vtable;
- return arg;
+ return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY,
+ factory, &factory_arg_vtable);
}
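The hand-rolled grpc_arg initialization collapses into the helpers from channel_args.h. The declarations this diff leans on look roughly like the following (sketched from their usage here; check channel_args.h for the authoritative prototypes):

grpc_arg grpc_channel_arg_string_create(char *name, char *value);
grpc_arg grpc_channel_arg_pointer_create(char *name, void *value,
                                         const grpc_arg_pointer_vtable *vtable);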
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.c b/src/core/ext/filters/client_channel/client_channel_plugin.c
index 06a3d9e25a..60e77d6268 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.c
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.c
@@ -54,10 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
char *default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
- grpc_arg arg;
- arg.type = GRPC_ARG_STRING;
- arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
- arg.value.string = default_authority;
+ grpc_arg arg = grpc_channel_arg_string_create(GRPC_ARG_DEFAULT_AUTHORITY,
+ default_authority);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);
@@ -80,6 +78,9 @@ void grpc_client_channel_init(void) {
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
(void *)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
+#ifndef NDEBUG
+ grpc_register_tracer("resolver_refcount", &grpc_trace_resolver_refcount);
+#endif
}
void grpc_client_channel_shutdown(void) {
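A usage note on the registration above: tracers added via grpc_register_tracer() are switched on at runtime through the GRPC_TRACE environment variable, a comma-separated list of tracer names (for example GRPC_TRACE=resolver_refcount,lb_policy_refcount together with GRPC_VERBOSITY=DEBUG). Because the registration sits under #ifndef NDEBUG, the refcount tracers exist only in debug builds.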
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.c b/src/core/ext/filters/client_channel/http_connect_handshaker.c
index 5e4bfe74d8..0952dc6d4e 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.c
@@ -118,7 +118,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
handshaker->shutdown = true;
}
// Invoke callback.
- grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error);
}
// Callback invoked when finished writing HTTP CONNECT request.
@@ -217,7 +217,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
goto done;
}
// Success. Invoke handshake-done callback.
- grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error);
done:
// Set shutdown to true so that subsequent calls to
// http_connect_handshaker_shutdown() do nothing.
@@ -266,7 +266,7 @@ static void http_connect_handshaker_do_handshake(
gpr_mu_lock(&handshaker->mu);
handshaker->shutdown = true;
gpr_mu_unlock(&handshaker->mu);
- grpc_closure_sched(exec_ctx, on_handshake_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done, GRPC_ERROR_NONE);
return;
}
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
@@ -339,9 +339,9 @@ static grpc_handshaker* grpc_http_connect_handshaker_create() {
gpr_mu_init(&handshaker->mu);
gpr_ref_init(&handshaker->refcount, 1);
grpc_slice_buffer_init(&handshaker->write_buffer);
- grpc_closure_init(&handshaker->request_done_closure, on_write_done,
+ GRPC_CLOSURE_INIT(&handshaker->request_done_closure, on_write_done,
handshaker, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&handshaker->response_read_closure, on_read_done,
+ GRPC_CLOSURE_INIT(&handshaker->response_read_closure, on_read_done,
handshaker, grpc_schedule_on_exec_ctx);
grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
&handshaker->http_response);
diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c
index b8332980e5..cfb5ec6f00 100644
--- a/src/core/ext/filters/client_channel/http_proxy.c
+++ b/src/core/ext/filters/client_channel/http_proxy.c
@@ -80,10 +80,9 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
grpc_uri_destroy(uri);
return false;
}
- grpc_arg new_arg;
- new_arg.key = GRPC_ARG_HTTP_CONNECT_SERVER;
- new_arg.type = GRPC_ARG_STRING;
- new_arg.value.string = uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ grpc_arg new_arg = grpc_channel_arg_string_create(
+ GRPC_ARG_HTTP_CONNECT_SERVER,
+ uri->path[0] == '/' ? uri->path + 1 : uri->path);
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
grpc_uri_destroy(uri);
return true;
diff --git a/src/core/ext/filters/client_channel/lb_policy.c b/src/core/ext/filters/client_channel/lb_policy.c
index e3efb735df..8d69ba6af5 100644
--- a/src/core/ext/filters/client_channel/lb_policy.c
+++ b/src/core/ext/filters/client_channel/lb_policy.c
@@ -21,6 +21,10 @@
#define WEAK_REF_BITS 16
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_lb_policy_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable,
grpc_combiner *combiner) {
@@ -30,7 +34,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
-#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
@@ -46,11 +50,12 @@ static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
-#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY: 0x%" PRIxPTR " %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR
- " [%s]",
- (intptr_t)c, purpose, old_val, old_val + delta, reason);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_lb_policy_refcount)) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "LB_POLICY: 0x%p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
+ purpose, old_val, old_val + delta, reason);
+ }
#endif
return old_val;
}
@@ -74,11 +79,10 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_create(shutdown_locked, policy,
- grpc_combiner_scheduler(policy->combiner, false)),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
+ shutdown_locked, policy,
+ grpc_combiner_scheduler(policy->combiner)),
+ GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 42503c37ca..645d51e138 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -29,6 +29,10 @@ typedef struct grpc_lb_policy grpc_lb_policy;
typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
typedef struct grpc_lb_policy_args grpc_lb_policy_args;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
+#endif
+
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
gpr_atm ref_pair;
@@ -96,8 +100,7 @@ struct grpc_lb_policy_vtable {
const grpc_lb_policy_args *args);
};
-//#define GRPC_LB_POLICY_REFCOUNT_DEBUG
-#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
+#ifndef NDEBUG
/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
index 10e59a99de..52c6e38c87 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
@@ -53,7 +53,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
- grpc_closure_run(exec_ctx, calld->original_on_complete_for_send,
+ GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete_for_send,
GRPC_ERROR_REF(error));
}
@@ -63,7 +63,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
- grpc_closure_run(exec_ctx, calld->original_recv_initial_metadata_ready,
+ GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
}
@@ -104,7 +104,7 @@ static void start_transport_stream_op_batch(
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
calld->original_on_complete_for_send = batch->on_complete;
- grpc_closure_init(&calld->on_complete_for_send, on_complete_for_send, calld,
+ GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, calld,
grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete_for_send;
}
@@ -112,7 +112,7 @@ static void start_transport_stream_op_batch(
if (batch->recv_initial_metadata) {
calld->original_recv_initial_metadata_ready =
batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
- grpc_closure_init(&calld->recv_initial_metadata_ready,
+ GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, calld,
grpc_schedule_on_exec_ctx);
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index 5ecbd3ba93..5a5ff2902d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -184,7 +184,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
wrapped_rr_closure_arg *wc_arg = arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
if (wc_arg->rr_policy != NULL) {
/* if *target is NULL, no pick has been made by the RR policy (eg, all
@@ -256,7 +256,7 @@ static void add_pending_pick(pending_pick **root,
pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
pp->wrapped_on_complete_arg.free_when_done = pp;
- grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
wrapped_rr_closure, &pp->wrapped_on_complete_arg,
grpc_schedule_on_exec_ctx);
*root = pp;
@@ -275,7 +275,7 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
- grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
wrapped_rr_closure, &pping->wrapped_notify_arg,
grpc_schedule_on_exec_ctx);
*root = pping;
@@ -635,7 +635,7 @@ static bool pick_from_internal_rr_locked(
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
}
@@ -663,7 +663,7 @@ static bool pick_from_internal_rr_locked(
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
}
@@ -739,9 +739,9 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
* It'll be deallocated in glb_rr_connectivity_changed() */
rr_connectivity_data *rr_connectivity =
gpr_zalloc(sizeof(rr_connectivity_data));
- grpc_closure_init(&rr_connectivity->on_change,
+ GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
- grpc_combiner_scheduler(glb_policy->base.combiner, false));
+ grpc_combiner_scheduler(glb_policy->base.combiner));
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = rr_state;
@@ -974,10 +974,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
- grpc_arg new_arg;
- new_arg.key = GRPC_ARG_LB_POLICY_NAME;
- new_arg.type = GRPC_ARG_STRING;
- new_arg.value.string = "grpclb";
+ grpc_arg new_arg =
+ grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb");
static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
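The remove-then-add idiom above is how a channel arg is overridden without leaving duplicates: any existing GRPC_ARG_LB_POLICY_NAME entry is dropped from the copy before the new one is appended. The prototype, sketched from its use here:

grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
    const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
    const grpc_arg *to_add, size_t num_to_add);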
@@ -1004,9 +1002,9 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
- grpc_closure_init(&glb_policy->lb_channel_on_connectivity_changed,
+ GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
glb_lb_channel_on_connectivity_changed_cb, glb_policy,
- grpc_combiner_scheduler(args->combiner, false));
+ grpc_combiner_scheduler(args->combiner));
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
@@ -1078,14 +1076,14 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_NONE);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_NONE);
pping = next;
}
@@ -1101,7 +1099,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
@@ -1125,7 +1123,7 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
@@ -1160,7 +1158,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
- grpc_closure_sched(exec_ctx, on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No mdelem storage for the LB token. Load reporting "
"won't work without it. Failing"));
@@ -1179,7 +1177,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));
- grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
+ GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
@@ -1250,9 +1248,9 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
const gpr_timespec next_client_load_report_time =
gpr_time_add(now, glb_policy->client_stats_report_interval);
- grpc_closure_init(&glb_policy->client_load_report_closure,
+ GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner, false));
+ grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
&glb_policy->client_load_report_closure, now);
@@ -1278,9 +1276,9 @@ static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_SEND_MESSAGE;
op.data.send_message.send_message = glb_policy->client_load_report_payload;
- grpc_closure_init(&glb_policy->client_load_report_closure,
+ GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
client_load_report_done_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner, false));
+ grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, &op, 1,
&glb_policy->client_load_report_closure);
@@ -1384,15 +1382,15 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
lb_on_sent_initial_request_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner, false));
- grpc_closure_init(&glb_policy->lb_on_server_status_received,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner, false));
- grpc_closure_init(&glb_policy->lb_on_response_received,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received,
lb_on_response_received_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner, false));
+ grpc_combiner_scheduler(glb_policy->base.combiner));
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
@@ -1693,9 +1691,9 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
- grpc_closure_init(
- &glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
- glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
+ lb_call_on_retry_timer_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->retry_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
@@ -1882,6 +1880,9 @@ static bool maybe_add_client_load_reporting_filter(
void grpc_lb_policy_grpclb_init() {
grpc_register_lb_policy(grpc_glb_lb_factory_create());
grpc_register_tracer("glb", &grpc_lb_glb_trace);
+#ifndef NDEBUG
+ grpc_register_tracer("lb_policy_refcount", &grpc_trace_lb_policy_refcount);
+#endif
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
index 51cc632649..d0acd7a901 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
@@ -95,6 +95,9 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+  if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+    gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
+  }
   gpr_free(p->subchannels);
   gpr_free(p->new_subchannels);
   gpr_free(p);
}
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
@@ -118,7 +121,7 @@ static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
pp = next;
}
@@ -135,7 +138,7 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
gpr_free(pp);
@@ -160,7 +163,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
gpr_free(pp);
@@ -258,7 +261,7 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
- grpc_closure_sched(exec_ctx, closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}
@@ -268,11 +271,20 @@ static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
if (p->num_subchannels > 0) {
GPR_ASSERT(p->selected == NULL);
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p",
+ (void *)p, (void *)p->subchannels[p->checking_subchannel]);
+ }
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
p->updating_subchannels = true;
} else if (p->selected != NULL) {
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG,
+ "Pick First %p unsubscribing from selected subchannel %p",
+ (void *)p, (void *)p->selected);
+ }
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
p->updating_selected = true;
@@ -451,12 +463,25 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_subchannel *selected_subchannel;
pending_pick *pp;
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(
+ GPR_DEBUG,
+ "Pick First %p connectivity changed. Updating selected: %d; Updating "
+ "subchannels: %d; Checking %lu index (%lu total); State: %d; ",
+ (void *)p, p->updating_selected, p->updating_subchannels,
+ (unsigned long)p->checking_subchannel,
+ (unsigned long)p->num_subchannels, p->checking_connectivity);
+ }
bool restart = false;
- if (p->updating_selected && error == GRPC_ERROR_CANCELLED) {
+ if (p->updating_selected && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for p->selected */
    GPR_ASSERT(p->selected != NULL);
+    if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+      gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p",
+              (void *)p, (void *)p->selected);
+    }
    GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
                                    "pf_update_connectivity");
p->updating_selected = false;
if (p->num_new_subchannels == 0) {
p->selected = NULL;
@@ -464,12 +489,16 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
restart = true;
}
- if (p->updating_subchannels && error == GRPC_ERROR_CANCELLED) {
+ if (p->updating_subchannels && error != GRPC_ERROR_NONE) {
/* Captured the unsubscription for the checking subchannel */
GPR_ASSERT(p->selected == NULL);
    for (size_t i = 0; i < p->num_subchannels; i++) {
+      if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+        gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p,
+                (void *)p->subchannels[i]);
+      }
      GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i],
                            "pf_update_connectivity");
}
gpr_free(p->subchannels);
p->subchannels = NULL;
@@ -481,14 +510,12 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (restart) {
p->selected = NULL;
p->selected_key = NULL;
-
GPR_ASSERT(p->new_subchannels != NULL);
GPR_ASSERT(p->num_new_subchannels > 0);
p->num_subchannels = p->num_new_subchannels;
p->subchannels = p->new_subchannels;
p->num_new_subchannels = 0;
p->new_subchannels = NULL;
-
if (p->started_picking) {
/* If we were picking, continue to do so over the new subchannels,
* starting from the 0th index. */
@@ -542,7 +569,9 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
"picked_first");
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO, "Selected subchannel %p", (void *)p->selected);
+ gpr_log(GPR_INFO,
+ "Pick First %p selected subchannel %p (connected %p)",
+ (void *)p, (void *)selected_subchannel, (void *)p->selected);
}
p->selected_key = grpc_subchannel_get_key(selected_subchannel);
/* drop the pick list: we are connected now */
@@ -557,7 +586,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
"Servicing pending pick with selected subchannel %p",
(void *)p->selected);
}
- grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
@@ -568,7 +597,8 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
if (p->checking_subchannel == 0) {
- /* only trigger transient failure when we've tried all alternatives */
+ /* only trigger transient failure when we've tried all alternatives
+ */
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
@@ -610,7 +640,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@@ -652,10 +682,13 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
+ }
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
- grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed_locked, p,
- grpc_combiner_scheduler(args->combiner, false));
+ GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
+ grpc_combiner_scheduler(args->combiner));
return &p->base;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index 33d9522380..8e9d6b0f47 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -126,6 +126,8 @@ struct rr_subchannel_list {
size_t num_ready;
/** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
+ /** how many subchannels are in state SHUTDOWN */
+ size_t num_shutdown;
/** how many subchannels are in state IDLE */
size_t num_idle;
@@ -288,7 +290,7 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp);
@@ -311,7 +313,7 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
gpr_free(pp);
@@ -336,7 +338,7 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
gpr_free(pp);
@@ -425,6 +427,9 @@ static void update_state_counters_locked(subchannel_data *sd) {
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(subchannel_list->num_transient_failures > 0);
--subchannel_list->num_transient_failures;
+ } else if (sd->prev_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ GPR_ASSERT(subchannel_list->num_shutdown > 0);
+ --subchannel_list->num_shutdown;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle;
@@ -433,6 +438,8 @@ static void update_state_counters_locked(subchannel_data *sd) {
++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
++subchannel_list->num_transient_failures;
+ } else if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ ++subchannel_list->num_shutdown;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) {
++subchannel_list->num_idle;
}
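These counter updates exist to make rule 3 below decidable: the old check (num_subchannels == 0) could never fire for a non-empty list, so a policy whose every subchannel had reported SHUTDOWN would never itself go SHUTDOWN. With num_shutdown tracked, three subchannels that have all shut down give num_shutdown == num_subchannels == 3, and the policy transitions correctly.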
@@ -455,7 +462,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: sd->curr_connectivity_state == CONNECTING.
*
* 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN.
- * CHECK: p->subchannel_list->num_subchannels = 0.
+ * CHECK: p->subchannel_list->num_shutdown ==
+ * p->subchannel_list->num_subchannels.
*
* 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
* TRANSIENT_FAILURE.
@@ -464,37 +472,39 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* 5) RULE: ALL subchannels are IDLE => policy is IDLE.
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
+ grpc_connectivity_state new_state = sd->curr_connectivity_state;
rr_subchannel_list *subchannel_list = sd->subchannel_list;
round_robin_lb_policy *p = subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
- return GRPC_CHANNEL_READY;
+ new_state = GRPC_CHANNEL_READY;
} else if (sd->curr_connectivity_state ==
GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
"rr_connecting");
- return GRPC_CHANNEL_CONNECTING;
- } else if (p->subchannel_list->num_subchannels == 0) { /* 3) SHUTDOWN */
+ new_state = GRPC_CHANNEL_CONNECTING;
+ } else if (p->subchannel_list->num_shutdown ==
+ p->subchannel_list->num_subchannels) { /* 3) SHUTDOWN */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
- return GRPC_CHANNEL_SHUTDOWN;
+ new_state = GRPC_CHANNEL_SHUTDOWN;
} else if (subchannel_list->num_transient_failures ==
p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "rr_transient_failure");
- return GRPC_CHANNEL_TRANSIENT_FAILURE;
+ new_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
} else if (subchannel_list->num_idle ==
p->subchannel_list->num_subchannels) { /* 5) IDLE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE, "rr_idle");
- return GRPC_CHANNEL_IDLE;
+ new_state = GRPC_CHANNEL_IDLE;
}
- /* no change */
- return sd->curr_connectivity_state;
+ GRPC_ERROR_UNREF(error);
+ return new_state;
}
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
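The shape of the update_lb_connectivity_status_locked() rewrite above is about grpc_error ownership: the function now consumes the error reference it is passed, with every branch falling through to a single trailing GRPC_ERROR_UNREF(error) instead of returning early, which keeps the ref accounting in one place. The convention, as a hedged sketch (the helper names are hypothetical):

/* A function receiving a grpc_error* owns one ref: hand callees their
   own ref and release yours exactly once, on every path. */
static void report_error(grpc_exec_ctx *exec_ctx, grpc_error *error);

static void consume_error(grpc_exec_ctx *exec_ctx, grpc_error *error) {
  report_error(exec_ctx, GRPC_ERROR_REF(error)); /* callee's ref */
  GRPC_ERROR_UNREF(error);                       /* our ref, released once */
}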
@@ -553,7 +563,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
}
@@ -571,13 +581,15 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(sd->subchannel_list == p->latest_pending_subchannel_list);
GPR_ASSERT(!sd->subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ const unsigned long num_subchannels =
+ p->subchannel_list != NULL
+ ? (unsigned long)p->subchannel_list->num_subchannels
+ : 0;
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
"of %p (size %lu)",
- (void *)p, (void *)p->subchannel_list,
- (unsigned long)p->subchannel_list->num_subchannels,
- (void *)sd->subchannel_list,
- (unsigned long)sd->subchannel_list->num_subchannels);
+ (void *)p, (void *)p->subchannel_list, num_subchannels,
+              (void *)sd->subchannel_list, (unsigned long)sd->subchannel_list->num_subchannels);
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
@@ -614,7 +626,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
(void *)selected->subchannel,
(unsigned long)next_ready_index);
}
- grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
}
@@ -655,7 +667,7 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
}
}
@@ -747,9 +759,9 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
- grpc_closure_init(&sd->connectivity_changed_closure,
+ GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
rr_connectivity_changed_locked, sd,
- grpc_combiner_scheduler(args->combiner, false));
+ grpc_combiner_scheduler(args->combiner));
/* use some sentinel value outside of the range of
* grpc_connectivity_state to signal an undefined previous state. We
* won't be referring to this value again and it'll be overwritten after
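
The RULE/CHECK table in update_lb_connectivity_status_locked reduces to
counter comparisons over the subchannel list. A minimal, self-contained
sketch of the same aggregation, in plain C with invented names:

#include <stddef.h>

typedef enum { ST_IDLE, ST_CONNECTING, ST_READY,
               ST_TRANSIENT_FAILURE, ST_SHUTDOWN } conn_state;

typedef struct {
  size_t num_subchannels;
  size_t num_ready, num_idle, num_shutdown, num_transient_failures;
} counters;

/* Mirrors rules 1-5: READY wins if any subchannel is ready; a subchannel
   that just moved to CONNECTING makes the policy CONNECTING; the remaining
   states apply only when *all* subchannels share them. */
static conn_state aggregate(const counters *c, conn_state sd_state) {
  if (c->num_ready > 0) return ST_READY;                          /* 1 */
  if (sd_state == ST_CONNECTING) return ST_CONNECTING;            /* 2 */
  if (c->num_shutdown == c->num_subchannels) return ST_SHUTDOWN;  /* 3 */
  if (c->num_transient_failures == c->num_subchannels)
    return ST_TRANSIENT_FAILURE;                                  /* 4 */
  if (c->num_idle == c->num_subchannels) return ST_IDLE;          /* 5 */
  return sd_state; /* no rule matched: keep the subchannel's state */
}
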
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c
index abac0fd7b4..538d8d65ed 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.c
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.c
@@ -138,12 +138,8 @@ static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
grpc_arg grpc_lb_addresses_create_channel_arg(
const grpc_lb_addresses* addresses) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_LB_ADDRESSES;
- arg.value.pointer.p = (void*)addresses;
- arg.value.pointer.vtable = &lb_addresses_arg_vtable;
- return arg;
+ return grpc_channel_arg_pointer_create(
+ GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
}
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
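
The hunk above swaps field-by-field construction of a grpc_arg for a call to
grpc_channel_arg_pointer_create. Judging only from the removed lines, the
helper must be equivalent to the sketch below (the real definition lives
elsewhere in the tree and may differ in detail):

#include <grpc/grpc.h>

grpc_arg pointer_arg_sketch(char *key, void *p,
                            const grpc_arg_pointer_vtable *vtable) {
  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = key;
  arg.value.pointer.p = p;
  arg.value.pointer.vtable = vtable;
  return arg;
}
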
diff --git a/src/core/ext/filters/client_channel/resolver.c b/src/core/ext/filters/client_channel/resolver.c
index 69b1c31e59..de9a8ce41b 100644
--- a/src/core/ext/filters/client_channel/resolver.c
+++ b/src/core/ext/filters/client_channel/resolver.c
@@ -19,6 +19,10 @@
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_resolver_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable,
grpc_combiner *combiner) {
@@ -27,25 +31,30 @@ void grpc_resolver_init(grpc_resolver *resolver,
gpr_ref_init(&resolver->refs, 1);
}
-#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
-void grpc_resolver_ref(grpc_resolver *resolver, grpc_closure_list *closure_list,
- const char *file, int line, const char *reason) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p ref %d -> %d %s",
- resolver, (int)resolver->refs.count, (int)resolver->refs.count + 1,
- reason);
+#ifndef NDEBUG
+void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
+ const char *reason) {
+ if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "RESOLVER:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
+ old_refs, old_refs + 1, reason);
+ }
#else
void grpc_resolver_ref(grpc_resolver *resolver) {
#endif
gpr_ref(&resolver->refs);
}
-#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
-void grpc_resolver_unref(grpc_resolver *resolver,
- grpc_closure_list *closure_list, const char *file,
- int line, const char *reason) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "RESOLVER:%p unref %d -> %d %s",
- resolver, (int)resolver->refs.count, (int)resolver->refs.count - 1,
- reason);
+#ifndef NDEBUG
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ const char *file, int line, const char *reason) {
+ if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
+ gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "RESOLVER:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
+ old_refs, old_refs - 1, reason);
+ }
#else
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
#endif
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index c78bb316cb..ae9c8f66fe 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -25,6 +25,10 @@
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_resolver_refcount;
+#endif
+
/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
@@ -41,17 +45,17 @@ struct grpc_resolver_vtable {
grpc_channel_args **result, grpc_closure *on_complete);
};
-#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_RESOLVER_UNREF(cl, p, r) \
- grpc_resolver_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_RESOLVER_UNREF(e, p, r) \
+ grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r))
void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
const char *reason);
-void grpc_resolver_unref(grpc_resolver *policy, grpc_closure_list *closure_list,
+void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy,
const char *file, int line, const char *reason);
#else
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
-#define GRPC_RESOLVER_UNREF(cl, p, r) grpc_resolver_unref((cl), (p))
+#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p))
void grpc_resolver_ref(grpc_resolver *policy);
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
#endif
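
With the guard changed from GRPC_RESOLVER_REFCOUNT_DEBUG to NDEBUG, every
debug build compiles the file/line/reason variants, and the logging itself is
gated at runtime by grpc_trace_resolver_refcount (presumably switched on via
the GRPC_TRACE environment variable, like other tracers). Call sites read the
same either way:

/* The macro supplies __FILE__/__LINE__/reason only when NDEBUG is unset. */
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
/* ... later, when the resolution attempt completes ... */
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
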
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
index a0e6604f38..04a7852323 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
@@ -65,6 +65,8 @@ typedef struct {
grpc_combiner *combiner;
/** are we currently resolving? */
bool resolving;
+  /** the pending resolution request */
+ grpc_ares_request *pending_request;
/** which version of the result have we published? */
int published_version;
/** which version of the result is current? */
@@ -82,7 +84,7 @@ typedef struct {
gpr_backoff backoff_state;
/** currently resolving addresses */
- grpc_resolved_addresses *addresses;
+ grpc_lb_addresses *lb_addresses;
} ares_dns_resolver;
static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
@@ -109,9 +111,12 @@ static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
+ if (r->pending_request != NULL) {
+ grpc_cancel_ares_request(exec_ctx, r->pending_request);
+ }
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, r->next_completion,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
@@ -145,19 +150,11 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
- if (r->addresses != NULL) {
- grpc_lb_addresses *addresses = grpc_lb_addresses_create(
- r->addresses->naddrs, NULL /* user_data_vtable */);
- for (size_t i = 0; i < r->addresses->naddrs; ++i) {
- grpc_lb_addresses_set_address(
- addresses, i, &r->addresses->addrs[i].addr,
- r->addresses->addrs[i].len, false /* is_balancer */,
- NULL /* balancer_name */, NULL /* user_data */);
- }
- grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
+ r->pending_request = NULL;
+ if (r->lb_addresses != NULL) {
+ grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(r->lb_addresses);
result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
- grpc_resolved_addresses_destroy(r->addresses);
- grpc_lb_addresses_destroy(exec_ctx, addresses);
+ grpc_lb_addresses_destroy(exec_ctx, r->lb_addresses);
} else {
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
@@ -209,10 +206,11 @@ static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = true;
- r->addresses = NULL;
- grpc_dns_lookup_ares(exec_ctx, r->dns_server, r->name_to_resolve,
- r->default_port, r->interested_parties,
- &r->dns_ares_on_resolved_locked, &r->addresses);
+ r->lb_addresses = NULL;
+ r->pending_request = grpc_dns_lookup_ares(
+ exec_ctx, r->dns_server, r->name_to_resolve, r->default_port,
+ r->interested_parties, &r->dns_ares_on_resolved_locked, &r->lb_addresses,
+ true /* check_grpclb */);
}
static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -222,7 +220,8 @@ static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
*r->target_result = r->resolved_result == NULL
? NULL
: grpc_channel_args_copy(r->resolved_result);
- grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ gpr_log(GPR_DEBUG, "dns_ares_maybe_finish_next_locked");
+ GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
@@ -245,10 +244,10 @@ static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_resolver_args *args,
const char *default_port) {
- // Get name from args.
+ /* Get name from args. */
const char *path = args->uri->path;
if (path[0] == '/') ++path;
- // Create resolver.
+ /* Create resolver. */
ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
@@ -267,12 +266,12 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- grpc_closure_init(&r->dns_ares_on_retry_timer_locked,
+ GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
dns_ares_on_retry_timer_locked, r,
- grpc_combiner_scheduler(r->base.combiner, false));
- grpc_closure_init(&r->dns_ares_on_resolved_locked,
+ grpc_combiner_scheduler(r->base.combiner));
+ GRPC_CLOSURE_INIT(&r->dns_ares_on_resolved_locked,
dns_ares_on_resolved_locked, r,
- grpc_combiner_scheduler(r->base.combiner, false));
+ grpc_combiner_scheduler(r->base.combiner));
return &r->base;
}
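
Taken together, the hunks above give the ares resolver a cancellable
in-flight lookup: starting a resolution stores the handle returned by
grpc_dns_lookup_ares, shutdown cancels it, and the completion callback clears
it. Condensed from the hunks, nothing added:

/* dns_ares_start_resolving_locked: keep the request handle. */
r->lb_addresses = NULL;
r->pending_request = grpc_dns_lookup_ares(
    exec_ctx, r->dns_server, r->name_to_resolve, r->default_port,
    r->interested_parties, &r->dns_ares_on_resolved_locked, &r->lb_addresses,
    true /* check_grpclb */);

/* dns_ares_shutdown_locked: cancel whatever is still in flight. */
if (r->pending_request != NULL) {
  grpc_cancel_ares_request(exec_ctx, r->pending_request);
}

/* dns_ares_on_resolved_locked: the request has completed. */
r->pending_request = NULL;
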
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index ff3904e252..386012d2ed 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -19,8 +19,6 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
-#include <ares.h>
-
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
@@ -47,5 +45,9 @@ grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
of ARES_ECANCELLED. */
void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
+/* Shut down all the grpc_fds used by \a ev_driver */
+void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver);
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
index 03ce303616..1ab8295e9e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
@@ -19,6 +19,8 @@
#include "src/core/lib/iomgr/port.h"
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
+#include <ares.h>
+
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
#include <grpc/support/alloc.h>
@@ -99,9 +101,12 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
GPR_ASSERT(!fdn->writable_registered);
gpr_mu_destroy(&fdn->mu);
grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->grpc_fd);
- grpc_fd_shutdown(exec_ctx, fdn->grpc_fd,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("fd node destroyed"));
- grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL, NULL, "c-ares query finished");
+  /* The c-ares library has already closed the fd inside grpc_fd, and the fd
+     number may be reused immediately by another thread. It therefore must not
+     be closed again by the grpc_fd_orphan below; passing a release-fd pointer
+     makes grpc_fd_orphan hand the fd back instead of closing it. */
+ int fd;
+ grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL, &fd, "c-ares query finished");
gpr_free(fdn);
}
@@ -140,6 +145,20 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
grpc_ares_ev_driver_unref(ev_driver);
}
+void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver) {
+ gpr_mu_lock(&ev_driver->mu);
+ ev_driver->shutting_down = true;
+ fd_node *fn = ev_driver->fds;
+ while (fn != NULL) {
+ grpc_fd_shutdown(
+ exec_ctx, fn->grpc_fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
+ fn = fn->next;
+ }
+ gpr_mu_unlock(&ev_driver->mu);
+}
+
// Search for fd in the fd_node list starting at head. This is an O(n) search;
// n is at most ARES_GETSOCK_MAXNUM (16) and typically 1 - 2 in our tests.
static fd_node *pop_fd_node(fd_node **head, int fd) {
@@ -240,9 +259,9 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
fdn->readable_registered = false;
fdn->writable_registered = false;
gpr_mu_init(&fdn->mu);
- grpc_closure_init(&fdn->read_closure, on_readable_cb, fdn,
+ GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&fdn->write_closure, on_writable_cb, fdn,
+ GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
grpc_schedule_on_exec_ctx);
grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set,
fdn->grpc_fd);
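
fd_node_destroy above relies on grpc_fd_orphan's release-fd parameter: a
non-NULL int pointer asks iomgr to hand the raw descriptor back rather than
close() it. That matters here because c-ares has already closed the
descriptor and the number may have been reused by another thread. The idiom
in isolation:

int released_fd; /* receives the raw fd instead of it being closed */
grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL /* on_done */, &released_fd,
               "c-ares query finished");
/* released_fd is stale at this point and must NOT be close()d again. */
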
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
index 4ee07e170a..244b260dfa 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -33,6 +33,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
+#include <nameser.h>
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@@ -45,20 +46,14 @@
static gpr_once g_basic_init = GPR_ONCE_INIT;
static gpr_mu g_init_mu;
-typedef struct grpc_ares_request {
+struct grpc_ares_request {
/** indicates the DNS server to use, if specified */
struct ares_addr_port_node dns_server_addr;
  /** following members are set in grpc_dns_lookup_ares_impl */
- /** host to resolve, parsed from the name to resolve */
- char *host;
- /** port to fill in sockaddr_in, parsed from the name to resolve */
- char *port;
- /** default port to use */
- char *default_port;
/** closure to call when the request completes */
grpc_closure *on_done;
/** the pointer to receive the resolved addresses */
- grpc_resolved_addresses **addrs_out;
+ grpc_lb_addresses **lb_addrs_out;
  /** the event driver used by this request */
grpc_ares_ev_driver *ev_driver;
/** number of ongoing queries */
@@ -70,7 +65,19 @@ typedef struct grpc_ares_request {
bool success;
/** the errors explaining the request failure, set in on_done_cb */
grpc_error *error;
-} grpc_ares_request;
+};
+
+typedef struct grpc_ares_hostbyname_request {
+ /** following members are set in create_hostbyname_request */
+ /** the top-level request instance */
+ grpc_ares_request *parent_request;
+ /** host to resolve, parsed from the name to resolve */
+ char *host;
+ /** port to fill in sockaddr_in, parsed from the name to resolve */
+ uint16_t port;
+ /** is it a grpclb address */
+ bool is_balancer;
+} grpc_ares_hostbyname_request;
static void do_basic_init(void) { gpr_mu_init(&g_init_mu); }
@@ -83,6 +90,10 @@ static uint16_t strhtons(const char *port) {
return htons((unsigned short)atoi(port));
}
+static void grpc_ares_request_ref(grpc_ares_request *r) {
+ gpr_ref(&r->pending_queries);
+}
+
static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
grpc_ares_request *r) {
/* If there are no pending queries, invoke on_done callback and destroy the
@@ -96,74 +107,102 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
acquire locks in on_done. ares_dns_resolver is using combiner to
protect resources needed by on_done. */
grpc_exec_ctx new_exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_closure_sched(&new_exec_ctx, r->on_done, r->error);
+ GRPC_CLOSURE_SCHED(&new_exec_ctx, r->on_done, r->error);
grpc_exec_ctx_finish(&new_exec_ctx);
} else {
- grpc_closure_sched(exec_ctx, r->on_done, r->error);
+ GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, r->error);
}
gpr_mu_destroy(&r->mu);
grpc_ares_ev_driver_destroy(r->ev_driver);
- gpr_free(r->host);
- gpr_free(r->port);
- gpr_free(r->default_port);
gpr_free(r);
}
}
-static void on_done_cb(void *arg, int status, int timeouts,
- struct hostent *hostent) {
- grpc_ares_request *r = (grpc_ares_request *)arg;
+static grpc_ares_hostbyname_request *create_hostbyname_request(
+ grpc_ares_request *parent_request, char *host, uint16_t port,
+ bool is_balancer) {
+ grpc_ares_hostbyname_request *hr =
+ gpr_zalloc(sizeof(grpc_ares_hostbyname_request));
+ hr->parent_request = parent_request;
+ hr->host = gpr_strdup(host);
+ hr->port = port;
+ hr->is_balancer = is_balancer;
+ grpc_ares_request_ref(parent_request);
+ return hr;
+}
+
+static void destroy_hostbyname_request(grpc_exec_ctx *exec_ctx,
+ grpc_ares_hostbyname_request *hr) {
+ grpc_ares_request_unref(exec_ctx, hr->parent_request);
+ gpr_free(hr->host);
+ gpr_free(hr);
+}
+
+static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
+ struct hostent *hostent) {
+ grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)arg;
+ grpc_ares_request *r = hr->parent_request;
gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
r->success = true;
- grpc_resolved_addresses **addresses = r->addrs_out;
- if (*addresses == NULL) {
- *addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
- (*addresses)->naddrs = 0;
- (*addresses)->addrs = NULL;
+ grpc_lb_addresses **lb_addresses = r->lb_addrs_out;
+ if (*lb_addresses == NULL) {
+ *lb_addresses = grpc_lb_addresses_create(0, NULL);
}
- size_t prev_naddr = (*addresses)->naddrs;
+ size_t prev_naddr = (*lb_addresses)->num_addresses;
size_t i;
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
- (*addresses)->naddrs += i;
- (*addresses)->addrs =
- gpr_realloc((*addresses)->addrs,
- sizeof(grpc_resolved_address) * (*addresses)->naddrs);
- for (i = prev_naddr; i < (*addresses)->naddrs; i++) {
- memset(&(*addresses)->addrs[i], 0, sizeof(grpc_resolved_address));
- if (hostent->h_addrtype == AF_INET6) {
- (*addresses)->addrs[i].len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *addr =
- (struct sockaddr_in6 *)&(*addresses)->addrs[i].addr;
- addr->sin6_family = (sa_family_t)hostent->h_addrtype;
- addr->sin6_port = strhtons(r->port);
-
- char output[INET6_ADDRSTRLEN];
- memcpy(&addr->sin6_addr, hostent->h_addr_list[i - prev_naddr],
- sizeof(struct in6_addr));
- ares_inet_ntop(AF_INET6, &addr->sin6_addr, output, INET6_ADDRSTRLEN);
- gpr_log(GPR_DEBUG,
- "c-ares resolver gets a AF_INET6 result: \n"
- " addr: %s\n port: %s\n sin6_scope_id: %d\n",
- output, r->port, addr->sin6_scope_id);
- } else {
- (*addresses)->addrs[i].len = sizeof(struct sockaddr_in);
- struct sockaddr_in *addr =
- (struct sockaddr_in *)&(*addresses)->addrs[i].addr;
- memcpy(&addr->sin_addr, hostent->h_addr_list[i - prev_naddr],
- sizeof(struct in_addr));
- addr->sin_family = (sa_family_t)hostent->h_addrtype;
- addr->sin_port = strhtons(r->port);
-
- char output[INET_ADDRSTRLEN];
- ares_inet_ntop(AF_INET, &addr->sin_addr, output, INET_ADDRSTRLEN);
- gpr_log(GPR_DEBUG,
- "c-ares resolver gets a AF_INET result: \n"
- " addr: %s\n port: %s\n",
- output, r->port);
+ (*lb_addresses)->num_addresses += i;
+ (*lb_addresses)->addresses =
+ gpr_realloc((*lb_addresses)->addresses,
+ sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
+ for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
+ switch (hostent->h_addrtype) {
+ case AF_INET6: {
+ size_t addr_len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 addr;
+ memset(&addr, 0, addr_len);
+ memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr],
+ sizeof(struct in6_addr));
+ addr.sin6_family = (sa_family_t)hostent->h_addrtype;
+ addr.sin6_port = hr->port;
+ grpc_lb_addresses_set_address(
+ *lb_addresses, i, &addr, addr_len,
+ hr->is_balancer /* is_balancer */,
+                hr->is_balancer ? gpr_strdup(hr->host) : NULL /* balancer_name */,
+ NULL /* user_data */);
+ char output[INET6_ADDRSTRLEN];
+ ares_inet_ntop(AF_INET6, &addr.sin6_addr, output, INET6_ADDRSTRLEN);
+ gpr_log(GPR_DEBUG,
+ "c-ares resolver gets a AF_INET6 result: \n"
+ " addr: %s\n port: %d\n sin6_scope_id: %d\n",
+ output, ntohs(hr->port), addr.sin6_scope_id);
+ break;
+ }
+ case AF_INET: {
+ size_t addr_len = sizeof(struct sockaddr_in);
+ struct sockaddr_in addr;
+ memset(&addr, 0, addr_len);
+ memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr],
+ sizeof(struct in_addr));
+ addr.sin_family = (sa_family_t)hostent->h_addrtype;
+ addr.sin_port = hr->port;
+ grpc_lb_addresses_set_address(
+ *lb_addresses, i, &addr, addr_len,
+ hr->is_balancer /* is_balancer */,
+                hr->is_balancer ? gpr_strdup(hr->host) : NULL /* balancer_name */,
+ NULL /* user_data */);
+ char output[INET_ADDRSTRLEN];
+ ares_inet_ntop(AF_INET, &addr.sin_addr, output, INET_ADDRSTRLEN);
+ gpr_log(GPR_DEBUG,
+ "c-ares resolver gets a AF_INET result: \n"
+ " addr: %s\n port: %d\n",
+ output, ntohs(hr->port));
+ break;
+ }
}
}
} else if (!r->success) {
@@ -179,14 +218,58 @@ static void on_done_cb(void *arg, int status, int timeouts,
}
}
gpr_mu_unlock(&r->mu);
- grpc_ares_request_unref(NULL, r);
+ destroy_hostbyname_request(NULL, hr);
+}
+
+static void on_srv_query_done_cb(void *arg, int status, int timeouts,
+ unsigned char *abuf, int alen) {
+ grpc_ares_request *r = (grpc_ares_request *)arg;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
+ if (status == ARES_SUCCESS) {
+ gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS");
+    struct ares_srv_reply *reply = NULL;
+ const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
+ if (parse_status == ARES_SUCCESS) {
+ ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ for (struct ares_srv_reply *srv_it = reply; srv_it != NULL;
+ srv_it = srv_it->next) {
+ if (grpc_ipv6_loopback_available()) {
+ grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ r, srv_it->host, srv_it->port, true /* is_balancer */);
+ ares_gethostbyname(*channel, hr->host, AF_INET6,
+ on_hostbyname_done_cb, hr);
+ }
+ grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ r, srv_it->host, srv_it->port, true /* is_balancer */);
+ ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
+ hr);
+ grpc_ares_ev_driver_start(&exec_ctx, r->ev_driver);
+ }
+ }
+ if (reply != NULL) {
+ ares_free_data(reply);
+ }
+ } else if (!r->success) {
+ char *error_msg;
+ gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
+ ares_strerror(status));
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ gpr_free(error_msg);
+ if (r->error == GRPC_ERROR_NONE) {
+ r->error = error;
+ } else {
+ r->error = grpc_error_add_child(error, r->error);
+ }
+ }
+ grpc_ares_request_unref(&exec_ctx, r);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
- const char *name, const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
+static grpc_ares_request *grpc_dns_lookup_ares_impl(
+ grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
+ const char *default_port, grpc_pollset_set *interested_parties,
+ grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb) {
grpc_error *error = GRPC_ERROR_NONE;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
@@ -221,10 +304,7 @@ void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
- r->addrs_out = addrs;
- r->default_port = gpr_strdup(default_port);
- r->port = port;
- r->host = host;
+ r->lb_addrs_out = addrs;
r->success = false;
r->error = GRPC_ERROR_NONE;
ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
@@ -248,6 +328,7 @@ void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
error = grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("cannot parse authority"),
GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
+ gpr_free(r);
goto error_cleanup;
}
int status = ares_set_servers_ports(*channel, &r->dns_server_addr);
@@ -257,41 +338,55 @@ void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
ares_strerror(status));
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
+ gpr_free(r);
goto error_cleanup;
}
}
  // The caller's initial reference (pending_queries == 1) keeps the request
  // alive until grpc_ares_ev_driver_start below has been called.
- gpr_ref_init(&r->pending_queries, 2);
+ gpr_ref_init(&r->pending_queries, 1);
if (grpc_ipv6_loopback_available()) {
- gpr_ref(&r->pending_queries);
- ares_gethostbyname(*channel, r->host, AF_INET6, on_done_cb, r);
+ grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ r, host, strhtons(port), false /* is_balancer */);
+ ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr);
+ }
+ grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ r, host, strhtons(port), false /* is_balancer */);
+ ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr);
+ if (check_grpclb) {
+ /* Query the SRV record */
+ grpc_ares_request_ref(r);
+ char *service_name;
+ gpr_asprintf(&service_name, "_grpclb._tcp.%s", host);
+ ares_query(*channel, service_name, ns_c_in, ns_t_srv, on_srv_query_done_cb,
+ r);
+ gpr_free(service_name);
}
- ares_gethostbyname(*channel, r->host, AF_INET, on_done_cb, r);
/* TODO(zyc): Handle CNAME records here. */
grpc_ares_ev_driver_start(exec_ctx, r->ev_driver);
grpc_ares_request_unref(exec_ctx, r);
- return;
+ gpr_free(host);
+ gpr_free(port);
+ return r;
error_cleanup:
- grpc_closure_sched(exec_ctx, on_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
gpr_free(host);
gpr_free(port);
+ return NULL;
}
-void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
- grpc_dns_lookup_ares(exec_ctx, NULL /* dns_server */, name, default_port,
- interested_parties, on_done, addrs);
-}
+grpc_ares_request *(*grpc_dns_lookup_ares)(
+ grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
+ const char *default_port, grpc_pollset_set *interested_parties,
+ grpc_closure *on_done, grpc_lb_addresses **addrs,
+ bool check_grpclb) = grpc_dns_lookup_ares_impl;
-void (*grpc_resolve_address_ares)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {
+ if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) {
+ grpc_ares_ev_driver_shutdown(exec_ctx, r->ev_driver);
+ }
+}
grpc_error *grpc_ares_init(void) {
gpr_once_init(&g_basic_init, do_basic_init);
@@ -316,4 +411,66 @@ void grpc_ares_cleanup(void) {
gpr_mu_unlock(&g_init_mu);
}
+/*
+ * grpc_resolve_address_ares related structs and functions
+ */
+
+typedef struct grpc_resolve_address_ares_request {
+ /** the pointer to receive the resolved addresses */
+ grpc_resolved_addresses **addrs_out;
+ /** currently resolving lb addresses */
+ grpc_lb_addresses *lb_addrs;
+ /** closure to call when the resolve_address_ares request completes */
+ grpc_closure *on_resolve_address_done;
+ /** a closure wrapping on_dns_lookup_done_cb, which should be invoked when the
+ grpc_dns_lookup_ares operation is done. */
+ grpc_closure on_dns_lookup_done;
+} grpc_resolve_address_ares_request;
+
+static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_resolve_address_ares_request *r =
+ (grpc_resolve_address_ares_request *)arg;
+ grpc_resolved_addresses **resolved_addresses = r->addrs_out;
+ if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
+ *resolved_addresses = NULL;
+ } else {
+ *resolved_addresses = gpr_zalloc(sizeof(grpc_resolved_addresses));
+ (*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
+ (*resolved_addresses)->addrs = gpr_zalloc(sizeof(grpc_resolved_address) *
+ (*resolved_addresses)->naddrs);
+ for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
+ GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
+ memcpy(&(*resolved_addresses)->addrs[i],
+ &r->lb_addrs->addresses[i].address, sizeof(grpc_resolved_address));
+ }
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, r->on_resolve_address_done,
+ GRPC_ERROR_REF(error));
+ grpc_lb_addresses_destroy(exec_ctx, r->lb_addrs);
+ gpr_free(r);
+}
+
+static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
+ const char *name,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ grpc_resolve_address_ares_request *r =
+ gpr_zalloc(sizeof(grpc_resolve_address_ares_request));
+ r->addrs_out = addrs;
+ r->on_resolve_address_done = on_done;
+ GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
+ grpc_schedule_on_exec_ctx);
+ grpc_dns_lookup_ares(exec_ctx, NULL /* dns_server */, name, default_port,
+ interested_parties, &r->on_dns_lookup_done, &r->lb_addrs,
+ false /* check_grpclb */);
+}
+
+void (*grpc_resolve_address_ares)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
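
The query fan-out above hangs off a single reference count:
grpc_dns_lookup_ares_impl initializes pending_queries to 1 (the caller's
reference), each create_hostbyname_request and the optional SRV query take
one more, and the trailing grpc_ares_request_unref drops the caller's
reference, so on_done fires exactly once, after the last query finishes. A
toy model of the pattern, non-atomic and with invented names:

#include <stdlib.h>

typedef struct {
  int pending; /* the real code uses a gpr_refcount with atomic ops */
  void (*on_done)(void *user_data);
  void *user_data;
} request;

static void request_ref(request *r) { ++r->pending; }

static void request_unref(request *r) {
  if (--r->pending == 0) {
    r->on_done(r->user_data); /* last query done: fire the callback */
    free(r);
  }
}

This shape is also why the error paths above can gpr_free(r) directly: they
run before pending_queries has been initialized and before any query holds a
reference.
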
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 08efeea04a..5d2d6c993b 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -19,11 +19,14 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
+#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
+typedef struct grpc_ares_request grpc_ares_request;
+
/* Asynchronously resolve addr. Use \a default_port if a port isn't designated
in addr, otherwise use the port in addr. grpc_ares_init() must be called at
least once before this function. \a on_done may be called directly in this
@@ -36,11 +39,21 @@ extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
grpc_closure *on_done,
grpc_resolved_addresses **addresses);
-void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
- const char *addr, const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addresses);
+/* Asynchronously resolve addr. It will try to resolve grpclb SRV records in
+ addition to the normal address records. For normal address records, it uses
+ \a default_port if a port isn't designated in \a addr, otherwise it uses the
+ port in \a addr. grpc_ares_init() must be called at least once before this
+   function. \a on_done may be called directly in this function without being
+   scheduled with \a exec_ctx, so it must not try to acquire locks that are
+   held by the caller. */
+extern grpc_ares_request *(*grpc_dns_lookup_ares)(
+ grpc_exec_ctx *exec_ctx, const char *dns_server, const char *addr,
+ const char *default_port, grpc_pollset_set *interested_parties,
+ grpc_closure *on_done, grpc_lb_addresses **addresses, bool check_grpclb);
+
+/* Cancel the pending grpc_ares_request \a request */
+void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx,
+ grpc_ares_request *request);
/* Initialize gRPC ares wrapper. Must be called at least once before
grpc_resolve_address_ares(). */
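
From the caller's side, the new entry point pairs a lookup with an optional
cancellation. A usage sketch against only the declarations above; exec_ctx,
interested_parties and the on_resolved closure are assumed to exist in the
caller:

grpc_lb_addresses *addresses = NULL;
grpc_ares_request *req = grpc_dns_lookup_ares(
    exec_ctx, NULL /* dns_server: use the system default */, "example.com",
    "443" /* default_port */, interested_parties, on_resolved, &addresses,
    true /* check_grpclb: also query _grpclb._tcp SRV records */);
/* ... if the owner shuts down before on_resolved has run: */
if (req != NULL) grpc_cancel_ares_request(exec_ctx, req);
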
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c
new file mode 100644
index 0000000000..b67636a3e4
--- /dev/null
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c
@@ -0,0 +1,59 @@
+/*
+ *
+ * Copyright 2016-2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#if GRPC_ARES != 1 || defined(GRPC_UV)
+
+#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
+
+struct grpc_ares_request {
+ char val;
+};
+
+static grpc_ares_request *grpc_dns_lookup_ares_impl(
+ grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
+ const char *default_port, grpc_pollset_set *interested_parties,
+ grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb) {
+ return NULL;
+}
+
+grpc_ares_request *(*grpc_dns_lookup_ares)(
+ grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
+ const char *default_port, grpc_pollset_set *interested_parties,
+ grpc_closure *on_done, grpc_lb_addresses **addrs,
+ bool check_grpclb) = grpc_dns_lookup_ares_impl;
+
+void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {}
+
+grpc_error *grpc_ares_init(void) { return GRPC_ERROR_NONE; }
+
+void grpc_ares_cleanup(void) {}
+
+static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
+ const char *name,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {}
+
+void (*grpc_resolve_address_ares)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+
+#endif /* GRPC_ARES != 1 || defined(GRPC_UV) */
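
Both the real and the fallback translation unit define grpc_dns_lookup_ares
as an exported function pointer rather than a plain function, which keeps the
build-time selection trivial and leaves a seam for tests to substitute a
resolver. Note that grpc_cancel_ares_request in the real implementation only
acts when the pointer still targets grpc_dns_lookup_ares_impl, so an
overridden lookup must handle its own cancellation. A hypothetical override,
signature copied from the header, body invented:

static grpc_ares_request *fake_lookup_for_tests(
    grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
    const char *default_port, grpc_pollset_set *interested_parties,
    grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb) {
  /* A test double would fill *addrs with canned data and schedule on_done. */
  return NULL;
}

/* In test setup: grpc_dns_lookup_ares = fake_lookup_for_tests; */
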
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
index afa978ff01..af3391a731 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
@@ -99,7 +99,7 @@ static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, r->next_completion,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
@@ -178,8 +178,8 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
- grpc_closure_init(&r->on_retry, dns_on_retry_timer_locked, r,
- grpc_combiner_scheduler(r->base.combiner, false));
+ GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
+ grpc_combiner_scheduler(r->base.combiner));
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
}
if (r->resolved_result != NULL) {
@@ -200,8 +200,8 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->addresses = NULL;
grpc_resolve_address(
exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
- grpc_closure_create(dns_on_resolved_locked, r,
- grpc_combiner_scheduler(r->base.combiner, false)),
+ GRPC_CLOSURE_CREATE(dns_on_resolved_locked, r,
+ grpc_combiner_scheduler(r->base.combiner)),
&r->addresses);
}
@@ -212,7 +212,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
*r->target_result = r->resolved_result == NULL
? NULL
: grpc_channel_args_copy(r->resolved_result);
- grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
index 59560e068d..56ed4371a9 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
@@ -56,6 +56,10 @@ typedef struct {
// grpc_resolver_next_locked()'s closure.
grpc_channel_args* next_results;
+  // Results to use for the simulated re-resolution in
+ // fake_resolver_channel_saw_error_locked().
+ grpc_channel_args* results_upon_error;
+
// pending next completion, or NULL
grpc_closure* next_completion;
// target result address for next completion
@@ -65,6 +69,7 @@ typedef struct {
static void fake_resolver_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
fake_resolver* r = (fake_resolver*)gr;
grpc_channel_args_destroy(exec_ctx, r->next_results);
+ grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
}
@@ -74,7 +79,9 @@ static void fake_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
fake_resolver* r = (fake_resolver*)resolver;
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
}
}
@@ -85,15 +92,19 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
*r->target_result =
grpc_channel_args_union(r->next_results, r->channel_args);
grpc_channel_args_destroy(exec_ctx, r->next_results);
- grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
- r->next_completion = NULL;
r->next_results = NULL;
+ GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ r->next_completion = NULL;
}
}
static void fake_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
grpc_resolver* resolver) {
fake_resolver* r = (fake_resolver*)resolver;
+ if (r->next_results == NULL && r->results_upon_error != NULL) {
+ // Pretend we re-resolved.
+ r->next_results = grpc_channel_args_copy(r->results_upon_error);
+ }
fake_resolver_maybe_finish_next_locked(exec_ctx, r);
}
@@ -149,6 +160,10 @@ static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg,
grpc_channel_args_destroy(exec_ctx, r->next_results);
}
r->next_results = generator->next_response;
+ if (r->results_upon_error != NULL) {
+ grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
+ }
+ r->results_upon_error = grpc_channel_args_copy(generator->next_response);
fake_resolver_maybe_finish_next_locked(exec_ctx, r);
}
@@ -157,11 +172,10 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
generator->next_response = grpc_channel_args_copy(next_response);
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_create(
- set_response_cb, generator,
- grpc_combiner_scheduler(generator->resolver->base.combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, generator,
+ grpc_combiner_scheduler(
+ generator->resolver->base.combiner)),
GRPC_ERROR_NONE);
}
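
The new results_upon_error field makes the fake resolver replayable:
set_response_cb stashes a private copy of the response, and
fake_resolver_channel_saw_error_locked feeds that copy back as if a fresh
resolution had happened. From a test's point of view (exec_ctx, generator and
response are assumed to be set up elsewhere):

/* One call now serves both the initial resolution and any number of
   simulated re-resolutions after channel errors. */
grpc_fake_resolver_response_generator_set_response(exec_ctx, generator,
                                                   response);
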
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
index 641c8d3afe..7b4fe38272 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
@@ -73,7 +73,9 @@ static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
}
}
@@ -103,7 +105,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
*r->target_result =
grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
- grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
}
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 68a8bb8303..88157ed738 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -140,25 +140,13 @@ struct grpc_subchannel_call {
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
grpc_error *error);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_REASON reason
-#define REF_LOG(name, p) \
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p ref %d -> %d %s", \
- (name), (p), (p)->refs.count, (p)->refs.count + 1, reason)
-#define UNREF_LOG(name, p) \
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p unref %d -> %d %s", \
- (name), (p), (p)->refs.count, (p)->refs.count - 1, reason)
#define REF_MUTATE_EXTRA_ARGS \
GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose
#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
#else
#define REF_REASON ""
-#define REF_LOG(name, p) \
- do { \
- } while (0)
-#define UNREF_LOG(name, p) \
- do { \
- } while (0)
#define REF_MUTATE_EXTRA_ARGS
#define REF_MUTATE_PURPOSE(x)
#endif
@@ -207,10 +195,12 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
- purpose, old_val, old_val + delta, reason);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
+ purpose, old_val, old_val + delta, reason);
+ }
#endif
return old_val;
}
@@ -283,7 +273,7 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
- grpc_closure_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c,
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
@@ -333,7 +323,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
- grpc_closure_init(&c->connected, subchannel_connected, c,
+ GRPC_CLOSURE_INIT(&c->connected, subchannel_connected, c,
grpc_schedule_on_exec_ctx);
grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
"subchannel");
@@ -421,7 +411,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&w->subchannel->mu);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
gpr_free(w);
- grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -488,7 +478,7 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
time_til_next.tv_sec, time_til_next.tv_nsec);
}
- grpc_closure_init(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now);
}
}
@@ -514,7 +504,7 @@ void grpc_subchannel_notify_on_state_change(
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
- grpc_closure_init(&w->closure, on_external_state_watcher_done, w,
+ GRPC_CLOSURE_INIT(&w->closure, on_external_state_watcher_done, w,
grpc_schedule_on_exec_ctx);
if (interested_parties != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
@@ -635,7 +625,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
- grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed,
+ GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
sw_subchannel, grpc_schedule_on_exec_ctx);
if (c->disconnected) {
@@ -819,10 +809,7 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
}
grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
- grpc_arg new_arg;
- new_arg.key = GRPC_ARG_SUBCHANNEL_ADDRESS;
- new_arg.type = GRPC_ARG_STRING;
- new_arg.value.string =
- addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup("");
- return new_arg;
+ return grpc_channel_arg_string_create(
+ GRPC_ARG_SUBCHANNEL_ADDRESS,
+ addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
}
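
ref_mutate above logs the counts in hex because strong and weak references
share one gpr_atm word (c->ref_pair), mutated by a single fetch-add. A
generic sketch of that packing; the bit layout below is invented, the real
masks are defined earlier in subchannel.c:

#include <stdint.h>

#define STRONG_SHIFT 16 /* assumed layout: strong count in the high bits */
#define STRONG_ONE ((uintptr_t)1 << STRONG_SHIFT)
#define WEAK_ONE ((uintptr_t)1)

/* One add moves either counter; printing the word in hex (as the new trace
   line does) shows both halves at once. */
static uintptr_t pair_strong_ref(uintptr_t pair) { return pair + STRONG_ONE; }
static uintptr_t pair_weak_unref(uintptr_t pair) { return pair - WEAK_ONE; }
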
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index f38bf42803..6d2abb04df 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -37,7 +37,7 @@ typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args;
typedef struct grpc_subchannel_key grpc_subchannel_key;
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c
index c02756a726..ced025e2e2 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.c
@@ -74,7 +74,7 @@ retry:
// If we've already created and destroyed a timer, we always create a
// new closure: we have no other guarantee that the inlined closure is
// not in use (it may hold a pending call to timer_callback)
- closure = grpc_closure_create(timer_callback, elem,
+ closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
grpc_schedule_on_exec_ctx);
} else {
goto retry;
@@ -85,7 +85,7 @@ retry:
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING)) {
closure =
- grpc_closure_init(&deadline_state->timer_callback, timer_callback,
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
elem, grpc_schedule_on_exec_ctx);
} else {
goto retry;
@@ -115,7 +115,7 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg;
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
- grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
+ GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
GRPC_ERROR_REF(error));
}
@@ -123,7 +123,7 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
grpc_transport_stream_op_batch* op) {
deadline_state->next_on_complete = op->on_complete;
- grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
+ GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
grpc_schedule_on_exec_ctx);
op->on_complete = &deadline_state->on_complete;
}
@@ -161,9 +161,9 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
- grpc_closure_init(&state->closure, start_timer_after_init, state,
+ GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &state->closure, GRPC_ERROR_NONE);
}
}
@@ -281,7 +281,7 @@ static void server_start_transport_stream_op_batch(
op->payload->recv_initial_metadata.recv_initial_metadata_ready;
calld->recv_initial_metadata =
op->payload->recv_initial_metadata.recv_initial_metadata;
- grpc_closure_init(&calld->recv_initial_metadata_ready,
+ GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
op->payload->recv_initial_metadata.recv_initial_metadata_ready =
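
The retry loop above picks between two allocation strategies for the timer
closure: the closure inlined in deadline_state is safe only for the first
timer, because a cancelled timer may still have a queued call to
timer_callback referencing it. Reduced to its two branches;
timer_was_used_before is shorthand invented here for the CAS on the deadline
state:

grpc_closure *closure;
if (timer_was_used_before) {
  /* The inlined closure may still be referenced by a pending
     timer_callback, so allocate a fresh one. */
  closure =
      GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
} else {
  /* First use: the closure inlined in deadline_state is free. */
  closure = GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
                              elem, grpc_schedule_on_exec_ctx);
}
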
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c
index fb2a5d10fe..90f0aed7a0 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.c
@@ -158,7 +158,7 @@ static void hc_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
} else {
GRPC_ERROR_REF(error);
}
- grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
+ GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
@@ -171,7 +171,7 @@ static void hc_on_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} else {
GRPC_ERROR_REF(error);
}
- grpc_closure_run(exec_ctx, calld->on_done_recv_trailing_metadata, error);
+ GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_trailing_metadata, error);
}
static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -445,17 +445,17 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
calld->payload_bytes = NULL;
calld->send_message_blocked = false;
grpc_slice_buffer_init(&calld->slices);
- grpc_closure_init(&calld->hc_on_recv_initial_metadata,
+ GRPC_CLOSURE_INIT(&calld->hc_on_recv_initial_metadata,
hc_on_recv_initial_metadata, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->hc_on_recv_trailing_metadata,
+ GRPC_CLOSURE_INIT(&calld->hc_on_recv_trailing_metadata,
hc_on_recv_trailing_metadata, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem,
+ GRPC_CLOSURE_INIT(&calld->hc_on_complete, hc_on_complete, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->got_slice, got_slice, elem,
+ GRPC_CLOSURE_INIT(&calld->got_slice, got_slice, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->send_done, send_done, elem,
+ GRPC_CLOSURE_INIT(&calld->send_done, send_done, elem,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c
index 4f753cef1c..71a8bc5bec 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c
@@ -255,6 +255,23 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
}
}
+static void handle_send_message_batch(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op,
+ bool has_compression_algorithm) {
+ call_data *calld = elem->call_data;
+ if (!skip_compression(elem, op->payload->send_message.send_message->flags,
+ has_compression_algorithm)) {
+ calld->send_op = op;
+ calld->send_length = op->payload->send_message.send_message->length;
+ calld->send_flags = op->payload->send_message.send_message->flags;
+ continue_send_message(exec_ctx, elem);
+ } else {
+ /* pass control down the stack */
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+}
+
static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
@@ -307,8 +324,9 @@ static void compress_start_transport_stream_op_batch(
goto retry_send_im;
}
if (cur != INITIAL_METADATA_UNSEEN) {
- grpc_call_next_op(exec_ctx, elem,
- (grpc_transport_stream_op_batch *)cur);
+ handle_send_message_batch(exec_ctx, elem,
+ (grpc_transport_stream_op_batch *)cur,
+ has_compression_algorithm);
}
}
}
@@ -325,17 +343,8 @@ static void compress_start_transport_stream_op_batch(
break;
case HAS_COMPRESSION_ALGORITHM:
case NO_COMPRESSION_ALGORITHM:
- if (!skip_compression(elem,
- op->payload->send_message.send_message->flags,
- cur == HAS_COMPRESSION_ALGORITHM)) {
- calld->send_op = op;
- calld->send_length = op->payload->send_message.send_message->length;
- calld->send_flags = op->payload->send_message.send_message->flags;
- continue_send_message(exec_ctx, elem);
- } else {
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, op);
- }
+ handle_send_message_batch(exec_ctx, elem, op,
+ cur == HAS_COMPRESSION_ALGORITHM);
break;
default:
if (cur & CANCELLED_BIT) {
@@ -364,9 +373,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
grpc_slice_buffer_init(&calld->slices);
- grpc_closure_init(&calld->got_slice, got_slice, elem,
+ GRPC_CLOSURE_INIT(&calld->got_slice, got_slice, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->send_done, send_done, elem,
+ GRPC_CLOSURE_INIT(&calld->send_done, send_done, elem,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
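
Pulling the send path into handle_send_message_batch fixes an asymmetry
visible in the two hunks above: a send_message batch that had been parked
while the initial metadata was unseen used to be replayed through
grpc_call_next_op directly, bypassing the compression decision; now both
arrival orders converge on the same helper, whose decision (condensed) is:

if (!skip_compression(elem, op->payload->send_message.send_message->flags,
                      has_compression_algorithm)) {
  continue_send_message(exec_ctx, elem); /* after stashing op in calld */
} else {
  grpc_call_next_op(exec_ctx, elem, op); /* pass control down the stack */
}
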
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c
index 113e07d249..b145f12aff 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.c
@@ -269,7 +269,7 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
} else {
GRPC_ERROR_REF(err);
}
- grpc_closure_run(exec_ctx, calld->on_done_recv, err);
+ GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv, err);
}
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -281,11 +281,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+ GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
- grpc_closure_run(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
+ GRPC_CLOSURE_RUN(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
}
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -296,7 +296,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* do nothing. This is probably a GET request, and the payload will be
   returned in the hs_on_complete callback. */
} else {
- grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+ GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
@@ -383,11 +383,11 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
- grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem,
+ GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem,
+ GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem,
+ GRPC_CLOSURE_INIT(&calld->hs_recv_message_ready, hs_recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&calld->read_slice_buffer);
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/filters/load_reporting/load_reporting.c b/src/core/ext/filters/load_reporting/load_reporting.c
index a97322ee1d..9745763c91 100644
--- a/src/core/ext/filters/load_reporting/load_reporting.c
+++ b/src/core/ext/filters/load_reporting/load_reporting.c
@@ -50,11 +50,7 @@ static bool maybe_add_load_reporting_filter(grpc_exec_ctx *exec_ctx,
}
grpc_arg grpc_load_reporting_enable_arg() {
- grpc_arg arg;
- arg.type = GRPC_ARG_INTEGER;
- arg.key = GRPC_ARG_ENABLE_LOAD_REPORTING;
- arg.value.integer = 1;
- return arg;
+ return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1);
}
/* Plugin registration */
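
For reference, the helper introduced above presumably packs the same three fields the deleted lines set by hand; reconstructed sketches of the integer and string variants this commit uses (exact signatures assumed):

    grpc_arg grpc_channel_arg_integer_create(char *name, int value) {
      grpc_arg arg;
      arg.type = GRPC_ARG_INTEGER;
      arg.key = name;
      arg.value.integer = value;
      return arg;
    }

    grpc_arg grpc_channel_arg_string_create(char *name, char *value) {
      grpc_arg arg;
      arg.type = GRPC_ARG_STRING;
      arg.key = name;
      arg.value.string = value;
      return arg;
    }
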
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/load_reporting_filter.c
index 80446ca914..08474efb2e 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/load_reporting_filter.c
@@ -90,7 +90,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->id = (intptr_t)args->call_stack;
- grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem,
+ GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
/* TODO(dgq): do something with the data
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c
index 604f74e751..35304f8150 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.c
@@ -329,23 +329,23 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
: gpr_time_from_millis(value, GPR_TIMESPAN);
}
}
- grpc_closure_init(&chand->close_max_idle_channel, close_max_idle_channel,
+ GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel,
chand, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->close_max_age_channel, close_max_age_channel, chand,
+ GRPC_CLOSURE_INIT(&chand->close_max_age_channel, close_max_age_channel, chand,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->force_close_max_age_channel,
+ GRPC_CLOSURE_INIT(&chand->force_close_max_age_channel,
force_close_max_age_channel, chand,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->start_max_idle_timer_after_init,
+ GRPC_CLOSURE_INIT(&chand->start_max_idle_timer_after_init,
start_max_idle_timer_after_init, chand,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->start_max_age_timer_after_init,
+ GRPC_CLOSURE_INIT(&chand->start_max_age_timer_after_init,
start_max_age_timer_after_init, chand,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->start_max_age_grace_timer_after_goaway_op,
+ GRPC_CLOSURE_INIT(&chand->start_max_age_grace_timer_after_goaway_op,
start_max_age_grace_timer_after_goaway_op, chand,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->channel_connectivity_changed,
+ GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed,
channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx);
@@ -360,7 +360,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
initialization is done. */
GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_age_timer_after_init");
- grpc_closure_sched(exec_ctx, &chand->start_max_age_timer_after_init,
+ GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_age_timer_after_init,
GRPC_ERROR_NONE);
}
@@ -371,7 +371,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
0) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_idle_timer_after_init");
- grpc_closure_sched(exec_ctx, &chand->start_max_idle_timer_after_init,
+ GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init,
GRPC_ERROR_NONE);
}
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c
index e68ba149f2..9bb565ed6d 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.c
@@ -110,7 +110,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
GRPC_ERROR_REF(error);
}
// Invoke the next callback.
- grpc_closure_run(exec_ctx, calld->next_recv_message_ready, error);
+ GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_message_ready, error);
}
// Start transport stream op.
@@ -152,7 +152,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
- grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem,
+ GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
index 9095d6167c..8b3fff5fa3 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
@@ -67,7 +67,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
}
// Invoke the next callback.
- grpc_closure_run(exec_ctx, calld->next_recv_initial_metadata_ready,
+ GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
}
@@ -106,7 +106,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
call_data* calld = elem->call_data;
calld->next_recv_initial_metadata_ready = NULL;
calld->workaround_active = false;
- grpc_closure_init(&calld->recv_initial_metadata_ready,
+ GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index c636170319..983691bbad 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -124,7 +124,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_closure *notify = c->notify;
c->notify = NULL;
- grpc_closure_sched(exec_ctx, notify, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
c->handshake_mgr = NULL;
gpr_mu_unlock(&c->mu);
@@ -156,7 +156,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
memset(c->result, 0, sizeof(*c->result));
grpc_closure *notify = c->notify;
c->notify = NULL;
- grpc_closure_sched(exec_ctx, notify, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
if (c->endpoint != NULL) {
grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
}
@@ -184,7 +184,7 @@ static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
c->result = result;
GPR_ASSERT(c->endpoint == NULL);
chttp2_connector_ref(con); // Ref taken for callback.
- grpc_closure_init(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
GPR_ASSERT(!c->connecting);
c->connecting = true;
grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index 99bae76237..cccb347bf1 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -54,11 +54,9 @@ static grpc_channel *client_channel_factory_create_channel(
return NULL;
}
// Add channel arg containing the server URI.
- grpc_arg arg;
- arg.type = GRPC_ARG_STRING;
- arg.key = GRPC_ARG_SERVER_URI;
- arg.value.string =
- grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target);
+ grpc_arg arg = grpc_channel_arg_string_create(
+ GRPC_ARG_SERVER_URI,
+ grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
index 05145aeb2f..0346d50b6c 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
@@ -41,10 +41,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
GRPC_API_TRACE("grpc_insecure_channel_create(target=%p, fd=%d, args=%p)", 3,
(target, fd, args));
- grpc_arg default_authority_arg;
- default_authority_arg.type = GRPC_ARG_STRING;
- default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
- default_authority_arg.value.string = "test.authority";
+ grpc_arg default_authority_arg = grpc_channel_arg_string_create(
+ GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
grpc_channel_args *final_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index 7b76caba17..d4580f15f5 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -163,11 +163,9 @@ static grpc_channel *client_channel_factory_create_channel(
return NULL;
}
// Add channel arg containing the server URI.
- grpc_arg arg;
- arg.type = GRPC_ARG_STRING;
- arg.key = GRPC_ARG_SERVER_URI;
- arg.value.string =
- grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target);
+ grpc_arg arg = grpc_channel_arg_string_create(
+ GRPC_ARG_SERVER_URI,
+ grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index 28393775d3..f207155900 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -209,7 +209,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
goto error;
}
state = gpr_zalloc(sizeof(*state));
- grpc_closure_init(&state->tcp_server_shutdown_complete,
+ GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index b0ffdc0cf9..6a8c81445a 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -23,6 +23,9 @@
void grpc_chttp2_plugin_init(void) {
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
+#ifndef NDEBUG
+ grpc_register_tracer("chttp2_refcount", &grpc_trace_chttp2_refcount);
+#endif
}
void grpc_chttp2_plugin_shutdown(void) {}
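
Tracer names registered via grpc_register_tracer, like "chttp2_refcount" above, become runtime flags; by gRPC convention they are switched on through the GRPC_TRACE environment variable (an assumption from the tracer framework, not something this diff shows):

    /* Assumed usage: enable the refcount tracer in a debug build. */
    /*   GRPC_TRACE=chttp2_refcount,http ./my_grpc_binary          */
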
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 0428cf4e5b..6e8eadf7a1 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -35,7 +35,6 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -53,7 +52,7 @@
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
-#define DEFAULT_MAX_HEADER_LIST_SIZE (16 * 1024)
+#define DEFAULT_MAX_HEADER_LIST_SIZE (8 * 1024)
#define DEFAULT_CLIENT_KEEPALIVE_TIME_MS INT_MAX
#define DEFAULT_CLIENT_KEEPALIVE_TIMEOUT_MS 20000 /* 20 seconds */
@@ -77,6 +76,10 @@ static bool g_default_keepalive_permit_without_calls =
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false);
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false);
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_chttp2_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
static const grpc_transport_vtable vtable;
/* forward declarations of various callbacks that we'll build closures around */
@@ -92,8 +95,9 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error);
/** Set a transport level setting, and push it to our peer */
-static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_setting_id id, uint32_t value);
+static void queue_setting_update(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value);
static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error);
@@ -213,20 +217,26 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
gpr_free(t);
}
-#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+#ifndef NDEBUG
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason,
const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
- t->refs.count, t->refs.count - 1, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
+ gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
+ t, val, val - 1, reason, file, line);
+ }
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
- t->refs.count, t->refs.count + 1, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
+ gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
+ t, val, val + 1, reason, file, line);
+ }
gpr_ref(&t->refs);
}
#else
@@ -252,7 +262,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
- t->combiner = grpc_combiner_create(grpc_endpoint_get_workqueue(ep));
+ t->combiner = grpc_combiner_create();
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
@@ -270,32 +280,32 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
- grpc_closure_init(&t->write_action, write_action, t,
+ GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&t->read_action_locked, read_action_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->destructive_reclaimer_locked,
+ GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked,
destructive_reclaimer_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
- t, grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
- t, grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->start_keepalive_ping_locked,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
+ t, grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
+ t, grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->finish_keepalive_ping_locked,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
- grpc_closure_init(&t->keepalive_watchdog_fired_locked,
+ grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t,
- grpc_combiner_scheduler(t->combiner, false));
+ grpc_combiner_scheduler(t->combiner));
grpc_bdp_estimator_init(&t->bdp_estimator, t->peer_string);
t->last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC);
@@ -338,20 +348,21 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
GRPC_CHTTP2_CLIENT_CONNECT_STRING));
- grpc_chttp2_initiate_write(exec_ctx, t, false, "initial_write");
+ grpc_chttp2_initiate_write(exec_ctx, t, "initial_write");
}
/* configure http2 the way we like it */
if (is_client) {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
+ queue_setting_update(exec_ctx, t,
+ GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- DEFAULT_WINDOW);
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
- DEFAULT_MAX_HEADER_LIST_SIZE);
- push_setting(exec_ctx, t,
- GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ DEFAULT_WINDOW);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ DEFAULT_MAX_HEADER_LIST_SIZE);
+ queue_setting_update(exec_ctx, t,
+ GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
t->ping_policy = (grpc_chttp2_repeated_ping_policy){
.max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA,
@@ -518,8 +529,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
int value = grpc_channel_arg_get_integer(
&channel_args->args[i], settings_map[j].integer_options);
if (value >= 0) {
- push_setting(exec_ctx, t, settings_map[j].setting_id,
- (uint32_t)value);
+ queue_setting_update(exec_ctx, t, settings_map[j].setting_id,
+ (uint32_t)value);
}
}
break;
@@ -550,7 +561,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
- grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
+ grpc_chttp2_initiate_write(exec_ctx, t, "init");
post_benign_reclaimer(exec_ctx, t);
}
@@ -568,9 +579,9 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_closure_sched(exec_ctx, grpc_closure_create(
- destroy_transport_locked, t,
- grpc_combiner_scheduler(t->combiner, false)),
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_CREATE(destroy_transport_locked, t,
+ grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
}
@@ -621,7 +632,7 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason) {
grpc_stream_ref(s->refcount, reason);
}
@@ -657,13 +668,13 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s,
+ GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_init(&s->frame_storage);
s->pending_byte_stream = false;
- grpc_closure_init(&s->reset_byte_stream, reset_byte_stream, s,
- grpc_combiner_scheduler(t->combiner, false));
+ GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
+ grpc_combiner_scheduler(t->combiner));
GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
@@ -734,7 +745,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GPR_TIMER_END("destroy_stream", 0);
- grpc_closure_sched(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -745,9 +756,9 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
s->destroy_stream_arg = then_schedule_closure;
- grpc_closure_sched(
- exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
- grpc_combiner_scheduler(t->combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
+ grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0);
}
@@ -785,8 +796,6 @@ static const char *write_state_name(grpc_chttp2_write_state st) {
return "WRITING";
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
return "WRITING+MORE";
- case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
- return "WRITING+MORE+COVERED";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
@@ -799,7 +808,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
write_state_name(st), reason));
t->write_state = st;
if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
- grpc_closure_list_sched(exec_ctx, &t->run_after_write);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &t->run_after_write);
if (t->close_transport_on_writes_finished != NULL) {
grpc_error *err = t->close_transport_on_writes_finished;
t->close_transport_on_writes_finished = NULL;
@@ -809,38 +818,25 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- bool covered_by_poller, const char *reason) {
+ grpc_chttp2_transport *t, const char *reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
- grpc_closure_init(
- &t->write_action_begin_locked, write_action_begin_locked, t,
- grpc_combiner_finally_scheduler(t->combiner, covered_by_poller)),
+ GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
+ write_action_begin_locked, t,
+ grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING:
- set_write_state(
- exec_ctx, t,
- covered_by_poller
- ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER
- : GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- reason);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+ reason);
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
- if (covered_by_poller) {
- set_write_state(
- exec_ctx, t,
- GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
- reason);
- }
- break;
- case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
break;
}
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
@@ -856,10 +852,10 @@ void grpc_chttp2_become_writable(
case GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK:
break;
case GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, true, reason);
+ grpc_chttp2_initiate_write(exec_ctx, t, reason);
break;
case GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, false, reason);
+ grpc_chttp2_initiate_write(exec_ctx, t, reason);
break;
}
}
@@ -879,12 +875,12 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
case GRPC_CHTTP2_PARTIAL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
"begin writing partial");
- grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_FULL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"begin writing");
- grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break;
}
GPR_TIMER_END("write_action_begin_locked", 0);
@@ -895,8 +891,8 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
- grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t,
- grpc_combiner_scheduler(t->combiner, false)));
+ GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end_locked, t,
+ grpc_combiner_scheduler(t->combiner)));
GPR_TIMER_END("write_action", 0);
}
@@ -930,23 +926,11 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [!covered]");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
- grpc_closure_run(
+ GRPC_CLOSURE_RUN(
exec_ctx,
- grpc_closure_init(
- &t->write_action_begin_locked, write_action_begin_locked, t,
- grpc_combiner_finally_scheduler(t->combiner, false)),
- GRPC_ERROR_NONE);
- break;
- case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
- GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "continue writing [covered]");
- GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
- grpc_closure_run(
- exec_ctx,
- grpc_closure_init(&t->write_action_begin_locked,
+ GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
write_action_begin_locked, t,
- grpc_combiner_finally_scheduler(t->combiner, true)),
+ grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE);
break;
}
@@ -957,8 +941,11 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("terminate_writing_with_lock", 0);
}
-static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_setting_id id, uint32_t value) {
+// Marks an HTTP/2 setting dirty so that it is sent out the next time a
+// write occurs. If the change must take effect immediately, the caller
+// must manually initiate a write.
+static void queue_setting_update(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value) {
const grpc_chttp2_setting_parameters *sp =
&grpc_chttp2_settings_parameters[id];
uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
@@ -969,7 +956,6 @@ static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (use_value != t->settings[GRPC_LOCAL_SETTINGS][id]) {
t->settings[GRPC_LOCAL_SETTINGS][id] = use_value;
t->dirtied_local_settings = 1;
- grpc_chttp2_initiate_write(exec_ctx, t, false, "push_setting");
}
}
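
With the automatic grpc_chttp2_initiate_write call removed here, a queued settings update rides along with whatever write happens next; a caller that needs the new value on the wire immediately must pair the two calls itself, per the comment above. An illustrative sketch (setting and value hypothetical):

    queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
                         16 * 1024);
    grpc_chttp2_initiate_write(exec_ctx, t, "settings_change");
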
@@ -1074,7 +1060,7 @@ static void null_then_run_closure(grpc_exec_ctx *exec_ctx,
grpc_closure **closure, grpc_error *error) {
grpc_closure *c = *closure;
*closure = NULL;
- grpc_closure_run(exec_ctx, c, error);
+ GRPC_CLOSURE_RUN(exec_ctx, c, error);
}
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
@@ -1116,7 +1102,7 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
}
if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) ||
!(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) {
- grpc_closure_run(exec_ctx, closure, closure->error_data.error);
+ GRPC_CLOSURE_RUN(exec_ctx, closure, closure->error_data.error);
} else {
grpc_closure_list_append(&t->run_after_write, closure,
closure->error_data.error);
@@ -1252,7 +1238,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_closure *on_complete = op->on_complete;
if (on_complete == NULL) {
on_complete =
- grpc_closure_create(do_nothing, NULL, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_CREATE(do_nothing, NULL, grpc_schedule_on_exec_ctx);
}
  /* use final_data as a barrier until enqueue time; the initial counter is
@@ -1365,7 +1351,6 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
s->next_message_end_offset = s->flow_controlled_bytes_written +
(int64_t)s->flow_controlled_buffer.length +
(int64_t)len;
- s->complete_fetch_covered_by_poller = op->covered_by_poller;
if (flags & GRPC_WRITE_BUFFER_HINT) {
s->next_message_end_offset -= t->write_buffer_size;
s->write_buffering = true;
@@ -1432,6 +1417,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata_ready;
s->recv_initial_metadata =
op_payload->recv_initial_metadata.recv_initial_metadata;
+ s->trailing_metadata_available =
+ op_payload->recv_initial_metadata.trailing_metadata_available;
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
@@ -1485,11 +1472,10 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
op->handler_private.extra_arg = gs;
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
- grpc_closure_init(
- &op->handler_private.closure, perform_stream_op_locked, op,
- grpc_combiner_scheduler(t->combiner, op->covered_by_poller)),
+ GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked,
+ op, grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("perform_stream_op", 0);
}
@@ -1502,7 +1488,7 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_ping_queue *pq = &t->ping_queues[i];
for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
- grpc_closure_list_sched(exec_ctx, &pq->lists[j]);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
}
}
GRPC_ERROR_UNREF(error);
@@ -1516,7 +1502,7 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_NONE);
if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
GRPC_ERROR_NONE)) {
- grpc_chttp2_initiate_write(exec_ctx, t, false, "send_ping");
+ grpc_chttp2_initiate_write(exec_ctx, t, "send_ping");
}
}
@@ -1524,7 +1510,7 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
t->ping_state.is_delayed_ping_timer_set = false;
- grpc_chttp2_initiate_write(exec_ctx, t, false, "retry_send_ping");
+ grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
}
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -1537,9 +1523,9 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_free(from);
return;
}
- grpc_closure_list_sched(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
- grpc_chttp2_initiate_write(exec_ctx, t, false, "continue_pings");
+ grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings");
}
}
@@ -1552,7 +1538,7 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&slice, &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
grpc_slice_ref_internal(slice), &t->qbuf);
- grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
+ grpc_chttp2_initiate_write(exec_ctx, t, "goaway_sent");
GRPC_ERROR_UNREF(error);
}
@@ -1578,12 +1564,6 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t = op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
- if (op->on_connectivity_state_change != NULL) {
- grpc_connectivity_state_notify_on_state_change(
- exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
- op->on_connectivity_state_change);
- }
-
if (op->goaway_error) {
send_goaway(exec_ctx, t, op->goaway_error);
}
@@ -1607,11 +1587,17 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
op->send_ping);
}
+ if (op->on_connectivity_state_change != NULL) {
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
+ op->on_connectivity_state_change);
+ }
+
if (close_transport != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, close_transport);
}
- grpc_closure_run(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_RUN(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op");
}
@@ -1623,11 +1609,11 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_free(msg);
op->handler_private.extra_arg = gt;
GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
- grpc_closure_sched(
- exec_ctx, grpc_closure_init(&op->handler_private.closure,
- perform_transport_op_locked, op,
- grpc_combiner_scheduler(t->combiner, false)),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_INIT(&op->handler_private.closure,
+ perform_transport_op_locked, op,
+ grpc_combiner_scheduler(t->combiner)),
+ GRPC_ERROR_NONE);
}
/*******************************************************************************
@@ -1782,7 +1768,7 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, t, false, "rst_stream");
+ grpc_chttp2_initiate_write(exec_ctx, t, "rst_stream");
}
}
if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
@@ -2095,7 +2081,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&s->stats.outgoing));
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
- grpc_chttp2_initiate_write(exec_ctx, t, false, "close_from_api");
+ grpc_chttp2_initiate_write(exec_ctx, t, "close_from_api");
}
typedef struct {
@@ -2137,8 +2123,8 @@ static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_DEBUG, "%s: update initial window size to %d", t->peer_string,
(int)bdp);
}
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- (uint32_t)bdp);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ (uint32_t)bdp);
}
static void update_frame(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -2157,8 +2143,8 @@ static void update_frame(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
gpr_log(GPR_DEBUG, "%s: update max_frame size to %d", t->peer_string,
(int)frame_size);
}
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
- (uint32_t)frame_size);
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ (uint32_t)frame_size);
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@@ -2502,7 +2488,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, s->t, s);
} else {
GPR_ASSERT(error != GRPC_ERROR_NONE);
- grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
s->on_next = NULL;
GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
@@ -2581,9 +2567,9 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
if (s->frame_storage.length > 0) {
grpc_slice_buffer_swap(&s->frame_storage,
&s->unprocessed_incoming_frames_buffer);
- grpc_closure_sched(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
} else if (s->byte_stream_error != GRPC_ERROR_NONE) {
- grpc_closure_sched(exec_ctx, bs->next_action.on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete,
GRPC_ERROR_REF(s->byte_stream_error));
if (s->data_parser.parsing_frame != NULL) {
incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame);
@@ -2593,7 +2579,7 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
if (bs->remaining_bytes != 0) {
s->byte_stream_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
- grpc_closure_sched(exec_ctx, bs->next_action.on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete,
GRPC_ERROR_REF(s->byte_stream_error));
if (s->data_parser.parsing_frame != NULL) {
incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame);
@@ -2624,11 +2610,11 @@ static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
gpr_ref(&bs->refs);
bs->next_action.max_size_hint = max_size_hint;
bs->next_action.on_complete = on_complete;
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
- grpc_closure_init(
- &bs->next_action.closure, incoming_byte_stream_next_locked, bs,
- grpc_combiner_scheduler(bs->transport->combiner, false)),
+ GRPC_CLOSURE_INIT(&bs->next_action.closure,
+ incoming_byte_stream_next_locked, bs,
+ grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_next", 0);
return false;
@@ -2653,7 +2639,7 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
} else {
grpc_error *error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
- grpc_closure_sched(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
return error;
}
GPR_TIMER_END("incoming_byte_stream_pull", 0);
@@ -2682,11 +2668,10 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream;
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_init(
- &bs->destroy_action, incoming_byte_stream_destroy_locked, bs,
- grpc_combiner_scheduler(bs->transport->combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, GRPC_CLOSURE_INIT(
+ &bs->destroy_action, incoming_byte_stream_destroy_locked,
+ bs, grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}
@@ -2697,7 +2682,7 @@ static void incoming_byte_stream_publish_error(
grpc_chttp2_stream *s = bs->stream;
GPR_ASSERT(error != GRPC_ERROR_NONE);
- grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
s->on_next = NULL;
GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_REF(error);
@@ -2714,7 +2699,7 @@ grpc_error *grpc_chttp2_incoming_byte_stream_push(
grpc_error *error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream");
- grpc_closure_sched(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
grpc_slice_unref_internal(exec_ctx, slice);
return error;
} else {
@@ -2737,7 +2722,7 @@ grpc_error *grpc_chttp2_incoming_byte_stream_finished(
}
}
if (error != GRPC_ERROR_NONE && reset_on_error) {
- grpc_closure_sched(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
}
incoming_byte_stream_unref(exec_ctx, bs);
return error;
@@ -2757,6 +2742,7 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
gpr_ref_init(&incoming_byte_stream->refs, 2);
incoming_byte_stream->transport = t;
incoming_byte_stream->stream = s;
+ GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
return incoming_byte_stream;
}
@@ -2971,5 +2957,5 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer);
}
- grpc_closure_sched(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 0a1fb4d772..0c4e2a91c0 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -26,6 +26,10 @@
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_chttp2_refcount;
+#endif
+
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client);
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index dac0cb63a3..dead6be77f 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -128,6 +128,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
grpc_slice_unref_internal(exec_ctx, slice);
return GRPC_ERROR_REF(p->error);
case GRPC_CHTTP2_DATA_FH_0:
+ s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@@ -159,6 +160,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
+ s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@@ -167,6 +169,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@@ -175,6 +178,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@@ -183,6 +187,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
+ s->stats.incoming.framing_bytes++;
GPR_ASSERT(stream_out != NULL);
GPR_ASSERT(p->parsing_frame == NULL);
p->frame_size |= ((uint32_t)*cur);
@@ -219,6 +224,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
+ s->stats.incoming.data_bytes += remaining;
if (GRPC_ERROR_NONE != (error = grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
grpc_slice_sub(slice, (size_t)(cur - beg),
@@ -238,6 +244,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
grpc_slice_unref_internal(exec_ctx, slice);
return GRPC_ERROR_NONE;
} else if (remaining < p->frame_size) {
+ s->stats.incoming.data_bytes += remaining;
if (GRPC_ERROR_NONE != (error = grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
grpc_slice_sub(slice, (size_t)(cur - beg),
@@ -250,6 +257,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
return GRPC_ERROR_NONE;
} else {
GPR_ASSERT(remaining > p->frame_size);
+ s->stats.incoming.data_bytes += p->frame_size;
if (GRPC_ERROR_NONE !=
(grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
@@ -286,7 +294,6 @@ grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
/* grpc_error *error = parse_inner_buffer(exec_ctx, p, t, s, slice); */
- s->stats.incoming.framing_bytes += GRPC_SLICE_LENGTH(slice);
if (!s->pending_byte_stream) {
grpc_slice_ref_internal(slice);
grpc_slice_buffer_add(&s->frame_storage, slice);
@@ -295,7 +302,7 @@ grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
GPR_ASSERT(s->frame_storage.length == 0);
grpc_slice_ref_internal(slice);
grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice);
- grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_NONE);
s->on_next = NULL;
} else {
grpc_slice_ref_internal(slice);
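
The per-state increments added in this file split what was previously a blanket framing_bytes count (the removed line in grpc_chttp2_data_parser_parse) into framing bytes, i.e. the five gRPC message-header bytes consumed in states FH_0 through FH_4, and payload bytes. A worked example:

    /* One complete gRPC frame in a slice: type byte 0x00,
       big-endian length 3, then three payload bytes:
         00 | 00 00 00 03 | p0 p1 p2
       New accounting: framing_bytes += 5, data_bytes += 3.
       Old accounting: framing_bytes += 8 for the whole slice. */
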
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 04354e0dc2..3d7c6fbfad 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -117,7 +117,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
- grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
+ grpc_chttp2_initiate_write(exec_ctx, t, "ping response");
}
}
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index ccca0f1871..689dc8935c 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -93,7 +93,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
- if (reason != GRPC_HTTP2_NO_ERROR || s->header_frames_received < 2) {
+ if (reason != GRPC_HTTP2_NO_ERROR || s->metadata_buffer[1].size == 0) {
char *message;
gpr_asprintf(&message, "Received RST_STREAM with error code %d", reason);
error = grpc_error_set_int(
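
The rewritten condition pairs with the Trailers-Only parsing change later in this diff: a client may now receive trailing metadata in its very first HEADERS frame, so header_frames_received no longer indicates whether trailers arrived. Assuming metadata_buffer[1] is the stream's trailing-metadata buffer:

    /* RST_STREAM(NO_ERROR) is benign only if trailing metadata was
       already received (metadata_buffer[1].size != 0); otherwise the
       call would end without a grpc-status and must surface an error. */
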
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 815a87cbe3..682be2c89b 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -109,8 +109,7 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
received_update);
bool is_zero = t->outgoing_window <= 0;
if (was_zero && !is_zero) {
- grpc_chttp2_initiate_write(exec_ctx, t, false,
- "new_global_flow_control");
+ grpc_chttp2_initiate_write(exec_ctx, t, "new_global_flow_control");
}
}
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 28c6632695..a0e748e7b1 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -608,15 +608,14 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem **extra_headers,
+ size_t extra_headers_size,
grpc_metadata_batch *metadata,
const grpc_encode_header_options *options,
grpc_slice_buffer *outbuf) {
- framer_state st;
- grpc_linked_mdelem *l;
- gpr_timespec deadline;
-
GPR_ASSERT(options->stream_id != 0);
+ framer_state st;
st.seen_regular_header = 0;
st.stream_id = options->stream_id;
st.output = outbuf;
@@ -633,11 +632,14 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
if (c->advertise_table_size_change != 0) {
emit_advertise_table_size_change(c, &st);
}
+ for (size_t i = 0; i < extra_headers_size; ++i) {
+ hpack_enc(exec_ctx, c, *extra_headers[i], &st);
+ }
grpc_metadata_batch_assert_ok(metadata);
- for (l = metadata->list.head; l; l = l->next) {
+ for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) {
hpack_enc(exec_ctx, c, l->md, &st);
}
- deadline = metadata->deadline;
+ gpr_timespec deadline = metadata->deadline;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
deadline_enc(exec_ctx, c, deadline, &st);
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 84ab6dde2c..271192f894 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -85,6 +85,8 @@ typedef struct {
void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem **extra_headers,
+ size_t extra_headers_size,
grpc_metadata_batch *metadata,
const grpc_encode_header_options *options,
grpc_slice_buffer *outbuf);
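
Callers with nothing to prepend pass NULL and 0 for the new parameters, as the initial-metadata path in writing.c below now does; a caller synthesizing headers would look roughly like this sketch (population step assumed, variables as in the transport's writing path):

    grpc_mdelem *extra_headers[2];
    size_t num_extra_headers = 0;
    /* ... fill extra_headers[num_extra_headers++] with synthesized
       elements, e.g. a grpc-status for a Trailers-Only response ... */
    grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, extra_headers,
                              num_extra_headers, metadata, &hopt, &t->outbuf);
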
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 7476ff6188..7f37365558 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -1649,7 +1649,7 @@ static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, t, false, "force_rst_stream");
+ grpc_chttp2_initiate_write(exec_ctx, t, "force_rst_stream");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
@@ -1696,10 +1696,10 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
however -- it might be that we receive a RST_STREAM following this
and can avoid the extra write */
GRPC_CHTTP2_STREAM_REF(s, "final_rst");
- grpc_closure_sched(
- exec_ctx, grpc_closure_create(force_client_rst_stream, s,
- grpc_combiner_finally_scheduler(
- t->combiner, false)),
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(force_client_rst_stream, s,
+ grpc_combiner_finally_scheduler(t->combiner)),
GRPC_ERROR_NONE);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 933f254bde..9fa72ddbdf 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -58,7 +58,6 @@ typedef enum {
GRPC_CHTTP2_WRITE_STATE_IDLE,
GRPC_CHTTP2_WRITE_STATE_WRITING,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
} grpc_chttp2_write_state;
typedef enum {
@@ -443,12 +442,12 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
- bool complete_fetch_covered_by_poller;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
+ bool *trailing_metadata_available;
grpc_byte_stream **recv_message;
grpc_closure *recv_message_ready;
grpc_metadata_batch *recv_trailing_metadata;
@@ -534,8 +533,7 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- bool covered_by_poller, const char *reason);
+ grpc_chttp2_transport *t, const char *reason);
typedef enum {
GRPC_CHTTP2_NOTHING_TO_WRITE,
@@ -751,7 +749,7 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
@@ -767,8 +765,7 @@ void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
#endif
-//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
-#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+#ifndef NDEBUG
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 8bbd9c24b8..3c8b470b4f 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -418,7 +418,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", t, incoming_window,
incoming_frame_size);
if (t->incoming_window <= target_incoming_window / 2) {
- grpc_chttp2_initiate_write(exec_ctx, t, false, "flow_control");
+ grpc_chttp2_initiate_write(exec_ctx, t, "flow_control");
}
return GRPC_ERROR_NONE;
@@ -681,9 +681,19 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
t->parser_data = &t->hpack_parser;
switch (s->header_frames_received) {
case 0:
- t->hpack_parser.on_header = on_initial_header;
+ if (t->is_client && t->header_eof) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing Trailers-Only"));
+ if (s->trailing_metadata_available != NULL) {
+ *s->trailing_metadata_available = true;
+ }
+ t->hpack_parser.on_header = on_trailing_header;
+ } else {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
+ t->hpack_parser.on_header = on_initial_header;
+ }
break;
case 1:
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
break;
case 2:
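
For intuition, the Trailers-Only shape recognized above is a response whose first and only HEADERS frame carries END_STREAM, per the retry proposal linked from writing.c below; sketched at the wire level (values illustrative):

    /* HEADERS (END_HEADERS | END_STREAM)
         :status: 200
         content-type: application/grpc
         grpc-status: 12
         grpc-message: unimplemented
       No separate initial-metadata HEADERS frame and no DATA frames. */
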
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 5c3b9f5ada..315f2a67a2 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -111,7 +111,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
}
pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type;
t->ping_ctr++;
- grpc_closure_list_sched(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
grpc_slice_buffer_add(&t->outbuf,
@@ -162,6 +162,20 @@ static uint32_t target_write_size(grpc_chttp2_transport *t) {
return 1024 * 1024;
}
+// Returns true if initial_metadata contains only default headers.
+//
+// TODO(roth): The fact that we hard-code these particular headers here
+// is fairly ugly. Need some better way to know which headers are
+// default, maybe via a bit in the static metadata table?
+static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
+ int num_default_fields =
+ (initial_metadata->idx.named.status != NULL) +
+ (initial_metadata->idx.named.content_type != NULL) +
+ (initial_metadata->idx.named.grpc_encoding != NULL) +
+ (initial_metadata->idx.named.grpc_accept_encoding != NULL);
+ return (size_t)num_default_fields == initial_metadata->list.count;
+}
+
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
@@ -191,8 +205,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s) &&
stream_ref_if_not_destroyed(&s->refcount->refs)) {
- grpc_chttp2_initiate_write(exec_ctx, t, false,
- "transport.read_flow_control");
+ grpc_chttp2_initiate_write(exec_ctx, t, "transport.read_flow_control");
}
}
}
@@ -219,31 +232,59 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
s->send_initial_metadata != NULL, s->announce_window));
+ grpc_mdelem *extra_headers_for_trailing_metadata[2];
+ size_t num_extra_headers_for_trailing_metadata = 0;
+
/* send initial metadata if it's available */
- if (!sent_initial_metadata && s->send_initial_metadata) {
- grpc_encode_header_options hopt = {
- .stream_id = s->id,
- .is_eof = false,
- .use_true_binary_metadata =
- t->settings
- [GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != 0,
- .max_frame_size = t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- .stats = &s->stats.outgoing};
- grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor,
- s->send_initial_metadata, &hopt, &t->outbuf);
+ if (!sent_initial_metadata && s->send_initial_metadata != NULL) {
+ // We skip this on the server side if there is no custom initial
+ // metadata, there are no messages to send, and we are also sending
+ // trailing metadata. This results in a Trailers-Only response,
+ // which is required for retries, as per:
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid
+ if (t->is_client || s->fetching_send_message != NULL ||
+ s->flow_controlled_buffer.length != 0 ||
+ s->send_trailing_metadata == NULL ||
+ !is_default_initial_metadata(s->send_initial_metadata)) {
+ grpc_encode_header_options hopt = {
+ .stream_id = s->id,
+ .is_eof = false,
+ .use_true_binary_metadata =
+ t->settings
+ [GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != 0,
+ .max_frame_size = t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ .stats = &s->stats.outgoing};
+ grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
+ s->send_initial_metadata, &hopt, &t->outbuf);
+ now_writing = true;
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ if (!t->is_client) {
+ t->ping_recv_state.last_ping_recv_time =
+ gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.ping_strikes = 0;
+ }
+ } else {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
+ // When sending Trailers-Only, we need to move the :status and
+ // content-type headers to the trailers.
+ if (s->send_initial_metadata->idx.named.status != NULL) {
+ extra_headers_for_trailing_metadata
+ [num_extra_headers_for_trailing_metadata++] =
+ &s->send_initial_metadata->idx.named.status->md;
+ }
+ if (s->send_initial_metadata->idx.named.content_type != NULL) {
+ extra_headers_for_trailing_metadata
+ [num_extra_headers_for_trailing_metadata++] =
+ &s->send_initial_metadata->idx.named.content_type->md;
+ }
+ }
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
- now_writing = true;
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
- if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
- t->ping_recv_state.ping_strikes = 0;
- }
}
/* send any window updates */
if (s->announce_window > 0) {
@@ -321,6 +362,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
&s->stats.outgoing, &t->outbuf);
@@ -338,6 +380,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor,
+ extra_headers_for_trailing_metadata,
+ num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
}
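Annotation: the write path is the mirror image. A server batch that has trailing metadata queued, no message in flight, nothing buffered, and only default initial-metadata fields skips the initial HEADERS frame and folds :status / content-type into the trailers. A condensed sketch of the guard above, with the stream fields reduced to booleans:

#include <stdbool.h>
#include <stddef.h>

/* True when the initial-metadata HEADERS frame must be emitted; false when
 * the response can collapse to Trailers-Only. Parameters stand in for
 * s->fetching_send_message, s->flow_controlled_buffer.length, etc. */
static bool must_send_initial_metadata(bool is_client, bool has_message,
                                       size_t buffered_bytes,
                                       bool has_trailing_metadata,
                                       bool initial_metadata_is_default) {
  return is_client || has_message || buffered_bytes != 0 ||
         !has_trailing_metadata || !initial_metadata_is_default;
}

int main(void) {
  /* Server, empty body, default headers, trailers queued: Trailers-Only. */
  return must_send_initial_metadata(false, false, 0, true, true) ? 1 : 0;
}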
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index 8d0eb74d9d..29dfa885de 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -766,20 +766,50 @@ static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
bool is_canceled_or_failed = stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED];
if (is_canceled_or_failed) {
- if (op_id == OP_SEND_INITIAL_METADATA) result = false;
- if (op_id == OP_SEND_MESSAGE) result = false;
- if (op_id == OP_SEND_TRAILING_METADATA) result = false;
- if (op_id == OP_CANCEL_ERROR) result = false;
+ if (op_id == OP_SEND_INITIAL_METADATA) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
+ if (op_id == OP_SEND_MESSAGE) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
+ if (op_id == OP_SEND_TRAILING_METADATA) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
+ if (op_id == OP_CANCEL_ERROR) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
/* already executed */
if (op_id == OP_RECV_INITIAL_METADATA &&
- stream_state->state_op_done[OP_RECV_INITIAL_METADATA])
+ stream_state->state_op_done[OP_RECV_INITIAL_METADATA]) {
+ CRONET_LOG(GPR_DEBUG, "Because");
result = false;
- if (op_id == OP_RECV_MESSAGE &&
- stream_state->state_op_done[OP_RECV_MESSAGE])
+ }
+ if (op_id == OP_RECV_MESSAGE && op_state->state_op_done[OP_RECV_MESSAGE]) {
+ CRONET_LOG(GPR_DEBUG, "Because");
result = false;
+ }
if (op_id == OP_RECV_TRAILING_METADATA &&
- stream_state->state_op_done[OP_RECV_TRAILING_METADATA])
+ stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
+ CRONET_LOG(GPR_DEBUG, "Because");
result = false;
+ }
+ /* ON_COMPLETE can be processed if one of the following conditions is met:
+ * 1. the stream failed
+ * 2. the stream is cancelled, and the callback is received
+ * 3. the stream succeeded before cancel is effective
+ * 4. the stream is cancelled, and the stream is never started */
+ if (op_id == OP_ON_COMPLETE &&
+ !(stream_state->state_callback_received[OP_FAILED] ||
+ stream_state->state_callback_received[OP_CANCELED] ||
+ stream_state->state_callback_received[OP_SUCCEEDED] ||
+ !stream_state->state_op_done[OP_SEND_INITIAL_METADATA])) {
+ CRONET_LOG(GPR_DEBUG, "Because");
+ result = false;
+ }
} else if (op_id == OP_SEND_INITIAL_METADATA) {
/* already executed */
if (stream_state->state_op_done[OP_SEND_INITIAL_METADATA]) result = false;
@@ -868,7 +898,7 @@ static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
CRONET_LOG(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->recv_message &&
- !stream_state->state_op_done[OP_RECV_MESSAGE]) {
+ !op_state->state_op_done[OP_RECV_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->cancel_stream &&
@@ -1033,17 +1063,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
OP_RECV_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE);
} else if (stream_state->state_callback_received[OP_FAILED]) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE);
} else if (stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE);
@@ -1051,7 +1081,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &oas->s->state.rs.initial_metadata,
stream_op->payload->recv_initial_metadata.recv_initial_metadata);
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_NONE);
@@ -1063,22 +1093,24 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
- grpc_closure_sched(exec_ctx,
+ GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->state_callback_received[OP_FAILED]) {
CRONET_LOG(GPR_DEBUG, "Stream failed.");
- grpc_closure_sched(exec_ctx,
+ GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.read_stream_closed == true) {
/* No more data will be received */
CRONET_LOG(GPR_DEBUG, "read stream closed");
- grpc_closure_sched(exec_ctx,
+ GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@@ -1086,7 +1118,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->flush_read) {
CRONET_LOG(GPR_DEBUG, "flush read");
- grpc_closure_sched(exec_ctx,
+ GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@@ -1127,7 +1159,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
*((grpc_byte_buffer **)
stream_op->payload->recv_message.recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@@ -1181,7 +1213,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
}
*((grpc_byte_buffer **)stream_op->payload->recv_message.recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
- grpc_closure_sched(exec_ctx,
+ GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
@@ -1214,8 +1246,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_op->cancel_stream &&
op_can_be_run(stream_op, s, &oas->state, OP_CANCEL_ERROR)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_CANCEL_ERROR", oas);
- CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
if (s->cbs) {
+ CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
bidirectional_stream_cancel(s->cbs);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
@@ -1230,17 +1262,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
- grpc_closure_sched(exec_ctx, stream_op->on_complete,
+ GRPC_CLOSURE_SCHED(exec_ctx, stream_op->on_complete,
GRPC_ERROR_REF(stream_state->cancel_error));
} else if (stream_state->state_callback_received[OP_FAILED]) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->on_complete,
make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
} else {
/* All actions in this stream_op are complete. Call the on_complete
* callback
*/
- grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
}
oas->state.state_op_done[OP_ON_COMPLETE] = true;
oas->done = true;
@@ -1312,16 +1344,16 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
/* Cronet does not support :authority header field. We cancel the call when
this field is present in metadata */
if (op->recv_initial_metadata) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_CANCELLED);
}
if (op->recv_message) {
- grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready,
+ GRPC_CLOSURE_SCHED(exec_ctx, op->payload->recv_message.recv_message_ready,
GRPC_ERROR_CANCELLED);
}
- grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED);
return;
}
stream_obj *s = (stream_obj *)gs;
@@ -1335,7 +1367,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
stream_obj *s = (stream_obj *)gs;
null_and_maybe_free_read_buffer(s);
GRPC_ERROR_UNREF(s->state.cancel_error);
- grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
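Annotation: the expanded op_can_be_run keeps the same decisions but now logs why each op is deferred, and adds an explicit rule for running ON_COMPLETE on a canceled or failed stream. That four-case rule, restated as a standalone predicate (the OP_* state arrays become booleans):

#include <stdbool.h>

/* ON_COMPLETE may run on a canceled/failed stream iff one of:
 *   1. the stream failed;
 *   2. cancel was requested and its callback arrived;
 *   3. the stream succeeded before the cancel took effect;
 *   4. the stream was canceled before it ever started. */
static bool on_complete_can_run(bool cb_failed, bool cb_canceled,
                                bool cb_succeeded,
                                bool sent_initial_metadata) {
  return cb_failed || cb_canceled || cb_succeeded || !sent_initial_metadata;
}

int main(void) {
  /* Canceled before start: safe to complete the batch. */
  return on_complete_can_run(false, false, false, false) ? 0 : 1;
}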
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 4b7f258740..8fdef0bc64 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -373,3 +373,29 @@ bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args) {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(args, GRPC_ARG_MINIMAL_STACK), false);
}
+
+grpc_arg grpc_channel_arg_string_create(char *name, char *value) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_STRING;
+ arg.key = name;
+ arg.value.string = value;
+ return arg;
+}
+
+grpc_arg grpc_channel_arg_integer_create(char *name, int value) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_INTEGER;
+ arg.key = name;
+ arg.value.integer = value;
+ return arg;
+}
+
+grpc_arg grpc_channel_arg_pointer_create(
+ char *name, void *value, const grpc_arg_pointer_vtable *vtable) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = name;
+ arg.value.pointer.p = value;
+ arg.value.pointer.vtable = vtable;
+ return arg;
+}
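Annotation: these three constructors replace the field-by-field grpc_arg initialization that httpcli.c, later in this patch, previously open-coded. Typical usage; the integer key below is only an example, any GRPC_ARG_* key works the same way:

#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_args.h"

void set_backoff_example(void) {
  grpc_arg arg = grpc_channel_arg_integer_create(
      (char *)GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 10000);
  grpc_channel_args args = {1, &arg};
  (void)args; /* hand &args to whichever channel-creation API is in play */
}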
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index ba1d234005..f649a8d9ec 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -112,4 +112,10 @@ int grpc_channel_arg_get_integer(const grpc_arg *arg,
bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value);
+// Helpers for creating channel args.
+grpc_arg grpc_channel_arg_string_create(char *name, char *value);
+grpc_arg grpc_channel_arg_integer_create(char *name, int value);
+grpc_arg grpc_channel_arg_pointer_create(char *name, void *value,
+ const grpc_arg_pointer_vtable *vtable);
+
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index d6877f6a91..a80f8aa826 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -126,7 +126,7 @@ typedef struct {
/* Destroy per call data.
The filter does not need to do any chaining.
The bottom filter of a stack will be passed a non-NULL pointer to
- \a then_schedule_closure that should be passed to grpc_closure_sched when
+ \a then_schedule_closure that should be passed to GRPC_CLOSURE_SCHED when
destruction is complete. \a final_info contains data about the completed
call, mainly for reporting purposes. */
void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
@@ -234,7 +234,7 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_stack *call_stack,
grpc_polling_entity *pollent);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_CALL_STACK_REF(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount, reason)
#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index a351e98203..2cb83f4114 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -190,7 +190,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
- grpc_closure_sched(exec_ctx, &mgr->on_handshake_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, &mgr->on_handshake_done, error);
mgr->shutdown = true;
} else {
grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
@@ -245,13 +245,13 @@ void grpc_handshake_manager_do_handshake(
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;
- grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr,
+ GRPC_CLOSURE_INIT(&mgr->call_next_handshaker, call_next_handshaker, mgr,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args,
+ GRPC_CLOSURE_INIT(&mgr->on_handshake_done, on_handshake_done, &mgr->args,
grpc_schedule_on_exec_ctx);
// Start deadline timer, which owns a ref.
gpr_ref(&mgr->refs);
- grpc_closure_init(&mgr->on_timeout, on_timeout, mgr,
+ GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &mgr->deadline_timer,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 492eb5ad7b..77af7b7c08 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -25,6 +25,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/endpoint.h"
@@ -90,7 +91,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
grpc_error *error) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
req->context->pollset_set);
- grpc_closure_sched(exec_ctx, req->on_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, req->on_done, error);
grpc_http_parser_destroy(&req->parser);
if (req->addresses != NULL) {
grpc_resolved_addresses_destroy(req->addresses);
@@ -213,13 +214,11 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
return;
}
addr = &req->addresses->addrs[req->next_address++];
- grpc_closure_init(&req->connected, on_connected, req,
+ GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
- grpc_arg arg;
- arg.key = GRPC_ARG_RESOURCE_QUOTA;
- arg.type = GRPC_ARG_POINTER;
- arg.value.pointer.p = req->resource_quota;
- arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
+ grpc_arg arg = grpc_channel_arg_pointer_create(
+ GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
+ grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
req->context->pollset_set, &args, addr,
@@ -256,8 +255,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
req->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
- grpc_closure_init(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&req->done_write, done_write, req,
+ GRPC_CLOSURE_INIT(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&req->done_write, done_write, req,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&req->incoming);
grpc_slice_buffer_init(&req->outgoing);
@@ -271,7 +270,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_resolve_address(
exec_ctx, request->host, req->handshaker->default_port,
req->context->pollset_set,
- grpc_closure_create(on_resolved, req, grpc_schedule_on_exec_ctx),
+ GRPC_CLOSURE_CREATE(on_resolved, req, grpc_schedule_on_exec_ctx),
&req->addresses);
}
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 6500192277..97c2886525 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -25,6 +25,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/security/transport/security_handshaker.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -85,7 +86,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
}
- grpc_closure_sched(exec_ctx, on_peer_checked, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
@@ -157,7 +158,6 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
gpr_timespec deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint)) {
- grpc_channel_security_connector *sc = NULL;
on_done_closure *c = gpr_malloc(sizeof(*c));
const char *pem_root_certs = grpc_get_default_ssl_roots();
if (pem_root_certs == NULL) {
@@ -168,11 +168,13 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
}
c->func = on_done;
c->arg = arg;
- c->handshake_mgr = grpc_handshake_manager_create();
+ grpc_channel_security_connector *sc = NULL;
GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
exec_ctx, pem_root_certs, host, &sc) == GRPC_SECURITY_OK);
- grpc_channel_security_connector_add_handshakers(exec_ctx, sc,
- c->handshake_mgr);
+ grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base);
+ grpc_channel_args args = {1, &channel_arg};
+ c->handshake_mgr = grpc_handshake_manager_create();
+ grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, &args, c->handshake_mgr);
grpc_handshake_manager_do_handshake(
exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline,
NULL /* acceptor */, on_handshake_done, c /* user_data */);
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 72a1b20443..e028e72ed6 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -24,14 +24,30 @@
#include "src/core/lib/profiling/timers.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_closure = GRPC_TRACER_INITIALIZER(false);
+#endif
+
+#ifndef NDEBUG
+grpc_closure *grpc_closure_init(const char *file, int line,
+ grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg,
+ grpc_closure_scheduler *scheduler) {
+#else
grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg,
grpc_closure_scheduler *scheduler) {
+#endif
closure->cb = cb;
closure->cb_arg = cb_arg;
closure->scheduler = scheduler;
#ifndef NDEBUG
closure->scheduled = false;
+ closure->file_initiated = NULL;
+ closure->line_initiated = 0;
+ closure->run = false;
+ closure->file_created = file;
+ closure->line_created = line;
#endif
return closure;
}
@@ -100,19 +116,39 @@ static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
cb(exec_ctx, cb_arg, error);
}
+#ifndef NDEBUG
+grpc_closure *grpc_closure_create(const char *file, int line,
+ grpc_iomgr_cb_func cb, void *cb_arg,
+ grpc_closure_scheduler *scheduler) {
+#else
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
+#endif
wrapped_closure *wc = gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
+#ifndef NDEBUG
+ grpc_closure_init(file, line, &wc->wrapper, closure_wrapper, wc, scheduler);
+#else
grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler);
+#endif
return &wc->wrapper;
}
+#ifndef NDEBUG
+void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
+ grpc_closure *c, grpc_error *error) {
+#else
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
+#endif
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
+#ifndef NDEBUG
+ c->file_initiated = file;
+ c->line_initiated = line;
+ c->run = true;
+#endif
assert(c->cb);
c->scheduler->vtable->run(exec_ctx, c, error);
} else {
@@ -121,13 +157,21 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_END("grpc_closure_run", 0);
}
+#ifndef NDEBUG
+void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
+ grpc_closure *c, grpc_error *error) {
+#else
void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
+#endif
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
#ifndef NDEBUG
GPR_ASSERT(!c->scheduled);
c->scheduled = true;
+ c->file_initiated = file;
+ c->line_initiated = line;
+ c->run = false;
#endif
assert(c->cb);
c->scheduler->vtable->sched(exec_ctx, c, error);
@@ -137,13 +181,21 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_END("grpc_closure_sched", 0);
}
+#ifndef NDEBUG
+void grpc_closure_list_sched(const char *file, int line,
+ grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+#else
void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+#endif
grpc_closure *c = list->head;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
GPR_ASSERT(!c->scheduled);
c->scheduled = true;
+ c->file_initiated = file;
+ c->line_initiated = line;
+ c->run = false;
#endif
assert(c->cb);
c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
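Annotation: in debug builds each closure now remembers where it was created and where it was last run or scheduled, which turns the GPR_ASSERT(!c->scheduled) double-scheduling check from a bare abort into something traceable. A sketch of how that metadata could be surfaced; the report helper is hypothetical, only the fields mirror the patch:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative copy of the debug fields added to grpc_closure. */
typedef struct {
  bool scheduled;
  bool run; /* true = last action was run, false = sched */
  const char *file_created;
  int line_created;
  const char *file_initiated;
  int line_initiated;
} closure_debug;

/* Hypothetical diagnostic for a double-schedule. */
static void report_double_schedule(const closure_debug *d) {
  fprintf(stderr, "closure created at %s:%d was already %s at %s:%d\n",
          d->file_created, d->line_created, d->run ? "run" : "scheduled",
          d->file_initiated, d->line_initiated);
}

int main(void) {
  closure_debug d = {true, false, "foo.c", 10, "bar.c", 42};
  report_double_schedule(&d);
  return 0;
}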
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 25086fa483..cd32a4ba38 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -26,9 +26,17 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/support/mpscq.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_closure;
+#endif
+
typedef struct grpc_closure_list {
grpc_closure *head;
grpc_closure *tail;
@@ -38,7 +46,9 @@ typedef struct grpc_closure_list {
*
* \param arg Arbitrary input.
* \param error GRPC_ERROR_NONE if no error occurred, otherwise some grpc_error
- * describing what went wrong */
+ * describing what went wrong.
+ * Error contract: it is not the cb's job to unref this error;
+ * the closure scheduler will do that after the cb returns */
typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -85,19 +95,47 @@ struct grpc_closure {
uintptr_t scratch;
} error_data;
+// Extra tracing and debugging for grpc_closure. This incurs a decent amount
+// of overhead per closure, so it is compiled in only for debug (!NDEBUG)
+// builds.
#ifndef NDEBUG
bool scheduled;
+ bool run; // true = run, false = scheduled
+ const char *file_created;
+ int line_created;
+ const char *file_initiated;
+ int line_initiated;
#endif
};
/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
+#ifndef NDEBUG
+grpc_closure *grpc_closure_init(const char *file, int line,
+ grpc_closure *closure, grpc_iomgr_cb_func cb,
+ void *cb_arg,
+ grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
+ grpc_closure_init(__FILE__, __LINE__, closure, cb, cb_arg, scheduler)
+#else
grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg,
grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
+ grpc_closure_init(closure, cb, cb_arg, scheduler)
+#endif
/* Create a heap allocated closure: try to avoid except for very rare events */
+#ifndef NDEBUG
+grpc_closure *grpc_closure_create(const char *file, int line,
+ grpc_iomgr_cb_func cb, void *cb_arg,
+ grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \
+ grpc_closure_create(__FILE__, __LINE__, cb, cb_arg, scheduler)
+#else
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler);
+#define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \
+ grpc_closure_create(cb, cb_arg, scheduler)
+#endif
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
@@ -123,16 +161,48 @@ bool grpc_closure_list_empty(grpc_closure_list list);
/** Run a closure directly. Caller ensures that no locks are being held above.
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
+#ifndef NDEBUG
+void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_error *error);
+#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
+ grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error)
+#else
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
+#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
+ grpc_closure_run(exec_ctx, closure, error)
+#endif
/** Schedule a closure to be run. Does not need to be run from a safe point. */
+#ifndef NDEBUG
+void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_error *error);
+#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
+ grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error)
+#else
void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
+#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
+ grpc_closure_sched(exec_ctx, closure, error)
+#endif
/** Schedule all closures in a list to be run. Does not need to be run from a
* safe point. */
+#ifndef NDEBUG
+void grpc_closure_list_sched(const char *file, int line,
+ grpc_exec_ctx *exec_ctx,
+ grpc_closure_list *closure_list);
+#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
+ grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list)
+#else
void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
grpc_closure_list *closure_list);
+#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
+ grpc_closure_list_sched(exec_ctx, closure_list)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
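Annotation: call sites are expected to go through the upper-case macros only, so the file/line plumbing exists solely in debug builds. A minimal usage sketch against this header; grpc_schedule_on_exec_ctx is the plain exec_ctx scheduler from exec_ctx.h, and the callback body is illustrative:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* Per the contract above, the scheduler unrefs 'error' after this
   * callback returns; do not unref it here. */
  (void)exec_ctx;
  (void)arg;
  (void)error;
}

static void schedule_example(grpc_exec_ctx *exec_ctx) {
  grpc_closure c;
  GRPC_CLOSURE_INIT(&c, on_done, NULL, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(exec_ctx, &c, GRPC_ERROR_NONE);
}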
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 4b8102ea54..7f9c5d837f 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -24,7 +24,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include "src/core/lib/iomgr/workqueue.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
@@ -41,93 +41,47 @@ grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
- grpc_workqueue *optional_workqueue;
- grpc_closure_scheduler uncovered_scheduler;
- grpc_closure_scheduler covered_scheduler;
- grpc_closure_scheduler uncovered_finally_scheduler;
- grpc_closure_scheduler covered_finally_scheduler;
+ grpc_closure_scheduler scheduler;
+ grpc_closure_scheduler finally_scheduler;
gpr_mpscq queue;
+ // either:
+ // a pointer to the initiating exec ctx if that is the only exec_ctx that has
+ // ever queued to this combiner, or NULL. If this is non-null, it's not
+ // dereferenceable (since the initiating exec_ctx may have gone out of scope)
+ gpr_atm initiating_exec_ctx_or_null;
// state is:
// lower bit - zero if orphaned (STATE_UNORPHANED)
// other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
- // number of elements in the list that are covered by a poller: if >0, we can
- // offload safely
- gpr_atm elements_covered_by_poller;
bool time_to_execute_final_list;
- bool final_list_covered_by_poller;
grpc_closure_list final_list;
grpc_closure offload;
gpr_refcount refs;
};
-static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_error *error);
-static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error);
-static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure,
- grpc_error *error);
-static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure,
- grpc_error *error);
-
-static const grpc_closure_scheduler_vtable scheduler_uncovered = {
- combiner_exec_uncovered, combiner_exec_uncovered,
- "combiner:immediately:uncovered"};
-static const grpc_closure_scheduler_vtable scheduler_covered = {
- combiner_exec_covered, combiner_exec_covered,
- "combiner:immediately:covered"};
-static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
- combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
- "combiner:finally:uncovered"};
-static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
- combiner_finally_exec_covered, combiner_finally_exec_covered,
- "combiner:finally:covered"};
-static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-
-typedef struct {
- grpc_error *error;
- bool covered_by_poller;
-} error_data;
-
-static uintptr_t pack_error_data(error_data d) {
- return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
-}
-
-static error_data unpack_error_data(uintptr_t p) {
- return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
-}
+static const grpc_closure_scheduler_vtable scheduler = {
+ combiner_exec, combiner_exec, "combiner:immediately"};
+static const grpc_closure_scheduler_vtable finally_scheduler = {
+ combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
-static bool is_covered_by_poller(grpc_combiner *lock) {
- return lock->final_list_covered_by_poller ||
- gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
-}
-
-#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
-#define IS_COVERED_BY_POLLER_ARGS(lock) \
- (lock)->final_list_covered_by_poller, \
- gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
- is_covered_by_poller((lock))
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
+grpc_combiner *grpc_combiner_create(void) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
- lock->optional_workqueue = optional_workqueue;
- lock->final_list_covered_by_poller = false;
- lock->uncovered_scheduler.vtable = &scheduler_uncovered;
- lock->covered_scheduler.vtable = &scheduler_covered;
- lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
- lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
+ lock->scheduler.vtable = &scheduler;
+ lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
- gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- grpc_closure_init(&lock->offload, offload, lock,
- grpc_workqueue_scheduler(lock->optional_workqueue));
+ GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -136,7 +90,6 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
- GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
gpr_free(lock);
}
@@ -149,12 +102,14 @@ static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
-#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
-#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
- "combiner[%p] %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
- gpr_atm_no_barrier_load(&lock->refs.count), \
- gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason);
+#ifndef NDEBUG
+#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
+ if (GRPC_TRACER_ON(grpc_combiner_trace)) { \
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
+ "C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
+ gpr_atm_no_barrier_load(&lock->refs.count), \
+ gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason); \
+ }
#else
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif
@@ -193,48 +148,40 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
}
}
-static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *cl, grpc_error *error,
- bool covered_by_poller) {
+#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
+ ((grpc_combiner *)(((char *)((closure)->scheduler)) - \
+ offsetof(grpc_combiner, scheduler_name)))
+
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+ grpc_error *error) {
GPR_TIMER_BEGIN("combiner.execute", 0);
+ grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
- GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
- cl, covered_by_poller, last));
- GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
- assert(cl->cb);
- cl->error_data.scratch =
- pack_error_data((error_data){error, covered_by_poller});
- if (covered_by_poller) {
- gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
- }
- gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
+ lock, cl, last));
if (last == 1) {
+ gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
+ (gpr_atm)exec_ctx);
// first element on this list: add it to the list of combiner locks
// executing within this exec_ctx
push_last_on_exec_ctx(exec_ctx, lock);
+ } else {
+ // there may be a race with setting here: if that happens, we may delay
+ // offload for one or two actions, and that's fine
+ gpr_atm initiator =
+ gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
+ if (initiator != 0 && initiator != (gpr_atm)exec_ctx) {
+ gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
+ }
}
+ GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
+ assert(cl->cb);
+ cl->error_data.error = error;
+ gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
GPR_TIMER_END("combiner.execute", 0);
}
-#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
- ((grpc_combiner *)(((char *)((closure)->scheduler)) - \
- offsetof(grpc_combiner, scheduler_name)))
-
-static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
- grpc_error *error) {
- combiner_exec(exec_ctx,
- COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
- error, false);
-}
-
-static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
- grpc_error *error) {
- combiner_exec(exec_ctx,
- COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
- error, true);
-}
-
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@@ -250,9 +197,8 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
- lock->optional_workqueue));
- grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
+ GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
@@ -263,22 +209,23 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
return false;
}
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG,
- "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
- "is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
- " exec_ctx_ready_to_finish=%d "
- "time_to_execute_final_list=%d",
- lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
- grpc_exec_ctx_ready_to_finish(exec_ctx),
- lock->time_to_execute_final_list));
-
- if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
- grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+ bool contended =
+ gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;
+
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_continue_exec_ctx "
+ "contended=%d "
+ "exec_ctx_ready_to_finish=%d "
+ "time_to_execute_final_list=%d",
+ lock, contended,
+ grpc_exec_ctx_ready_to_finish(exec_ctx),
+ lock->time_to_execute_final_list));
+
+ if (contended && grpc_exec_ctx_ready_to_finish(exec_ctx) &&
+ grpc_executor_is_threaded()) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
- // this execution context wants to move on, and we have a workqueue (and
- // so can help the execution context out): schedule remaining work to be
- // picked up on the workqueue
+ // this execution context wants to move on: schedule remaining work to be
+ // picked up on the executor
queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
@@ -295,29 +242,23 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
- if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
- queue_offload(exec_ctx, lock);
- }
+ queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
- error_data err = unpack_error_data(cl->error_data.scratch);
+ grpc_error *cl_err = cl->error_data.error;
#ifndef NDEBUG
cl->scheduled = false;
#endif
- cl->cb(exec_ctx, cl->cb_arg, err.error);
- if (err.covered_by_poller) {
- gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
- }
- GRPC_ERROR_UNREF(err.error);
+ cl->cb(exec_ctx, cl->cb_arg, cl_err);
+ GRPC_ERROR_UNREF(cl_err);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
- lock->final_list_covered_by_poller = false;
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
@@ -383,20 +324,20 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error);
-static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
- grpc_combiner *lock, grpc_closure *closure,
- grpc_error *error,
- bool covered_by_poller) {
- GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
- closure, exec_ctx->active_combiner, covered_by_poller));
+static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_error *error) {
+ grpc_combiner *lock =
+ COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_execute_finally c=%p; ac=%p",
+ lock, closure, exec_ctx->active_combiner));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
- grpc_closure_sched(
- exec_ctx, grpc_closure_create(enqueue_finally, closure,
- grpc_combiner_scheduler(lock, false)),
- error);
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_CREATE(enqueue_finally, closure,
+ grpc_combiner_scheduler(lock)),
+ error);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
@@ -404,42 +345,20 @@ static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
}
- if (covered_by_poller) {
- lock->final_list_covered_by_poller = true;
- }
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
- combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
- GRPC_ERROR_REF(error), false);
-}
-
-static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
- grpc_closure *cl,
- grpc_error *error) {
- combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
- cl, uncovered_finally_scheduler),
- cl, error, false);
-}
-
-static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
- grpc_closure *cl, grpc_error *error) {
- combiner_execute_finally(
- exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
- cl, error, true);
+ combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
}
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
- bool covered_by_poller) {
- return covered_by_poller ? &combiner->covered_scheduler
- : &combiner->uncovered_scheduler;
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
+ return &combiner->scheduler;
}
grpc_closure_scheduler *grpc_combiner_finally_scheduler(
- grpc_combiner *combiner, bool covered_by_poller) {
- return covered_by_poller ? &combiner->covered_finally_scheduler
- : &combiner->uncovered_finally_scheduler;
+ grpc_combiner *combiner) {
+ return &combiner->finally_scheduler;
}
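Annotation: all of the covered/uncovered poller accounting collapses into one heuristic: remember the exec_ctx that first queued work; if a different exec_ctx ever enqueues, null the slot and treat the combiner as contended, making it eligible for offload to the executor. A standalone sketch of that detection, using C11 atomics in place of gpr_atm:

#include <stdatomic.h>
#include <stdint.h>

/* Mirror of initiating_exec_ctx_or_null: the stored pointer is only ever
 * compared, never dereferenced, so staleness is harmless. */
static _Atomic uintptr_t g_initiator;

static void note_enqueue(uintptr_t exec_ctx, int queue_was_empty) {
  if (queue_was_empty) {
    atomic_store_explicit(&g_initiator, exec_ctx, memory_order_relaxed);
  } else {
    uintptr_t cur = atomic_load_explicit(&g_initiator, memory_order_relaxed);
    if (cur != 0 && cur != exec_ctx) {
      /* Second distinct initiator: mark contended. Races here only delay
       * offload by an action or two, as the comment in the patch notes. */
      atomic_store_explicit(&g_initiator, 0, memory_order_relaxed);
    }
  }
}

static int is_contended(void) {
  return atomic_load_explicit(&g_initiator, memory_order_relaxed) == 0;
}

int main(void) {
  note_enqueue(0x1, 1);
  note_enqueue(0x2, 0); /* different exec_ctx: contended from now on */
  return is_contended() ? 0 : 1;
}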
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index d6571ad4c6..8e0434369d 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -33,10 +33,9 @@
// Initialize the lock; load is shifted to the executor when necessary
-grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
+grpc_combiner *grpc_combiner_create(void);
-//#define GRPC_COMBINER_REFCOUNT_DEBUG
-#ifdef GRPC_COMBINER_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_ARGS \
, const char *file, int line, const char *reason
#define GRPC_COMBINER_REF(combiner, reason) \
@@ -56,11 +55,9 @@ grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
// Fetch a scheduler to schedule closures against
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
- bool covered_by_poller);
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock);
// Scheduler to execute \a action within the lock just prior to unlocking.
-grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
- bool covered_by_poller);
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
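Annotation: with the covered_by_poller parameter gone, using a combiner is a two-step affair: get its scheduler, then schedule closures against it. A short usage sketch built from the APIs in this header; my_locked_cb is illustrative:

#include "src/core/lib/iomgr/combiner.h"

static void my_locked_cb(grpc_exec_ctx *exec_ctx, void *arg,
                         grpc_error *error) {
  /* Runs serialized with every other closure scheduled on this combiner. */
  (void)exec_ctx;
  (void)arg;
  (void)error;
}

static void run_under_lock(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
                           void *arg) {
  /* GRPC_CLOSURE_CREATE's heap wrapper frees itself after running. */
  GRPC_CLOSURE_SCHED(exec_ctx,
                     GRPC_CLOSURE_CREATE(my_locked_cb, arg,
                                         grpc_combiner_scheduler(lock)),
                     GRPC_ERROR_NONE);
}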
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index 116f18424c..37cce335ca 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -54,10 +54,6 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
-grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
- return ep->vtable->get_workqueue(ep);
-}
-
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
return ep->vtable->get_resource_user(ep);
}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index ec355d413a..8f0523a981 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -37,7 +37,6 @@ struct grpc_endpoint_vtable {
grpc_slice_buffer *slices, grpc_closure *cb);
void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_slice_buffer *slices, grpc_closure *cb);
- grpc_workqueue *(*get_workqueue)(grpc_endpoint *ep);
void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset);
void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -63,9 +62,6 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep);
*/
int grpc_endpoint_get_fd(grpc_endpoint *ep);
-/* Retrieve a reference to the workqueue associated with this endpoint */
-grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
-
/* Write slices out to the socket.
If the connection is ready for more data after the end of the call, it
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 68884226b5..a95929a1fb 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -30,10 +30,15 @@
#include <grpc/support/log_windows.h>
#endif
+#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/error_internal.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_error_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
static const char *error_int_name(grpc_error_ints key) {
switch (key) {
case GRPC_ERROR_INT_ERRNO:
@@ -119,14 +124,14 @@ bool grpc_error_is_special(grpc_error *err) {
err == GRPC_ERROR_CANCELLED;
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
-grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
- const char *func) {
+#ifndef NDEBUG
+grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) {
if (grpc_error_is_special(err)) return err;
- gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
- gpr_atm_no_barrier_load(&err->atomics.refs.count),
- gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line,
- func);
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
+ gpr_atm_no_barrier_load(&err->atomics.refs.count),
+ gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line);
+ }
gpr_ref(&err->atomics.refs);
return err;
}
@@ -172,14 +177,14 @@ static void error_destroy(grpc_error *err) {
gpr_free(err);
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
-void grpc_error_unref(grpc_error *err, const char *file, int line,
- const char *func) {
+#ifndef NDEBUG
+void grpc_error_unref(grpc_error *err, const char *file, int line) {
if (grpc_error_is_special(err)) return;
- gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
- gpr_atm_no_barrier_load(&err->atomics.refs.count),
- gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line,
- func);
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
+ gpr_atm_no_barrier_load(&err->atomics.refs.count),
+ gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line);
+ }
if (gpr_unref(&err->atomics.refs)) {
error_destroy(err);
}
@@ -202,13 +207,17 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
return UINT8_MAX;
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_error *orig = *err;
#endif
*err = gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- if (*err != orig) gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (*err != orig) {
+ gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
+ }
+ }
#endif
}
uint8_t placement = (*err)->arena_size;
@@ -316,8 +325,10 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line);
+ }
#endif
err->arena_size = 0;
@@ -395,8 +406,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
+ }
#endif
// bulk memcpy of the rest of the struct.
size_t skip = sizeof(&out->atomics);
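Annotation: refcount logging for errors now ships in every debug build but stays quiet behind the grpc_trace_error_refcount flag, instead of requiring a source edit to define GRPC_ERROR_REFCOUNT_DEBUG. The generic shape of that gate, with a plain bool standing in for gRPC's tracer machinery:

#include <stdbool.h>
#include <stdio.h>

static bool g_trace_refcount = false; /* stand-in for the tracer flag */

static void traced_ref(void *p, long refs) {
#ifndef NDEBUG
  if (g_trace_refcount) {
    fprintf(stderr, "%p: %ld -> %ld\n", p, refs, refs + 1);
  }
#endif
  /* the actual ref increment happens unconditionally */
}

int main(void) {
  long refs = 1;
  g_trace_refcount = true; /* enable spam at runtime, debug builds only */
  traced_ref(&refs, refs);
  return 0;
}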
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index e626845fa4..b362948691 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -26,6 +26,8 @@
#include <grpc/status.h>
#include <grpc/support/time.h>
+#include "src/core/lib/debug/trace.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -36,6 +38,10 @@ extern "C" {
typedef struct grpc_error grpc_error;
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_error_refcount;
+#endif
+
typedef enum {
/// 'errno' from the operating system
GRPC_ERROR_INT_ERRNO,
@@ -149,15 +155,11 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_copied_string(desc), \
errs, count)
-//#define GRPC_ERROR_REFCOUNT_DEBUG
-#ifdef GRPC_ERROR_REFCOUNT_DEBUG
-grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
- const char *func);
-void grpc_error_unref(grpc_error *err, const char *file, int line,
- const char *func);
-#define GRPC_ERROR_REF(err) grpc_error_ref(err, __FILE__, __LINE__, __func__)
-#define GRPC_ERROR_UNREF(err) \
- grpc_error_unref(err, __FILE__, __LINE__, __func__)
+#ifndef NDEBUG
+grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line);
+void grpc_error_unref(grpc_error *err, const char *file, int line);
+#define GRPC_ERROR_REF(err) grpc_error_ref(err, __FILE__, __LINE__)
+#define GRPC_ERROR_UNREF(err) grpc_error_unref(err, __FILE__, __LINE__)
#else
grpc_error *grpc_error_ref(grpc_error *err);
void grpc_error_unref(grpc_error *err);
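Annotation: the ref/unref macros now capture file and line in every debug build rather than only behind GRPC_ERROR_REFCOUNT_DEBUG; call sites are unchanged. A short sketch of the ownership idiom, assuming a callback that wants to retain an error past its own return (per the closure error contract earlier in this patch):

#include "src/core/lib/iomgr/error.h"

static grpc_error *g_saved_error = GRPC_ERROR_NONE;

static void remember_error(grpc_error *error) {
  /* Take our own ref: the closure scheduler unrefs the callback's 'error'
   * for us, so anything stored must be independently referenced. */
  GRPC_ERROR_UNREF(g_saved_error);
  g_saved_error = GRPC_ERROR_REF(error);
}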
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c
index 7d7aa44912..66ba601adb 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.c
@@ -43,7 +43,6 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@@ -115,7 +114,9 @@ struct grpc_pollset {
* Pollset-set Declarations
*/
-struct grpc_pollset_set {};
+struct grpc_pollset_set {
+ char unused;
+};
/*******************************************************************************
* Common helpers
@@ -193,8 +194,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
@@ -236,7 +239,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
close(fd->fd);
}
- grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
grpc_iomgr_unregister_object(&fd->iomgr_object);
grpc_lfev_destroy(&fd->read_closure);
@@ -268,10 +271,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
- return (grpc_workqueue *)0xb0b51ed;
-}
-
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
@@ -298,8 +297,6 @@ GPR_TLS_DECL(g_current_thread_worker);
static gpr_atm g_active_poller;
static pollset_neighbourhood *g_neighbourhoods;
static size_t g_num_neighbourhoods;
-static gpr_mu g_wq_mu;
-static grpc_closure_list g_wq_items;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
@@ -348,8 +345,6 @@ static grpc_error *pollset_global_init(void) {
gpr_atm_no_barrier_store(&g_active_poller, 0);
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
- gpr_mu_init(&g_wq_mu);
- g_wq_items = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
if (err != GRPC_ERROR_NONE) return err;
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
.data.ptr = &global_wakeup_fd};
@@ -368,7 +363,6 @@ static grpc_error *pollset_global_init(void) {
static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
- gpr_mu_destroy(&g_wq_mu);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_destroy(&g_neighbourhoods[i].mu);
@@ -435,7 +429,7 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset) {
if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
pollset->begin_refs == 0) {
- grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
pollset->shutdown_closure = NULL;
}
}
@@ -492,9 +486,6 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
for (int i = 0; i < r; i++) {
void *data_ptr = events[i].data.ptr;
if (data_ptr == &global_wakeup_fd) {
- gpr_mu_lock(&g_wq_mu);
- grpc_closure_list_move(&g_wq_items, &exec_ctx->closure_list);
- gpr_mu_unlock(&g_wq_mu);
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else {
@@ -777,84 +768,6 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {}
/*******************************************************************************
- * Workqueue Definitions
- */
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {}
-#endif
-
-static void wq_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- // find a neighbourhood to wakeup
- bool scheduled = false;
- size_t initial_neighbourhood = choose_neighbourhood();
- for (size_t i = 0; !scheduled && i < g_num_neighbourhoods; i++) {
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(initial_neighbourhood + i) % g_num_neighbourhoods];
- if (gpr_mu_trylock(&neighbourhood->mu)) {
- if (neighbourhood->active_root != NULL) {
- grpc_pollset *inspect = neighbourhood->active_root;
- do {
- if (gpr_mu_trylock(&inspect->mu)) {
- if (inspect->root_worker != NULL) {
- grpc_pollset_worker *inspect_worker = inspect->root_worker;
- do {
- if (inspect_worker->kick_state == UNKICKED) {
- inspect_worker->kick_state = KICKED;
- grpc_closure_list_append(
- &inspect_worker->schedule_on_end_work, closure, error);
- if (inspect_worker->initialized_cv) {
- gpr_cv_signal(&inspect_worker->cv);
- }
- scheduled = true;
- }
- inspect_worker = inspect_worker->next;
- } while (!scheduled && inspect_worker != inspect->root_worker);
- }
- gpr_mu_unlock(&inspect->mu);
- }
- inspect = inspect->next;
- } while (!scheduled && inspect != neighbourhood->active_root);
- }
- gpr_mu_unlock(&neighbourhood->mu);
- }
- }
- if (!scheduled) {
- gpr_mu_lock(&g_wq_mu);
- grpc_closure_list_append(&g_wq_items, closure, error);
- gpr_mu_unlock(&g_wq_mu);
- GRPC_LOG_IF_ERROR("workqueue_scheduler",
- grpc_wakeup_fd_wakeup(&global_wakeup_fd));
- }
-}
-
-static const grpc_closure_scheduler_vtable
- singleton_workqueue_scheduler_vtable = {wq_sched, wq_sched,
- "epoll1_workqueue"};
-
-static grpc_closure_scheduler singleton_workqueue_scheduler = {
- &singleton_workqueue_scheduler_vtable};
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- return &singleton_workqueue_scheduler;
-}
-
-/*******************************************************************************
* Pollset-set Definitions
*/
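Deleting wq_sched removes ev_epoll1's trick of handing closures to whichever unkicked poller it could find, with the g_wq_items list plus the global wakeup fd as the fallback. After this change the engine has no private scheduler at all; work reaches it only through ordinary closure scheduling. A short sketch of the surviving path, using the GRPC_CLOSURE_* macros this same diff introduces (the include paths are my guess at the era's layout):

    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      /* Runs when the owning exec_ctx is flushed, on the flushing thread. */
    }

    static void schedule_work(grpc_exec_ctx *exec_ctx, void *arg) {
      GRPC_CLOSURE_SCHED(exec_ctx,
                         GRPC_CLOSURE_CREATE(on_done, arg, grpc_schedule_on_exec_ctx),
                         GRPC_ERROR_NONE);
    }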
@@ -905,7 +818,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -923,10 +835,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
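One mechanical change repeats through every file below: grpc_closure_sched and grpc_closure_create become the uppercase GRPC_CLOSURE_SCHED and GRPC_CLOSURE_CREATE. The call sites are otherwise untouched, which is the tell that these are wrapper macros; presumably (an assumption, the macro bodies are outside this diff) the debug variants capture __FILE__/__LINE__ the same way GRPC_ERROR_REF now does. At a call site the rewrite is just:

    /* before */ grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));
    /* after  */ GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));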
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
index 0304d97fd8..2c91ad357c 100644
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
@@ -46,7 +46,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/env.h"
@@ -148,8 +147,7 @@ struct grpc_fd {
};
/* Reference counting for fds */
-// #define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -169,18 +167,18 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
+#else
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
+#endif
typedef struct worker_node {
struct worker_node *next;
@@ -189,8 +187,6 @@ typedef struct worker_node {
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
- grpc_closure_scheduler workqueue_scheduler;
-
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
the refcount.
@@ -211,15 +207,6 @@ typedef struct polling_island {
/* Number of threads currently polling on this island */
gpr_atm poller_count;
- /* Mutex guarding the read end of the workqueue (must be held to pop from
- * workqueue_items) */
- gpr_mu workqueue_read_mu;
- /* Queue of closures to be executed */
- gpr_mpscq workqueue_items;
- /* Count of items in workqueue_items */
- gpr_atm workqueue_item_count;
- /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
- grpc_wakeup_fd workqueue_wakeup_fd;
/* The list of workers waiting to do polling on this polling island */
gpr_mu worker_list_mu;
@@ -308,8 +295,6 @@ static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -322,57 +307,30 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
- workqueue_enqueue, workqueue_enqueue, "workqueue"};
-
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, old_cnt + 1, reason, file, line);
+ }
pi_add_ref(pi);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
- pi_unref(exec_ctx, pi);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
-}
-
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- if (workqueue != NULL) {
- pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue != NULL) {
- pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
- }
-}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- pi_add_ref((polling_island *)workqueue);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- pi_unref(exec_ctx, (polling_island *)workqueue);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, (old_cnt - 1), reason, file, line);
}
+ pi_unref(exec_ctx, pi);
}
#endif
@@ -577,17 +535,12 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
- pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
- gpr_mu_init(&pi->workqueue_read_mu);
- gpr_mpscq_init(&pi->workqueue_items);
- gpr_atm_rel_store(&pi->workqueue_item_count, 0);
-
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
@@ -595,11 +548,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&pi->worker_list_mu);
worker_node_init(&pi->worker_list_head);
- if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
- err_desc)) {
- goto done;
- }
-
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@@ -607,8 +555,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
- polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
-
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
@@ -627,11 +573,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
- GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
- gpr_mu_destroy(&pi->workqueue_read_mu);
- gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
- grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_mu_destroy(&pi->worker_list_mu);
GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head));
@@ -779,45 +721,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
-static void workqueue_maybe_wakeup(polling_island *pi) {
- /* If this thread is the current poller, then it may be that it's about to
- decrement the current poller count, so we need to look past this thread */
- bool is_current_poller = (g_current_thread_polling_island == pi);
- gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
- gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
- /* Only issue a wakeup if it's likely that some poller could come in and take
- it right now. Note that since we do an anticipatory mpscq_pop every poll
- loop, it's ok if we miss the wakeup here, as we'll get the work item when
- the next poller enters anyway. */
- if (current_pollers > min_current_pollers_for_wakeup) {
- GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
- grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
- }
-}
-
-static void workqueue_move_items_to_parent(polling_island *q) {
- polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
- if (p == NULL) {
- return;
- }
- gpr_mu_lock(&q->workqueue_read_mu);
- int num_added = 0;
- while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
- if (n != NULL) {
- gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
- gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
- gpr_mpscq_push(&p->workqueue_items, n);
- num_added++;
- }
- }
- gpr_mu_unlock(&q->workqueue_read_mu);
- if (num_added > 0) {
- workqueue_maybe_wakeup(p);
- }
- workqueue_move_items_to_parent(p);
-}
-
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@@ -842,8 +745,6 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
-
- workqueue_move_items_to_parent(p);
}
/* else if p == q, nothing needs to be done */
@@ -854,32 +755,6 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
- /* take a ref to the workqueue: otherwise it can happen that whatever events
- * this kicks off ends up destroying the workqueue before this function
- * completes */
- GRPC_WORKQUEUE_REF(workqueue, "enqueue");
- polling_island *pi = (polling_island *)workqueue;
- gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
- closure->error_data.error = error;
- gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
- if (last == 0) {
- workqueue_maybe_wakeup(pi);
- }
- workqueue_move_items_to_parent(pi);
- GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
- GPR_TIMER_END("workqueue.enqueue", 0);
-}
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- polling_island *pi = (polling_island *)workqueue;
- return workqueue == NULL ? grpc_schedule_on_exec_ctx
- : &pi->workqueue_scheduler;
-}
-
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -920,14 +795,17 @@ static void polling_island_global_shutdown() {
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -936,18 +814,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
/* Add the fd to the freelist */
gpr_mu_lock(&fd_freelist_mu);
@@ -965,7 +844,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
/* Increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
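The "increment by two" comment is the key to this refcount: bit 0 of refst is reserved as a flag (the orphaned bit), so real references always move the count in steps of 2 and leave the flag alone, and unref_by's old == n test detects the final release. A simplified, self-contained version of the encoding (not gRPC's exact field layout):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { atomic_intptr_t refst; /* (count << 1) | flag */ } tagged_rc;

    static void rc_init(tagged_rc *rc) { atomic_init(&rc->refst, 2); } /* 1 ref */
    static void rc_ref(tagged_rc *rc) { atomic_fetch_add(&rc->refst, 2); }
    static void rc_set_flag(tagged_rc *rc) { atomic_fetch_or(&rc->refst, 1); }

    /* Returns true when the count part hits zero; the flag bit survives. */
    static bool rc_unref(tagged_rc *rc) {
      return (atomic_fetch_sub(&rc->refst, 2) >> 1) == 1;
    }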
@@ -1033,8 +912,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
return new_fd;
@@ -1093,7 +974,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
fd->po.pi = NULL;
}
- grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->po.mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1138,14 +1019,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
- gpr_mu_lock(&fd->po.mu);
- grpc_workqueue *workqueue =
- GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
- gpr_mu_unlock(&fd->po.mu);
- return workqueue;
-}
-
/*******************************************************************************
* Pollset Definitions
*/
@@ -1386,7 +1259,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
/* Release the ref and set pollset->po.pi to NULL */
pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->po.mu lock must be held by the caller before calling this */
@@ -1417,33 +1290,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->po.mu);
}
-static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
- polling_island *pi) {
- if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
- gpr_mu_unlock(&pi->workqueue_read_mu);
- if (n != NULL) {
- if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
- workqueue_maybe_wakeup(pi);
- }
- grpc_closure *c = (grpc_closure *)n;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- return true;
- } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
- /* n == NULL might mean there's work but it's not available to be popped
- * yet - try to ensure another workqueue wakes up to check shortly if so
- */
- workqueue_maybe_wakeup(pi);
- }
- }
- return false;
-}
-
/* NOTE: This function may modify 'now' */
static bool acquire_polling_lease(grpc_pollset_worker *worker,
polling_island *pi, gpr_timespec deadline,
@@ -1579,12 +1425,7 @@ static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &pi->workqueue_wakeup_fd) {
- append_error(error,
- grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
- err_desc);
- maybe_do_workqueue_work(exec_ctx, pi);
- } else if (data_ptr == &polling_island_wakeup_fd) {
+ if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
"%d) got merged",
@@ -1660,15 +1501,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->po.mu);
- /* If we get some workqueue work to do, it might end up completing an item on
- the completion queue, so there's no need to poll... so we skip that and
- redo the complete loop to verify */
- if (!maybe_do_workqueue_work(exec_ctx, pi)) {
- g_current_thread_polling_island = pi;
- pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now,
- deadline, sig_mask, error);
- g_current_thread_polling_island = NULL;
- }
+ g_current_thread_polling_island = pi;
+ pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline,
+ sig_mask, error);
+ g_current_thread_polling_island = NULL;
GPR_ASSERT(pi != NULL);
@@ -2021,7 +1857,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -2039,10 +1874,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
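Besides the tracer gating, the log rewrites in this file fix a format-string bug: gpr_atm is intptr_t-sized, and the old code cast it to long and printed %ld, which truncates wherever long is narrower than a pointer (LLP64 Windows, for instance; harmless in this Linux-only poller, but the corrected pattern is used tree-wide). The <inttypes.h> macros keep format and type in lockstep:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      intptr_t old_cnt = 41; /* stand-in for gpr_atm_acq_load(&pi->ref_count) */
      printf("Add ref pi: old:%" PRIdPTR " -> new:%" PRIdPTR "\n", old_cnt,
             old_cnt + 1);
      return 0;
    }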
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
index 23f0aa68d9..49be72c03e 100644
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
@@ -46,7 +46,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@@ -94,23 +93,20 @@ static void fd_global_shutdown(void);
* epoll set Declarations
*/
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define EPS_UNREF(exec_ctx, p, r) \
eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
+#else
#define EPS_ADD_REF(p, r) eps_add_ref((p))
#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_EPS_REF_COUNT_DEBUG) */
+#endif
-/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct epoll_set {
- grpc_closure_scheduler workqueue_scheduler;
-
/* Mutex poller should acquire to poll this. This enforces that only one
* poller can be polling on epoll_set at any time */
gpr_mu mu;
@@ -124,15 +120,6 @@ typedef struct epoll_set {
/* Number of threads currently polling on this epoll set*/
gpr_atm poller_count;
- /* Mutex guarding the read end of the workqueue (must be held to pop from
- * workqueue_items) */
- gpr_mu workqueue_read_mu;
- /* Queue of closures to be executed */
- gpr_mpscq workqueue_items;
- /* Count of items in workqueue_items */
- gpr_atm workqueue_item_count;
- /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
- grpc_wakeup_fd workqueue_wakeup_fd;
/* Is the epoll set shutdown */
gpr_atm is_shutdown;
@@ -166,7 +153,9 @@ struct grpc_pollset {
/*******************************************************************************
* Pollset-set Declarations
*/
-struct grpc_pollset_set {};
+struct grpc_pollset_set {
+ char unused;
+};
/*****************************************************************************
* Dedicated polling threads and pollsets - Declarations
@@ -220,8 +209,6 @@ static __thread epoll_set *g_current_thread_epoll_set;
/* Forward declaration */
static void epoll_set_delete(epoll_set *eps);
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -234,57 +221,30 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
- workqueue_enqueue, workqueue_enqueue, "workqueue"};
-
static void eps_add_ref(epoll_set *eps);
static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps);
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void eps_add_ref_dbg(epoll_set *eps, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ eps, old_cnt, old_cnt + 1, reason, file, line);
+ }
eps_add_ref(eps);
- gpr_log(GPR_DEBUG, "Add ref eps: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)eps, old_cnt, old_cnt + 1, reason, file, line);
}
static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&eps->ref_count);
- eps_unref(exec_ctx, eps);
- gpr_log(GPR_DEBUG, "Unref eps: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)eps, old_cnt, (old_cnt - 1), reason, file, line);
-}
-
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- if (workqueue != NULL) {
- eps_add_ref_dbg((epoll_set *)workqueue, reason, file, line);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue != NULL) {
- eps_unref_dbg(exec_ctx, (epoll_set *)workqueue, reason, file, line);
- }
-}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- eps_add_ref((epoll_set *)workqueue);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- eps_unref(exec_ctx, (epoll_set *)workqueue);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
+ gpr_log(GPR_DEBUG, "Unref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ eps, old_cnt, (old_cnt - 1), reason, file, line);
}
+ eps_unref(exec_ctx, eps);
}
#endif
@@ -379,24 +339,15 @@ static epoll_set *epoll_set_create(grpc_error **error) {
*error = GRPC_ERROR_NONE;
eps = gpr_malloc(sizeof(*eps));
- eps->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
eps->epoll_fd = -1;
gpr_mu_init(&eps->mu);
- gpr_mu_init(&eps->workqueue_read_mu);
- gpr_mpscq_init(&eps->workqueue_items);
- gpr_atm_rel_store(&eps->workqueue_item_count, 0);
gpr_atm_rel_store(&eps->ref_count, 0);
gpr_atm_rel_store(&eps->poller_count, 0);
gpr_atm_rel_store(&eps->is_shutdown, false);
- if (!append_error(error, grpc_wakeup_fd_init(&eps->workqueue_wakeup_fd),
- err_desc)) {
- goto done;
- }
-
eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (eps->epoll_fd < 0) {
@@ -404,8 +355,6 @@ static epoll_set *epoll_set_create(grpc_error **error) {
goto done;
}
- epoll_set_add_wakeup_fd_locked(eps, &eps->workqueue_wakeup_fd, error);
-
done:
if (*error != GRPC_ERROR_NONE) {
epoll_set_delete(eps);
@@ -419,57 +368,11 @@ static void epoll_set_delete(epoll_set *eps) {
close(eps->epoll_fd);
}
- GPR_ASSERT(gpr_atm_no_barrier_load(&eps->workqueue_item_count) == 0);
gpr_mu_destroy(&eps->mu);
- gpr_mu_destroy(&eps->workqueue_read_mu);
- gpr_mpscq_destroy(&eps->workqueue_items);
- grpc_wakeup_fd_destroy(&eps->workqueue_wakeup_fd);
gpr_free(eps);
}
-static void workqueue_maybe_wakeup(epoll_set *eps) {
- /* If this thread is the current poller, then it may be that it's about to
- decrement the current poller count, so we need to look past this thread */
- bool is_current_poller = (g_current_thread_epoll_set == eps);
- gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
- gpr_atm current_pollers = gpr_atm_no_barrier_load(&eps->poller_count);
- /* Only issue a wakeup if it's likely that some poller could come in and take
- it right now. Note that since we do an anticipatory mpscq_pop every poll
- loop, it's ok if we miss the wakeup here, as we'll get the work item when
- the next poller enters anyway. */
- if (current_pollers > min_current_pollers_for_wakeup) {
- GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
- grpc_wakeup_fd_wakeup(&eps->workqueue_wakeup_fd));
- }
-}
-
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
- /* take a ref to the workqueue: otherwise it can happen that whatever events
- * this kicks off ends up destroying the workqueue before this function
- * completes */
- GRPC_WORKQUEUE_REF(workqueue, "enqueue");
- epoll_set *eps = (epoll_set *)workqueue;
- gpr_atm last = gpr_atm_no_barrier_fetch_add(&eps->workqueue_item_count, 1);
- closure->error_data.error = error;
- gpr_mpscq_push(&eps->workqueue_items, &closure->next_data.atm_next);
- if (last == 0) {
- workqueue_maybe_wakeup(eps);
- }
-
- GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
- GPR_TIMER_END("workqueue.enqueue", 0);
-}
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- epoll_set *eps = (epoll_set *)workqueue;
- return workqueue == NULL ? grpc_schedule_on_exec_ctx
- : &eps->workqueue_scheduler;
-}
-
static grpc_error *epoll_set_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -616,7 +519,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
fd->eps = NULL;
}
- grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->mu);
@@ -665,8 +568,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
-
/*******************************************************************************
* Pollset Definitions
*/
@@ -819,7 +720,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
/* Release the ref and set pollset->eps to NULL */
pollset_release_epoll_set(exec_ctx, pollset, "ps_shutdown");
- grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->mu lock must be held by the caller before calling this */
@@ -850,32 +751,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
-static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, epoll_set *eps) {
- if (gpr_mu_trylock(&eps->workqueue_read_mu)) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&eps->workqueue_items);
- gpr_mu_unlock(&eps->workqueue_read_mu);
- if (n != NULL) {
- if (gpr_atm_full_fetch_add(&eps->workqueue_item_count, -1) > 1) {
- workqueue_maybe_wakeup(eps);
- }
- grpc_closure *c = (grpc_closure *)n;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- return true;
- } else if (gpr_atm_no_barrier_load(&eps->workqueue_item_count) > 0) {
- /* n == NULL might mean there's work but it's not available to be popped
- * yet - try to ensure another workqueue wakes up to check shortly if so
- */
- workqueue_maybe_wakeup(eps);
- }
- }
- return false;
-}
-
/* Blocking call */
static void acquire_epoll_lease(epoll_set *eps) {
if (g_num_threads_per_eps > 1) {
@@ -919,12 +794,7 @@ static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &eps->workqueue_wakeup_fd) {
- append_error(error,
- grpc_wakeup_fd_consume_wakeup(&eps->workqueue_wakeup_fd),
- err_desc);
- maybe_do_workqueue_work(exec_ctx, eps);
- } else if (data_ptr == &epoll_set_wakeup_fd) {
+ if (data_ptr == &epoll_set_wakeup_fd) {
gpr_atm_rel_store(&eps->is_shutdown, 1);
gpr_log(GPR_INFO, "pollset poller: shutdown set");
} else {
@@ -951,18 +821,13 @@ static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps,
epoll set. */
epoll_fd = eps->epoll_fd;
- /* If we get some workqueue work to do, it might end up completing an item on
- the completion queue, so there's no need to poll... so we skip that and
- redo the complete loop to verify */
- if (!maybe_do_workqueue_work(exec_ctx, eps)) {
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
- g_current_thread_epoll_set = eps;
+ gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
+ g_current_thread_epoll_set = eps;
- do_epoll_wait(exec_ctx, epoll_fd, eps, error);
+ do_epoll_wait(exec_ctx, epoll_fd, eps, error);
- g_current_thread_epoll_set = NULL;
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
- }
+ g_current_thread_epoll_set = NULL;
+ gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
GPR_TIMER_END("epoll_set_work", 0);
}
@@ -1105,7 +970,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -1123,10 +987,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
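With maybe_do_workqueue_work gone, epoll_set_work collapses to a straight-line poller, which is what the reindented hunk above spells out: account for the poller, block in epoll_wait, unaccount. Schematically, with locking and deadline plumbing elided:

    static void epoll_set_work_shape(grpc_exec_ctx *exec_ctx, epoll_set *eps,
                                     grpc_error **error) {
      gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
      g_current_thread_epoll_set = eps;
      do_epoll_wait(exec_ctx, eps->epoll_fd, eps, error); /* blocks */
      g_current_thread_epoll_set = NULL;
      gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
    }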
diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c
index 096edb7fb9..5574838187 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.c
+++ b/src/core/lib/iomgr/ev_epollex_linux.c
@@ -44,7 +44,6 @@
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/spinlock.h"
@@ -124,17 +123,6 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
- /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
- grpc_wakeup_fd workqueue_wakeup_fd;
- grpc_closure_scheduler workqueue_scheduler;
- /* Spinlock guarding the read end of the workqueue (must be held to pop from
- * workqueue_items) */
- gpr_spinlock workqueue_read_mu;
- /* Queue of closures to be executed */
- gpr_mpscq workqueue_items;
- /* Count of items in workqueue_items */
- gpr_atm workqueue_item_count;
-
/* The fd is either closed or we relinquished control of it. In either
cases, this indicates that the 'fd' on this structure is no longer
valid */
@@ -157,12 +145,6 @@ struct grpc_fd {
static void fd_global_init(void);
static void fd_global_shutdown(void);
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
-
-static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
- workqueue_enqueue, workqueue_enqueue, "workqueue"};
-
/*******************************************************************************
* Pollset Declarations
*/
@@ -187,12 +169,20 @@ struct grpc_pollset_worker {
pollable *pollable;
};
+#define MAX_EPOLL_EVENTS 100
+#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
+
struct grpc_pollset {
pollable pollable;
pollable *current_pollable;
+ int kick_alls_pending;
bool kicked_without_poller;
grpc_closure *shutdown_closure;
grpc_pollset_worker *root_worker;
+
+ int event_cursor;
+ int event_count;
+ struct epoll_event events[MAX_EPOLL_EVENTS];
};
/*******************************************************************************
@@ -241,15 +231,18 @@ static bool append_error(grpc_error **composite, grpc_error *error,
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
@@ -274,20 +267,21 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_unlock(&fd_freelist_mu);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
const char *reason, const char *file, int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
- grpc_closure_sched(exec_ctx, grpc_closure_create(fd_destroy, fd,
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
} else {
@@ -332,21 +326,16 @@ static grpc_fd *fd_create(int fd, const char *name) {
grpc_lfev_init(&new_fd->write_closure);
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
- GRPC_LOG_IF_ERROR("fd_create",
- grpc_wakeup_fd_init(&new_fd->workqueue_wakeup_fd));
- new_fd->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
- new_fd->workqueue_read_mu = GPR_SPINLOCK_INITIALIZER;
- gpr_mpscq_init(&new_fd->workqueue_items);
- gpr_atm_no_barrier_store(&new_fd->workqueue_item_count, 0);
-
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
#endif
gpr_free(fd_name);
return new_fd;
@@ -392,7 +381,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
to be alive (and not added to freelist) until the end of this function */
REF_BY(fd, 1, reason);
- grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->orphaned_mu);
gpr_mu_unlock(&fd->pollable.po.mu);
@@ -431,91 +420,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
- REF_BY(fd, 2, "return_workqueue");
- return (grpc_workqueue *)fd;
-}
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- if (workqueue != NULL) {
- ref_by((grpc_fd *)workqueue, 2, file, line, reason);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue != NULL) {
- unref_by(exec_ctx, (grpc_fd *)workqueue, 2, file, line, reason);
- }
-}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- ref_by((grpc_fd *)workqueue, 2);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- unref_by(exec_ctx, (grpc_fd *)workqueue, 2);
- }
-}
-#endif
-
-static void workqueue_wakeup(grpc_fd *fd) {
- GRPC_LOG_IF_ERROR("workqueue_enqueue",
- grpc_wakeup_fd_wakeup(&fd->workqueue_wakeup_fd));
-}
-
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- grpc_fd *fd = (grpc_fd *)(((char *)closure->scheduler) -
- offsetof(grpc_fd, workqueue_scheduler));
- REF_BY(fd, 2, "workqueue_enqueue");
- gpr_atm last = gpr_atm_no_barrier_fetch_add(&fd->workqueue_item_count, 1);
- closure->error_data.error = error;
- gpr_mpscq_push(&fd->workqueue_items, &closure->next_data.atm_next);
- if (last == 0) {
- workqueue_wakeup(fd);
- }
- UNREF_BY(exec_ctx, fd, 2, "workqueue_enqueue");
-}
-
-static void fd_invoke_workqueue(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- /* handle spurious wakeups */
- if (!gpr_spinlock_trylock(&fd->workqueue_read_mu)) return;
- gpr_mpscq_node *n = gpr_mpscq_pop(&fd->workqueue_items);
- gpr_spinlock_unlock(&fd->workqueue_read_mu);
- if (n != NULL) {
- if (gpr_atm_full_fetch_add(&fd->workqueue_item_count, -1) > 1) {
- workqueue_wakeup(fd);
- }
- grpc_closure *c = (grpc_closure *)n;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- } else if (gpr_atm_no_barrier_load(&fd->workqueue_item_count) > 0) {
- /* n == NULL might mean there's work but it's not available to be popped
- * yet - try to ensure another workqueue wakes up to check shortly if so
- */
- workqueue_wakeup(fd);
- }
-}
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- return &((grpc_fd *)workqueue)->workqueue_scheduler;
-}
-
/*******************************************************************************
* Pollable Definitions
*/
@@ -547,7 +451,7 @@ static grpc_error *pollable_materialize(pollable *p) {
return err;
}
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = &p->wakeup};
+ .data.ptr = (void *)(1 | (intptr_t)&p->wakeup)};
if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
close(new_epfd);
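The 1 | (intptr_t)&p->wakeup tag is doing real work here: epoll hands back a single data word per event, so the low bit (always clear on suitably aligned objects) is borrowed to say "this is a wakeup fd, not a grpc_fd", and the reader masks it back off, as the new pollset_process_events below does with ~(intptr_t)1. The trick in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    static void *tag_wakeup(void *wakeup) {      /* alignment must exceed 1 */
      return (void *)(1 | (intptr_t)wakeup);
    }
    static bool is_wakeup(void *data_ptr) {
      return ((intptr_t)data_ptr & 1) != 0;
    }
    static void *untag(void *data_ptr) {
      return (void *)(~(intptr_t)1 & (intptr_t)data_ptr);
    }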
@@ -581,22 +485,7 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
.data.ptr = fd};
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) {
- case EEXIST: /* if this fd is already in the epoll set, the workqueue fd
- must also be - just return */
- gpr_mu_unlock(&fd->orphaned_mu);
- return GRPC_ERROR_NONE;
- default:
- append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
- }
- }
- struct epoll_event ev_wq = {
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE),
- .data.ptr = (void *)(1 + (intptr_t)fd)};
- if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->workqueue_wakeup_fd.read_fd, &ev_wq) !=
- 0) {
- switch (errno) {
- case EEXIST: /* if the workqueue fd is already in the epoll set we're ok
- - no need to do anything special */
+ case EEXIST:
break;
default:
append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
@@ -628,8 +517,20 @@ static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_worker);
}
-static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
+static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
+ if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
+ pollset->kick_alls_pending == 0) {
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+ pollset->shutdown_closure = NULL;
+ }
+}
+
+static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE;
+ grpc_pollset *pollset = arg;
+ gpr_mu_lock(&pollset->pollable.po.mu);
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
@@ -650,7 +551,17 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
worker = worker->links[PWL_POLLSET].next;
} while (worker != pollset->root_worker);
}
- return error;
+ pollset->kick_alls_pending--;
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
+ gpr_mu_unlock(&pollset->pollable.po.mu);
+ GRPC_LOG_IF_ERROR("kick_all", error);
+}
+
+static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+ pollset->kick_alls_pending++;
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(do_kick_all, pollset,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
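pollset_kick_all is now asynchronous: it bumps kick_alls_pending and schedules do_kick_all rather than kicking inline, and pollset_maybe_finish_shutdown (moved up and extended above) refuses to run the shutdown closure until both the workers and that counter have drained, so a shutdown cannot complete while a kick is still in flight. The gating pattern reduced to its core, with hypothetical names and the locking assumed external:

    typedef struct {
      int pending;               /* guarded by the pollset mutex in the real code */
      void (*on_drained)(void);  /* non-NULL once shutdown has been requested */
    } drain_gate;

    static void gate_maybe_finish(drain_gate *g) {
      if (g->on_drained != NULL && g->pending == 0) {
        void (*cb)(void) = g->on_drained;
        g->on_drained = NULL;
        cb();
      }
    }

    static void gate_begin(drain_gate *g) { g->pending++; }
    static void gate_end(drain_gate *g) { g->pending--; gate_maybe_finish(g); }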
@@ -789,20 +700,12 @@ static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
return error;
}
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
- grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
- pollset->shutdown_closure = NULL;
- }
-}
-
/* pollset->po.mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
GPR_ASSERT(pollset->shutdown_closure == NULL);
pollset->shutdown_closure = closure;
- GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+ pollset_kick_all(exec_ctx, pollset);
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
@@ -810,6 +713,46 @@ static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
return p != &g_empty_pollable && p != &pollset->pollable;
}
+static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, bool drain) {
+ static const char *err_desc = "pollset_process_events";
+ grpc_error *error = GRPC_ERROR_NONE;
+ for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
+ pollset->event_cursor != pollset->event_count;
+ i++) {
+ int n = pollset->event_cursor++;
+ struct epoll_event *ev = &pollset->events[n];
+ void *data_ptr = ev->data.ptr;
+ if (1 & (intptr_t)data_ptr) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
+ }
+ append_error(&error, grpc_wakeup_fd_consume_wakeup(
+ (void *)((~(intptr_t)1) & (intptr_t)data_ptr)),
+ err_desc);
+ } else {
+ grpc_fd *fd = (grpc_fd *)data_ptr;
+ bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
+ bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
+ bool write_ev = (ev->events & EPOLLOUT) != 0;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG,
+ "PS:%p got fd %p: cancel=%d read=%d "
+ "write=%d",
+ pollset, fd, cancel, read_ev, write_ev);
+ }
+ if (read_ev || cancel) {
+ fd_become_readable(exec_ctx, fd, pollset);
+ }
+ if (write_ev || cancel) {
+ fd_become_writable(exec_ctx, fd);
+ }
+ }
+ }
+
+ return error;
+}
+
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
pollable_destroy(&pollset->pollable);
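Moving the event buffer into grpc_pollset and walking it with event_cursor/event_count makes event handling resumable: each pollset_work pass handles at most MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL entries before giving closures a chance to run, and the drain=true call added here in pollset_destroy flushes whatever remains. The cursor idiom on its own, with a hypothetical handler:

    #include <stdbool.h>

    #define BATCH 5

    typedef struct { int cursor, count, items[100]; } event_buf;

    static void handle(int ev) { (void)ev; /* per-event work */ }

    static void process_some(event_buf *b, bool drain) {
      for (int i = 0; (drain || i < BATCH) && b->cursor != b->count; i++) {
        handle(b->items[b->cursor++]);
      }
    }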
@@ -817,16 +760,13 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
"pollset_pollable");
}
+ GRPC_LOG_IF_ERROR("pollset_process_events",
+ pollset_process_events(exec_ctx, pollset, true));
}
-#define MAX_EPOLL_EVENTS 100
-
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollable *p, gpr_timespec now,
gpr_timespec deadline) {
- struct epoll_event events[MAX_EPOLL_EVENTS];
- static const char *err_desc = "pollset_poll";
-
int timeout = poll_deadline_to_millis_timeout(deadline, now);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
@@ -838,7 +778,7 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
int r;
do {
- r = epoll_wait(p->epfd, events, MAX_EPOLL_EVENTS, timeout);
+ r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
GRPC_SCHEDULING_END_BLOCKING_REGION;
@@ -850,43 +790,10 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_log(GPR_DEBUG, "PS:%p poll %p got %d events", pollset, p, r);
}
- grpc_error *error = GRPC_ERROR_NONE;
- for (int i = 0; i < r; i++) {
- void *data_ptr = events[i].data.ptr;
- if (data_ptr == &p->wakeup) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p poll %p got pollset_wakeup", pollset, p);
- }
- append_error(&error, grpc_wakeup_fd_consume_wakeup(&p->wakeup), err_desc);
- } else {
- grpc_fd *fd = (grpc_fd *)(((intptr_t)data_ptr) & ~(intptr_t)1);
- bool is_workqueue = (((intptr_t)data_ptr) & 1) != 0;
- bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
- bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
- bool write_ev = (events[i].events & EPOLLOUT) != 0;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG,
- "PS:%p poll %p got fd %p: is_wq=%d cancel=%d read=%d "
- "write=%d",
- pollset, p, fd, is_workqueue, cancel, read_ev, write_ev);
- }
- if (is_workqueue) {
- append_error(&error,
- grpc_wakeup_fd_consume_wakeup(&fd->workqueue_wakeup_fd),
- err_desc);
- fd_invoke_workqueue(exec_ctx, fd);
- } else {
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
- }
+ pollset->event_cursor = 0;
+ pollset->event_count = r;
- return error;
+ return GRPC_ERROR_NONE;
}
/* Return true if first in list */
@@ -1038,10 +945,13 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_mu_unlock(&worker.pollable->po.mu);
}
gpr_mu_unlock(&pollset->pollable.po.mu);
- append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable, now,
- deadline),
+ if (pollset->event_cursor == pollset->event_count) {
+ append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable,
+ now, deadline),
+ err_desc);
+ }
+ append_error(&error, pollset_process_events(exec_ctx, pollset, false),
err_desc);
- grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->pollable.po.mu);
if (worker.pollable != &pollset->pollable) {
gpr_mu_lock(&worker.pollable->po.mu);
@@ -1054,6 +964,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (worker.pollable != &pollset->pollable) {
gpr_mu_unlock(&worker.pollable->po.mu);
}
+ if (grpc_exec_ctx_has_work(exec_ctx)) {
+ gpr_mu_unlock(&pollset->pollable.po.mu);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->pollable.po.mu);
+ }
return error;
}
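Two things changed in that tail: the flush is now conditional on grpc_exec_ctx_has_work, and it happens with pollset->pollable.po.mu released. The second matters most, since flushing runs arbitrary closures and a closure may want that very lock; the safe shape is drop, drain, reacquire:

    static void drain_outside_lock(grpc_exec_ctx *exec_ctx, gpr_mu *mu) {
      if (grpc_exec_ctx_has_work(exec_ctx)) {
        gpr_mu_unlock(mu);
        grpc_exec_ctx_flush(exec_ctx); /* closures may take mu themselves */
        gpr_mu_lock(mu);
      }
    }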
@@ -1075,7 +990,7 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
"PS:%p add fd %p; transition pollable from empty to fd", pollset,
fd);
/* empty pollable --> single fd pollable */
- append_error(&error, pollset_kick_all(pollset), err_desc);
+ pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &fd->pollable;
if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);
append_error(&error, fd_become_pollable_locked(fd), err_desc);
@@ -1092,15 +1007,15 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from fd %p to multipoller",
pollset, fd, had_fd);
- append_error(&error, pollset_kick_all(pollset), err_desc);
+ pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &pollset->pollable;
if (append_error(&error, pollable_materialize(&pollset->pollable),
err_desc)) {
pollable_add_fd(&pollset->pollable, had_fd);
pollable_add_fd(&pollset->pollable, fd);
}
- grpc_closure_sched(exec_ctx,
- grpc_closure_create(unref_fd_no_longer_poller, had_fd,
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
@@ -1434,7 +1349,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -1452,17 +1366,11 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_epollex_linux(
bool explicitly_requested) {
- if (!explicitly_requested) return NULL;
-
if (!grpc_has_wakeup_fd()) {
return NULL;
}
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c
index 328b5b297c..255e07010b 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.c
+++ b/src/core/lib/iomgr/ev_epollsig_linux.c
@@ -44,7 +44,6 @@
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@@ -141,8 +140,7 @@ struct grpc_fd {
};
/* Reference counting for fds */
-// #define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -162,23 +160,21 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
+#else
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
+#endif
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
- grpc_closure_scheduler workqueue_scheduler;
-
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
the refcount.
@@ -199,15 +195,6 @@ typedef struct polling_island {
/* Number of threads currently polling on this island */
gpr_atm poller_count;
- /* Mutex guarding the read end of the workqueue (must be held to pop from
- * workqueue_items) */
- gpr_mu workqueue_read_mu;
- /* Queue of closures to be executed */
- gpr_mpscq workqueue_items;
- /* Count of items in workqueue_items */
- gpr_atm workqueue_item_count;
- /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
- grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@@ -282,8 +269,6 @@ static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -296,57 +281,30 @@ static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
- workqueue_enqueue, workqueue_enqueue, "workqueue"};
-
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, old_cnt + 1, reason, file, line);
+ }
pi_add_ref(pi);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char *reason, const char *file, int line) {
- long old_cnt = gpr_atm_acq_load(&pi->ref_count);
- pi_unref(exec_ctx, pi);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
- (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
-}
-
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- if (workqueue != NULL) {
- pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue != NULL) {
- pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
- }
-}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- pi_add_ref((polling_island *)workqueue);
- }
- return workqueue;
-}
-
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (workqueue != NULL) {
- pi_unref(exec_ctx, (polling_island *)workqueue);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
+ gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
+ pi, old_cnt, (old_cnt - 1), reason, file, line);
}
+ pi_unref(exec_ctx, pi);
}
#endif
@@ -511,26 +469,16 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
- pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
- gpr_mu_init(&pi->workqueue_read_mu);
- gpr_mpscq_init(&pi->workqueue_items);
- gpr_atm_rel_store(&pi->workqueue_item_count, 0);
-
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
- if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
- err_desc)) {
- goto done;
- }
-
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@@ -538,8 +486,6 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
goto done;
}
- polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
-
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
@@ -558,11 +504,7 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
- GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
- gpr_mu_destroy(&pi->workqueue_read_mu);
- gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
- grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@@ -707,45 +649,6 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
-static void workqueue_maybe_wakeup(polling_island *pi) {
- /* If this thread is the current poller, then it may be that it's about to
- decrement the current poller count, so we need to look past this thread */
- bool is_current_poller = (g_current_thread_polling_island == pi);
- gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
- gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
- /* Only issue a wakeup if it's likely that some poller could come in and take
- it right now. Note that since we do an anticipatory mpscq_pop every poll
- loop, it's ok if we miss the wakeup here, as we'll get the work item when
- the next poller enters anyway. */
- if (current_pollers >= min_current_pollers_for_wakeup) {
- GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
- grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
- }
-}
-
-static void workqueue_move_items_to_parent(polling_island *q) {
- polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
- if (p == NULL) {
- return;
- }
- gpr_mu_lock(&q->workqueue_read_mu);
- int num_added = 0;
- while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
- if (n != NULL) {
- gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
- gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
- gpr_mpscq_push(&p->workqueue_items, n);
- num_added++;
- }
- }
- gpr_mu_unlock(&q->workqueue_read_mu);
- if (num_added > 0) {
- workqueue_maybe_wakeup(p);
- }
- workqueue_move_items_to_parent(p);
-}
-
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@@ -770,8 +673,6 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
-
- workqueue_move_items_to_parent(p);
}
/* else if p == q, nothing needs to be done */
@@ -782,32 +683,6 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
- /* take a ref to the workqueue: otherwise it can happen that whatever events
- * this kicks off ends up destroying the workqueue before this function
- * completes */
- GRPC_WORKQUEUE_REF(workqueue, "enqueue");
- polling_island *pi = (polling_island *)workqueue;
- gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
- closure->error_data.error = error;
- gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
- if (last == 0) {
- workqueue_maybe_wakeup(pi);
- }
- workqueue_move_items_to_parent(pi);
- GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
- GPR_TIMER_END("workqueue.enqueue", 0);
-}
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- polling_island *pi = (polling_island *)workqueue;
- return workqueue == NULL ? grpc_schedule_on_exec_ctx
- : &pi->workqueue_scheduler;
-}
-
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -848,14 +723,17 @@ static void polling_island_global_shutdown() {
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -864,18 +742,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
- (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
/* Add the fd to the freelist */
gpr_mu_lock(&fd_freelist_mu);
@@ -893,7 +772,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
/* Increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
@@ -961,9 +840,6 @@ static grpc_fd *fd_create(int fd, const char *name) {
char *fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
-#endif
gpr_free(fd_name);
return new_fd;
}
@@ -1021,7 +897,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
fd->po.pi = NULL;
}
- grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->po.mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1032,7 +908,10 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
unhappy. */
PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
}
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "fd_orphan: %s", msg);
+ }
GRPC_ERROR_UNREF(error);
}
@@ -1066,14 +945,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
- gpr_mu_lock(&fd->po.mu);
- grpc_workqueue *workqueue =
- GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue");
- gpr_mu_unlock(&fd->po.mu);
- return workqueue;
-}
-
/*******************************************************************************
* Pollset Definitions
*/
@@ -1280,7 +1151,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
/* Release the ref and set pollset->po.pi to NULL */
pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->po.mu lock must be held by the caller before calling this */
@@ -1311,44 +1182,6 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->po.mu);
}
-static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
- polling_island *pi) {
- if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
- gpr_mu_unlock(&pi->workqueue_read_mu);
- if (n != NULL) {
- gpr_atm remaining =
- gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) - 1;
- GRPC_POLLING_TRACE(
- "maybe_do_workqueue_work: pi: %p: got closure %p, remaining = "
- "%" PRIdPTR,
- pi, n, remaining);
- if (remaining > 0) {
- workqueue_maybe_wakeup(pi);
- }
- grpc_closure *c = (grpc_closure *)n;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- return true;
- } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
- /* n == NULL might mean there's work but it's not available to be popped
- * yet - try to ensure another workqueue wakes up to check shortly if so
- */
- GRPC_POLLING_TRACE(
- "maybe_do_workqueue_work: pi: %p: more to do, but not yet", pi);
- workqueue_maybe_wakeup(pi);
- }
- } else {
- GRPC_POLLING_TRACE("maybe_do_workqueue_work: pi: %p: read already locked",
- pi);
- }
- return false;
-}
-
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
@@ -1404,76 +1237,61 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->po.mu);
- /* If we get some workqueue work to do, it might end up completing an item on
- the completion queue, so there's no need to poll... so we skip that and
- redo the complete loop to verify */
- GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker %p, pi %p", pollset,
- worker, pi);
- if (!maybe_do_workqueue_work(exec_ctx, pi)) {
- GRPC_POLLING_TRACE("pollset_work: begins");
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
- g_current_thread_polling_island = pi;
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
- sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- if (ep_rv < 0) {
- if (errno != EINTR) {
- gpr_asprintf(&err_msg,
- "epoll_wait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- } else {
- /* We were interrupted. Save an interation by doing a zero timeout
- epoll_wait to see if there are any other events of interest */
- GRPC_POLLING_TRACE(
- "pollset_work: pollset: %p, worker: %p received kick",
- (void *)pollset, (void *)worker);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
- }
+ gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
+ g_current_thread_polling_island = pi;
+
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ ep_rv =
+ epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ if (ep_rv < 0) {
+ if (errno != EINTR) {
+ gpr_asprintf(&err_msg,
+ "epoll_wait() epoll fd: %d failed with error: %d (%s)",
+ epoll_fd, errno, strerror(errno));
+ append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+ } else {
+      /* We were interrupted. Save an iteration by doing a zero-timeout
+         epoll_wait to see if there are any other events of interest */
+ GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick",
+ (void *)pollset, (void *)worker);
+ ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
}
+ }
#ifdef GRPC_TSAN
- /* See the definition of g_poll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
+ /* See the definition of g_poll_sync for more details */
+ gpr_atm_acq_load(&g_epoll_sync);
#endif /* defined(GRPC_TSAN) */
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &pi->workqueue_wakeup_fd) {
- append_error(error,
- grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
- err_desc);
- maybe_do_workqueue_work(exec_ctx, pi);
- } else if (data_ptr == &polling_island_wakeup_fd) {
- GRPC_POLLING_TRACE(
- "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
- "%d) got merged",
- (void *)pollset, (void *)worker, epoll_fd);
- /* This means that our polling island is merged with a different
- island. We do not have to do anything here since the subsequent call
- to the function pollset_work_and_unlock() will pick up the correct
- epoll_fd */
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
+ for (int i = 0; i < ep_rv; ++i) {
+ void *data_ptr = ep_ev[i].data.ptr;
+ if (data_ptr == &polling_island_wakeup_fd) {
+ GRPC_POLLING_TRACE(
+ "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
+ "%d) got merged",
+ (void *)pollset, (void *)worker, epoll_fd);
+ /* This means that our polling island is merged with a different
+ island. We do not have to do anything here since the subsequent call
+ to the function pollset_work_and_unlock() will pick up the correct
+ epoll_fd */
+ } else {
+ grpc_fd *fd = data_ptr;
+ int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
+ int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
+ int write_ev = ep_ev[i].events & EPOLLOUT;
+ if (read_ev || cancel) {
+ fd_become_readable(exec_ctx, fd, pollset);
+ }
+ if (write_ev || cancel) {
+ fd_become_writable(exec_ctx, fd);
}
}
-
- g_current_thread_polling_island = NULL;
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
- GRPC_POLLING_TRACE("pollset_work: ends");
}
+ g_current_thread_polling_island = NULL;
+ gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
+
GPR_ASSERT(pi != NULL);
/* Before leaving, release the extra ref we added to the polling island. It
@@ -1864,7 +1682,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -1882,10 +1699,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
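
Both epoll engines end up with the same post-workqueue dispatch loop: each epoll event is classified as readable, writable, or an error, and EPOLLERR/EPOLLHUP wake both directions so pending closures observe the failure promptly. Below is a minimal standalone sketch of just that classification step, assuming fds are stored in data.fd rather than the grpc_fd pointer the real engines keep in data.ptr:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

#define MAX_EVENTS 100

/* Poll once and classify each event the way the loops above do:
   EPOLLERR/EPOLLHUP force both read and write wakeups so that pending
   closures see the failure instead of waiting forever. */
static int poll_once(int epoll_fd, int timeout_ms) {
  struct epoll_event ev[MAX_EVENTS];
  int n = epoll_wait(epoll_fd, ev, MAX_EVENTS, timeout_ms);
  if (n < 0) {
    if (errno == EINTR) return 0; /* kicked: caller retries, possibly with a
                                     zero timeout to drain remaining events */
    fprintf(stderr, "epoll_wait failed: %s\n", strerror(errno));
    return -1;
  }
  for (int i = 0; i < n; i++) {
    int cancel = ev[i].events & (EPOLLERR | EPOLLHUP);
    int read_ev = ev[i].events & (EPOLLIN | EPOLLPRI);
    int write_ev = ev[i].events & EPOLLOUT;
    int fd = ev[i].data.fd;
    if (read_ev || cancel) printf("fd %d readable\n", fd);
    if (write_ev || cancel) printf("fd %d writable\n", fd);
  }
  return n;
}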
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 806f985229..1f8d7eef26 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -134,9 +134,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
/* Return 1 if this fd is orphaned, 0 otherwise */
static bool fd_is_orphaned(grpc_fd *fd);
-/* Reference counting for fds */
-//#define GRPC_FD_REF_COUNT_DEBUG
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
int line);
@@ -263,14 +261,17 @@ cv_fd_table g_cvfds;
* fd_posix.c
*/
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- (int)gpr_atm_no_barrier_load(&fd->refst),
- (int)gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -279,18 +280,19 @@ static void ref_by(grpc_fd *fd, int n) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- gpr_atm old;
- gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- (int)gpr_atm_no_barrier_load(&fd->refst),
- (int)gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
#else
static void unref_by(grpc_fd *fd, int n) {
- gpr_atm old;
#endif
- old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
gpr_mu_destroy(&fd->mu);
grpc_iomgr_unregister_object(&fd->iomgr_object);
@@ -321,9 +323,6 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_asprintf(&name2, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&r->iomgr_object, name2);
gpr_free(name2);
-#ifdef GRPC_FD_REF_COUNT_DEBUG
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
-#endif
return r;
}
@@ -386,7 +385,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
if (!fd->released) {
close(fd->fd);
}
- grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
}
static int fd_wrapped_fd(grpc_fd *fd) {
@@ -417,7 +416,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
/* increment refcount by two to avoid changing the orphan bit */
-#ifdef GRPC_FD_REF_COUNT_DEBUG
+#ifndef NDEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
int line) {
ref_by(fd, 2, reason, file, line);
@@ -445,7 +444,7 @@ static grpc_error *fd_shutdown_error(grpc_fd *fd) {
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
if (fd->shutdown) {
- grpc_closure_sched(exec_ctx, closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
@@ -453,7 +452,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
- grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd));
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
maybe_wake_one_watcher_locked(fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
@@ -476,7 +475,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
return 0;
} else {
/* waiting ==> queue closure */
- grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd));
+ GRPC_CLOSURE_SCHED(exec_ctx, *st, fd_shutdown_error(fd));
*st = CLOSURE_NOT_READY;
return 1;
}
@@ -633,8 +632,6 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
GRPC_FD_UNREF(fd, "poll");
}
-static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; }
-
/*******************************************************************************
* pollset_posix.c
*/
@@ -836,7 +833,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
}
pollset->fd_count = 0;
- grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
static void work_combine_error(grpc_error **composite, grpc_error *error) {
@@ -885,7 +882,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (!pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
- grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
goto done;
}
/* If we're shutting down then we don't execute any extended work */
@@ -1058,7 +1055,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
- grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
@@ -1077,7 +1074,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutdown_done = closure;
pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
- grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
}
if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
@@ -1274,30 +1271,6 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
}
/*******************************************************************************
- * workqueue stubs
- */
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
- const char *file, int line,
- const char *reason) {
- return workqueue;
-}
-static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-static void workqueue_unref(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {}
-#endif
-
-static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
- return grpc_schedule_on_exec_ctx;
-}
-
-/*******************************************************************************
* Condition Variable polling extensions
*/
@@ -1514,7 +1487,6 @@ static const grpc_event_engine_vtable vtable = {
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
- .fd_get_workqueue = fd_get_workqueue,
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
@@ -1532,10 +1504,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_set_add_fd = pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd,
- .workqueue_ref = workqueue_ref,
- .workqueue_unref = workqueue_unref,
- .workqueue_scheduler = workqueue_scheduler,
-
.shutdown_engine = shutdown_engine,
};
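
Both files also converge on the same refcount-tracing idiom: debug builds compile the reason/file/line plumbing in under #ifndef NDEBUG, a runtime tracer flag decides whether anything is logged, and the unref path destroys the object when the last reference drops. A rough sketch of that idiom with C11 atomics follows; obj, g_trace_refcount, and the bare free() are illustrative stand-ins for grpc_fd, grpc_trace_fd_refcount, and the freelist handling:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static int g_trace_refcount = 1; /* stands in for grpc_trace_fd_refcount */

typedef struct {
  atomic_long refst;
} obj;

#ifndef NDEBUG
#define OBJ_REF(o, r) obj_ref((o), (r), __FILE__, __LINE__)
#define OBJ_UNREF(o, r) obj_unref((o), (r), __FILE__, __LINE__)
static void obj_ref(obj *o, const char *reason, const char *file, int line) {
  long old = atomic_fetch_add(&o->refst, 1);
  if (g_trace_refcount) {
    fprintf(stderr, "obj %p ref %ld -> %ld [%s; %s:%d]\n", (void *)o, old,
            old + 1, reason, file, line);
  }
}
static void obj_unref(obj *o, const char *reason, const char *file, int line) {
  if (g_trace_refcount) {
    long cur = atomic_load(&o->refst);
    fprintf(stderr, "obj %p unref %ld -> %ld [%s; %s:%d]\n", (void *)o, cur,
            cur - 1, reason, file, line);
  }
  if (atomic_fetch_sub(&o->refst, 1) == 1) free(o); /* last ref: destroy */
}
#else
/* Release builds compile the bookkeeping arguments away entirely. */
#define OBJ_REF(o, r) obj_ref(o)
#define OBJ_UNREF(o, r) obj_unref(o)
static void obj_ref(obj *o) { atomic_fetch_add(&o->refst, 1); }
static void obj_unref(obj *o) {
  if (atomic_fetch_sub(&o->refst, 1) == 1) free(o);
}
#endif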
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 4377b2a0f6..2648df393d 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -41,6 +41,10 @@
grpc_tracer_flag grpc_polling_trace =
GRPC_TRACER_INITIALIZER(false); /* Disabled by default */
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_fd_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
/** Default poll() function - a pointer so that it can be overridden by some
* tests */
grpc_poll_function_type grpc_poll_function = poll;
@@ -59,13 +63,13 @@ typedef struct {
} event_engine_factory;
static const event_engine_factory g_factories[] = {
- {"epollex", grpc_init_epollex_linux},
{"epollsig", grpc_init_epollsig_linux},
{"epoll1", grpc_init_epoll1_linux},
{"epoll-threadpool", grpc_init_epoll_thread_pool_linux},
{"epoll-limited", grpc_init_epoll_limited_pollers_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
+ {"epollex", grpc_init_epollex_linux},
};
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
@@ -156,10 +160,6 @@ grpc_fd *grpc_fd_create(int fd, const char *name) {
return g_event_engine->fd_create(fd, name);
}
-grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd) {
- return g_event_engine->fd_get_workqueue(fd);
-}
-
int grpc_fd_wrapped_fd(grpc_fd *fd) {
return g_event_engine->fd_wrapped_fd(fd);
}
@@ -261,26 +261,4 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
}
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason) {
- return g_event_engine->workqueue_ref(workqueue, file, line, reason);
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
-}
-#else
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
- return g_event_engine->workqueue_ref(workqueue);
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- g_event_engine->workqueue_unref(exec_ctx, workqueue);
-}
-#endif
-
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
- return g_event_engine->workqueue_scheduler(workqueue);
-}
-
#endif // GRPC_POSIX_SOCKET
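
The g_factories reorder is meaningful because engine selection walks this table in order and keeps the first factory that initializes successfully, so moving "epollex" to the end (together with dropping its explicitly_requested guard earlier in this diff) demotes it from preferred engine to last resort. A toy version of that first-match-wins loop, with the engine type and the deliberately failing epollsig stub invented for illustration:

#include <stddef.h>
#include <stdio.h>

typedef struct engine {
  const char *name;
} engine;

typedef struct {
  const char *name;
  const engine *(*factory)(void);
} engine_factory;

static const engine poll_engine = {"poll"};
/* Pretend the kernel lacks what this engine needs, so init fails. */
static const engine *init_epollsig(void) { return NULL; }
static const engine *init_poll(void) { return &poll_engine; }

/* Preference order: earlier entries win; appending an entry makes it a
   fallback rather than the default. */
static const engine_factory g_factories[] = {
    {"epollsig", init_epollsig},
    {"poll", init_poll},
};

static const engine *pick_engine(void) {
  for (size_t i = 0; i < sizeof(g_factories) / sizeof(g_factories[0]); i++) {
    const engine *e = g_factories[i].factory();
    if (e != NULL) {
      printf("using polling engine: %s\n", g_factories[i].name);
      return e;
    }
  }
  return NULL; /* no engine could initialize */
}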
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index f87fe16901..54c4f2ee11 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -26,7 +26,6 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/iomgr/workqueue.h"
extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */
@@ -45,7 +44,6 @@ typedef struct grpc_event_engine_vtable {
void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
bool (*fd_is_shutdown)(grpc_fd *fd);
- grpc_workqueue *(*fd_get_workqueue)(grpc_fd *fd);
grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
grpc_fd *fd);
@@ -82,17 +80,6 @@ typedef struct grpc_event_engine_vtable {
grpc_pollset_set *pollset_set, grpc_fd *fd);
void (*shutdown_engine)(void);
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
- grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason);
- void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason);
-#else
- grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
- void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
-#endif
- grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);
@@ -106,9 +93,6 @@ const char *grpc_get_poll_strategy_name();
This takes ownership of closing fd. */
grpc_fd *grpc_fd_create(int fd, const char *name);
-/* Get a workqueue that's associated with this fd */
-grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd);
-
/* Return the wrapped fd, or -1 if it has been released or closed. */
int grpc_fd_wrapped_fd(grpc_fd *fd);
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 7cef32adce..833170ceed 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -23,7 +23,6 @@
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
-#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
@@ -88,8 +87,19 @@ static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
#ifndef NDEBUG
closure->scheduled = false;
+ if (GRPC_TRACER_ON(grpc_trace_closure)) {
+ gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: %s [%s:%d]",
+ closure, closure->file_created, closure->line_created,
+ closure->run ? "run" : "scheduled", closure->file_initiated,
+ closure->line_initiated);
+ }
#endif
closure->cb(exec_ctx, closure->cb_arg, error);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_closure)) {
+ gpr_log(GPR_DEBUG, "closure %p finished", closure);
+ }
+#endif
GRPC_ERROR_UNREF(error);
}
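
The added exec_ctx_run logging records where a closure was created and when it finished, which is exactly the information you want when a closure runs twice or never runs. A small self-contained sketch of the same debug-only bookkeeping; closure, CLOSURE_INIT, and g_trace_closures are simplified stand-ins for the real grpc_closure fields and the grpc_trace_closure tracer:

#include <stdio.h>

typedef void (*cb_fn)(void *arg);

typedef struct {
  cb_fn cb;
  void *cb_arg;
#ifndef NDEBUG
  const char *file_created;
  int line_created;
#endif
} closure;

#ifndef NDEBUG
#define CLOSURE_INIT(c, fn, arg)                                      \
  ((c)->cb = (fn), (c)->cb_arg = (arg), (c)->file_created = __FILE__, \
   (c)->line_created = __LINE__)
#else
#define CLOSURE_INIT(c, fn, arg) ((c)->cb = (fn), (c)->cb_arg = (arg))
#endif

static int g_trace_closures = 1; /* stands in for grpc_trace_closure */

static void run_closure(closure *c) {
#ifndef NDEBUG
  if (g_trace_closures) {
    fprintf(stderr, "running closure %p: created [%s:%d]\n", (void *)c,
            c->file_created, c->line_created);
  }
#endif
  c->cb(c->cb_arg);
#ifndef NDEBUG
  if (g_trace_closures) fprintf(stderr, "closure %p finished\n", (void *)c);
#endif
}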
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 4ca23a2407..7621a7fe75 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -21,132 +21,172 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
+#include <grpc/support/tls.h>
+#include <grpc/support/useful.h>
+
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/support/spinlock.h"
+
+#define MAX_DEPTH 2
-typedef struct grpc_executor_data {
- int busy; /**< is the thread currently running? */
- int shutting_down; /**< has \a grpc_shutdown() been invoked? */
- int pending_join; /**< has the thread finished but not been joined? */
- grpc_closure_list closures; /**< collection of pending work */
- gpr_thd_id tid; /**< thread id of the thread, only valid if \a busy or \a
- pending_join are true */
- gpr_thd_options options;
+typedef struct {
gpr_mu mu;
-} grpc_executor;
+ gpr_cv cv;
+ grpc_closure_list elems;
+ size_t depth;
+ bool shutdown;
+ gpr_thd_id id;
+} thread_state;
-static grpc_executor g_executor;
+static thread_state *g_thread_state;
+static size_t g_max_threads;
+static gpr_atm g_cur_threads;
+static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
-void grpc_executor_init() {
- memset(&g_executor, 0, sizeof(grpc_executor));
- gpr_mu_init(&g_executor.mu);
- g_executor.options = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&g_executor.options);
-}
+GPR_TLS_DECL(g_this_thread_state);
-/* thread body */
-static void closure_exec_thread_func(void *ignored) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- while (1) {
- gpr_mu_lock(&g_executor.mu);
- if (g_executor.shutting_down != 0) {
- gpr_mu_unlock(&g_executor.mu);
- break;
- }
- if (grpc_closure_list_empty(g_executor.closures)) {
- /* no more work, time to die */
- GPR_ASSERT(g_executor.busy == 1);
- g_executor.busy = 0;
- gpr_mu_unlock(&g_executor.mu);
- break;
- } else {
- grpc_closure *c = g_executor.closures.head;
- grpc_closure_list_init(&g_executor.closures);
- gpr_mu_unlock(&g_executor.mu);
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
+static void executor_thread(void *arg);
+
+static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
+ size_t n = 0;
+
+ grpc_closure *c = list.head;
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
#ifndef NDEBUG
- c->scheduled = false;
+ c->scheduled = false;
#endif
- c->cb(&exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- }
- grpc_exec_ctx_flush(&exec_ctx);
- }
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ n++;
}
- grpc_exec_ctx_finish(&exec_ctx);
+
+ return n;
}
-/* Spawn the thread if new work has arrived a no thread is up */
-static void maybe_spawn_locked() {
- if (grpc_closure_list_empty(g_executor.closures) == 1) {
- return;
- }
- if (g_executor.shutting_down == 1) {
- return;
- }
+bool grpc_executor_is_threaded() {
+ return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
+}
- if (g_executor.busy != 0) {
- /* Thread still working. New work will be picked up by already running
- * thread. Not spawning anything. */
- return;
- } else if (g_executor.pending_join != 0) {
- /* Pickup the remains of the previous incarnations of the thread. */
- gpr_thd_join(g_executor.tid);
- g_executor.pending_join = 0;
+void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
+ gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
+ if (threading) {
+ if (cur_threads > 0) return;
+ g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
+ gpr_atm_no_barrier_store(&g_cur_threads, 1);
+ gpr_tls_init(&g_this_thread_state);
+ g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
+ for (size_t i = 0; i < g_max_threads; i++) {
+ gpr_mu_init(&g_thread_state[i].mu);
+ gpr_cv_init(&g_thread_state[i].cv);
+ g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
+ }
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
+ &opt);
+ } else {
+ if (cur_threads == 0) return;
+ for (size_t i = 0; i < g_max_threads; i++) {
+ gpr_mu_lock(&g_thread_state[i].mu);
+ g_thread_state[i].shutdown = true;
+ gpr_cv_signal(&g_thread_state[i].cv);
+ gpr_mu_unlock(&g_thread_state[i].mu);
+ }
+ /* ensure no thread is adding a new thread... once this is past, then
+ no thread will try to add a new one either (since shutdown is true) */
+ gpr_spinlock_lock(&g_adding_thread_lock);
+ gpr_spinlock_unlock(&g_adding_thread_lock);
+ for (gpr_atm i = 0; i < g_cur_threads; i++) {
+ gpr_thd_join(g_thread_state[i].id);
+ }
+ gpr_atm_no_barrier_store(&g_cur_threads, 0);
+ for (size_t i = 0; i < g_max_threads; i++) {
+ gpr_mu_destroy(&g_thread_state[i].mu);
+ gpr_cv_destroy(&g_thread_state[i].cv);
+ run_closures(exec_ctx, g_thread_state[i].elems);
+ }
+ gpr_free(g_thread_state);
+ gpr_tls_destroy(&g_this_thread_state);
}
+}
- /* All previous instances of the thread should have been joined at this point.
- * Spawn time! */
- g_executor.busy = 1;
- GPR_ASSERT(gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL,
- &g_executor.options));
- g_executor.pending_join = 1;
+void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
+ gpr_atm_no_barrier_store(&g_cur_threads, 0);
+ grpc_executor_set_threading(exec_ctx, true);
}
-static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- gpr_mu_lock(&g_executor.mu);
- if (g_executor.shutting_down == 0) {
- grpc_closure_list_append(&g_executor.closures, closure, error);
- maybe_spawn_locked();
+void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
+ grpc_executor_set_threading(exec_ctx, false);
+}
+
+static void executor_thread(void *arg) {
+ thread_state *ts = arg;
+ gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
+
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
+
+ size_t subtract_depth = 0;
+ for (;;) {
+ gpr_mu_lock(&ts->mu);
+ ts->depth -= subtract_depth;
+ while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
+ gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+ }
+ if (ts->shutdown) {
+ gpr_mu_unlock(&ts->mu);
+ break;
+ }
+ grpc_closure_list exec = ts->elems;
+ ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
+ gpr_mu_unlock(&ts->mu);
+
+ subtract_depth = run_closures(&exec_ctx, exec);
+ grpc_exec_ctx_flush(&exec_ctx);
}
- gpr_mu_unlock(&g_executor.mu);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
- int pending_join;
-
- gpr_mu_lock(&g_executor.mu);
- pending_join = g_executor.pending_join;
- g_executor.shutting_down = 1;
- gpr_mu_unlock(&g_executor.mu);
- /* we can release the lock at this point despite the access to the closure
- * list below because we aren't accepting new work */
-
- /* Execute pending callbacks, some may be performing cleanups */
- grpc_closure *c = g_executor.closures.head;
- grpc_closure_list_init(&g_executor.closures);
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
+static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count == 0) {
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ return;
+ }
+ thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
+ if (ts == NULL) {
+ ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
}
- grpc_exec_ctx_flush(exec_ctx);
- GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
- if (pending_join) {
- gpr_thd_join(g_executor.tid);
+ gpr_mu_lock(&ts->mu);
+ if (grpc_closure_list_empty(ts->elems)) {
+ gpr_cv_signal(&ts->cv);
+ }
+ grpc_closure_list_append(&ts->elems, closure, error);
+ ts->depth++;
+ bool try_new_thread = ts->depth > MAX_DEPTH &&
+ cur_thread_count < g_max_threads && !ts->shutdown;
+ gpr_mu_unlock(&ts->mu);
+ if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
+ cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count < g_max_threads) {
+ gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
+ &g_thread_state[cur_thread_count], &opt);
+ }
+ gpr_spinlock_unlock(&g_adding_thread_lock);
}
- gpr_mu_destroy(&g_executor.mu);
}
static const grpc_closure_scheduler_vtable executor_vtable = {
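
The rewritten executor is the biggest behavioral change in this diff: instead of one lazily spawned thread, each push appends to a per-thread list, and when a list's depth exceeds MAX_DEPTH while the pool is under its cap (twice the core count in the real code), one thread at a time may grow the pool under g_adding_thread_lock. The condensed pthreads sketch below keeps that shape; a trylock'd mutex stands in for gpr_spinlock, a fixed MAX_THREADS for the computed cap, and the racy g_cur_threads read mirrors the no-barrier atomic load above:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define MAX_DEPTH 2
#define MAX_THREADS 8

typedef struct work {
  void (*fn)(void *arg);
  void *arg;
  struct work *next;
} work;

typedef struct {
  pthread_mutex_t mu;
  pthread_cond_t cv;
  work *head, *tail;
  size_t depth; /* items queued since this worker last drained */
  bool shutdown;
  pthread_t id;
} thread_state;

static thread_state g_threads[MAX_THREADS];
static size_t g_cur_threads;
static pthread_mutex_t g_adding_thread_mu = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg) {
  thread_state *ts = arg;
  for (;;) {
    pthread_mutex_lock(&ts->mu);
    while (ts->head == NULL && !ts->shutdown) {
      pthread_cond_wait(&ts->cv, &ts->mu);
    }
    if (ts->shutdown) {
      pthread_mutex_unlock(&ts->mu);
      return NULL;
    }
    work *w = ts->head; /* drain the whole list, as run_closures does */
    ts->head = ts->tail = NULL;
    ts->depth = 0;
    pthread_mutex_unlock(&ts->mu);
    while (w != NULL) {
      work *next = w->next;
      w->fn(w->arg);
      free(w);
      w = next;
    }
  }
}

static void start_thread(thread_state *ts) {
  pthread_mutex_init(&ts->mu, NULL);
  pthread_cond_init(&ts->cv, NULL);
  pthread_create(&ts->id, NULL, worker, ts);
}

void executor_init(void) {
  g_cur_threads = 1;
  start_thread(&g_threads[0]);
}

/* Queue one item on a home thread; if that queue is getting deep and the
   pool is below the cap, start another worker so queues stay shallow. */
void executor_push(size_t home, void (*fn)(void *), void *arg) {
  work *w = malloc(sizeof(*w));
  w->fn = fn;
  w->arg = arg;
  w->next = NULL;
  thread_state *ts = &g_threads[home % g_cur_threads]; /* racy read; see note */
  pthread_mutex_lock(&ts->mu);
  if (ts->head == NULL) pthread_cond_signal(&ts->cv); /* worker may be asleep */
  if (ts->tail) ts->tail->next = w; else ts->head = w;
  ts->tail = w;
  bool try_new_thread = ++ts->depth > MAX_DEPTH && !ts->shutdown;
  pthread_mutex_unlock(&ts->mu);
  if (try_new_thread && pthread_mutex_trylock(&g_adding_thread_mu) == 0) {
    if (g_cur_threads < MAX_THREADS) {
      start_thread(&g_threads[g_cur_threads]);
      g_cur_threads++; /* bump only after the new state is initialized */
    }
    pthread_mutex_unlock(&g_adding_thread_mu);
  }
}

Routing only ever targets indices below g_cur_threads, and the count is bumped only after the new worker's state is initialized, which is why the racy read stays safe here, as it does with the no-barrier load in the original.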
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index ace9d80b06..c3382a0a12 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -26,11 +26,18 @@
* This mechanism is meant to outsource work (grpc_closure instances) to a
* thread, for those cases where blocking isn't an option but there isn't a
* non-blocking solution available. */
-void grpc_executor_init();
+void grpc_executor_init(grpc_exec_ctx *exec_ctx);
extern grpc_closure_scheduler *grpc_executor_scheduler;
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
+/** Is the executor multi-threaded? */
+bool grpc_executor_is_threaded();
+
+/** Enable or disable threading; must be called after grpc_executor_init and
+    before grpc_executor_shutdown. */
+void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable);
+
#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index c8b784e4c0..3d19953eeb 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -29,6 +29,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/timer.h"
@@ -41,11 +42,12 @@ static gpr_cv g_rcv;
static int g_shutdown;
static grpc_iomgr_object g_root_object;
-void grpc_iomgr_init(void) {
+void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
grpc_exec_ctx_global_init();
+ grpc_executor_init(exec_ctx);
grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
@@ -53,7 +55,7 @@ void grpc_iomgr_init(void) {
grpc_iomgr_platform_init();
}
-void grpc_iomgr_start(void) { grpc_timer_manager_init(); }
+void grpc_iomgr_start(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_init(); }
static size_t count_objects(void) {
grpc_iomgr_object *obj;
@@ -78,6 +80,7 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
grpc_timer_manager_shutdown();
grpc_iomgr_platform_flush();
+ grpc_executor_shutdown(exec_ctx);
gpr_mu_lock(&g_mu);
g_shutdown = 1;
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 45dedc2b0a..e3cd6ebe79 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -23,10 +23,10 @@
#include "src/core/lib/iomgr/port.h"
/** Initializes the iomgr. */
-void grpc_iomgr_init(void);
+void grpc_iomgr_init(grpc_exec_ctx *exec_ctx);
/** Starts any background threads for iomgr. */
-void grpc_iomgr_start(void);
+void grpc_iomgr_start(grpc_exec_ctx *exec_ctx);
/** Signals the intention to shutdown the iomgr. Expects to be able to flush
* exec_ctx. */
diff --git a/src/core/lib/iomgr/lockfree_event.c b/src/core/lib/iomgr/lockfree_event.c
index 6fa285b5f3..c2ceecb3c5 100644
--- a/src/core/lib/iomgr/lockfree_event.c
+++ b/src/core/lib/iomgr/lockfree_event.c
@@ -112,7 +112,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
closure when transitioning out of CLOSURE_NO_READY state (i.e there
is no other code that needs to 'happen-after' this) */
if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
return; /* Successful. Return */
}
@@ -125,7 +125,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
schedule the closure with the shutdown error */
if ((curr & FD_SHUTDOWN_BIT) > 0) {
grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
- grpc_closure_sched(exec_ctx, closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
return;
@@ -177,7 +177,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
if (gpr_atm_full_cas(state, curr, new_state)) {
- grpc_closure_sched(exec_ctx, (grpc_closure *)curr,
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
return true;
@@ -226,7 +226,7 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
- grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
return;
}
/* else the state changed again (only possible by either a racing
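
grpc_lfev packs an entire event into one atomic word: 0 means not-ready, 1 means ready, and any other value is a parked closure pointer, so each transition is a single CAS and the closure is handed off exactly once. A stripped-down C11 sketch of the two core transitions, omitting the shutdown-bit and error plumbing the real code layers on top; closure and run are illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NOT_READY ((uintptr_t)0)
#define READY ((uintptr_t)1)

typedef struct {
  void (*cb)(void *arg);
  void *arg;
} closure;

static void run(closure *c) { c->cb(c->arg); }

/* Register interest: if the event already fired, consume READY and run the
   closure immediately; otherwise park the closure pointer in the state word. */
static void lfev_notify_on(atomic_uintptr_t *state, closure *c) {
  for (;;) {
    uintptr_t curr = atomic_load(state);
    if (curr == NOT_READY) {
      if (atomic_compare_exchange_weak(state, &curr, (uintptr_t)c)) return;
    } else if (curr == READY) {
      if (atomic_compare_exchange_weak(state, &curr, NOT_READY)) {
        run(c);
        return;
      }
    } else {
      fprintf(stderr, "fatal: two closures waiting on one event\n");
      return;
    }
    /* CAS lost a race with set_ready: loop and re-read the state */
  }
}

/* Fire the event: record READY, or hand off directly to a parked closure. */
static void lfev_set_ready(atomic_uintptr_t *state) {
  for (;;) {
    uintptr_t curr = atomic_load(state);
    if (curr == READY) return; /* spurious set_ready is a no-op */
    if (curr == NOT_READY) {
      if (atomic_compare_exchange_weak(state, &curr, READY)) return;
    } else if (atomic_compare_exchange_weak(state, &curr, NOT_READY)) {
      run((closure *)curr); /* exactly-once handoff to the waiter */
      return;
    }
  }
}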
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 3ff878e596..a609a3877a 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_fd_refcount;
+#endif
+
/* A grpc_pollset is a set of file descriptors that a higher level item is
interested in. For example:
- a server will typically keep a pollset containing all connected channels,
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index 8ff647f03c..1a54065a91 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -31,6 +31,12 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_uv.h"
+#include "src/core/lib/debug/trace.h"
+
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_fd_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
struct grpc_pollset {
uv_timer_t timer;
int shutting_down;
@@ -88,7 +94,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
// kick the loop once
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
}
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
}
void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index ab1a327b46..1bfc2a22a8 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -30,6 +30,10 @@
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_fd_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
gpr_mu grpc_polling_mu;
static grpc_pollset_worker *g_active_poller;
static grpc_pollset_worker g_global_root_worker;
@@ -95,7 +99,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
pollset->on_shutdown = closure;
}
@@ -143,7 +147,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
if (pollset->shutting_down && pollset->on_shutdown != NULL) {
- grpc_closure_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
pollset->on_shutdown = NULL;
}
goto done;
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index a78de66941..35dedc23de 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -154,7 +154,7 @@ typedef struct {
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
request *r = rp;
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
gpr_free(r->name);
@@ -175,13 +175,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
request *r = gpr_malloc(sizeof(request));
- grpc_closure_init(&r->request_closure, do_request_thread, r,
+ GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler);
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
r->addrs_out = addrs;
- grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
void (*grpc_resolve_address)(
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index f1ea516175..45de289e45 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -124,7 +124,7 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
/* Either no retry was attempted, or the retry failed. Either way, the
original error probably has more interesting information */
error = handle_addrinfo_result(status, res, r->addresses);
- grpc_closure_sched(&exec_ctx, r->on_done, error);
+ GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(r->hints);
gpr_free(r);
@@ -225,7 +225,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
int s;
err = try_split_host_port(name, default_port, &host, &port);
if (err != GRPC_ERROR_NONE) {
- grpc_closure_sched(exec_ctx, on_done, err);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
return;
}
r = gpr_malloc(sizeof(request));
@@ -252,7 +252,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(s)));
- grpc_closure_sched(exec_ctx, on_done, err);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
gpr_free(r);
gpr_free(req);
gpr_free(hints);
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 83adfd338a..45cfd7248d 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -139,7 +139,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
} else {
GRPC_ERROR_REF(error);
}
- grpc_closure_sched(exec_ctx, r->on_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error);
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r);
@@ -158,13 +158,13 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_closure *on_done,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
- grpc_closure_init(&r->request_closure, do_request_thread, r,
+ GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler);
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
r->addresses = addresses;
- grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
void (*grpc_resolve_address)(
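
Across the POSIX and Windows resolvers the shape is unchanged by the macro rename: the blocking lookup is pushed onto an executor thread and the on_done closure fires with the result. A bare-bones sketch of that offload using a detached pthread in place of the executor; request, resolve_async, and the callback signature are illustrative only:

#include <netdb.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

typedef struct {
  char *name;
  char *port;
  void (*on_done)(struct addrinfo *res, int status);
} request;

/* Thread body: run the blocking lookup off the caller's thread, then hand
   the result to the stored callback, mirroring do_request_thread above. */
static void *resolve_thread(void *rp) {
  request *r = rp;
  struct addrinfo *res = NULL;
  int status = getaddrinfo(r->name, r->port, NULL, &res);
  r->on_done(res, status); /* callback owns res; it must freeaddrinfo() it */
  free(r->name);
  free(r->port);
  free(r);
  return NULL;
}

static void resolve_async(const char *name, const char *port,
                          void (*on_done)(struct addrinfo *, int)) {
  request *r = malloc(sizeof(*r));
  r->name = strdup(name);
  r->port = strdup(port);
  r->on_done = on_done;
  pthread_t tid;
  pthread_create(&tid, NULL, resolve_thread, r);
  pthread_detach(tid);
}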
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index d3632c4cba..f2cc1be74e 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -259,7 +259,7 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx,
if (resource_quota->step_scheduled) return;
resource_quota->step_scheduled = true;
grpc_resource_quota_ref_internal(resource_quota);
- grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_quota->rq_step_closure,
GRPC_ERROR_NONE);
}
@@ -305,7 +305,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
}
if (resource_user->free_pool >= 0) {
resource_user->allocating = false;
- grpc_closure_list_sched(exec_ctx, &resource_user->on_allocated);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
} else {
rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -363,7 +363,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
resource_quota->debug_only_last_reclaimer_resource_user = resource_user;
resource_quota->debug_only_last_initiated_reclaimer = c;
resource_user->reclaimers[destructive] = NULL;
- grpc_closure_run(exec_ctx, c, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_NONE);
return true;
}
@@ -444,7 +444,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
resource_user->new_reclaimers[destructive] = NULL;
GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CANCELLED);
return false;
}
resource_user->reclaimers[destructive] = closure;
@@ -485,9 +485,9 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
grpc_resource_user *resource_user = ru;
- grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
- grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
GRPC_ERROR_CANCELLED);
resource_user->reclaimers[0] = NULL;
resource_user->reclaimers[1] = NULL;
@@ -501,9 +501,9 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
}
- grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
- grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
GRPC_ERROR_CANCELLED);
if (resource_user->free_pool != 0) {
resource_user->resource_quota->free_pool += resource_user->free_pool;
@@ -525,7 +525,7 @@ static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
slice_allocator->length));
}
}
- grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
}
/*******************************************************************************
@@ -566,7 +566,7 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
- resource_quota->combiner = grpc_combiner_create(NULL);
+ resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
@@ -579,12 +579,11 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
(intptr_t)resource_quota);
}
- grpc_closure_init(
- &resource_quota->rq_step_closure, rq_step, resource_quota,
- grpc_combiner_finally_scheduler(resource_quota->combiner, true));
- grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
+ GRPC_CLOSURE_INIT(&resource_quota->rq_step_closure, rq_step, resource_quota,
+ grpc_combiner_finally_scheduler(resource_quota->combiner));
+ GRPC_CLOSURE_INIT(&resource_quota->rq_reclamation_done_closure,
rq_reclamation_done, resource_quota,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_quota->roots[i] = NULL;
}
@@ -634,8 +633,8 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
(gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size));
- grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
- grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_SCHED(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -687,20 +686,20 @@ grpc_resource_user *grpc_resource_user_create(
grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
- grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
+ GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
- grpc_closure_init(&resource_user->add_to_free_pool_closure,
+ grpc_combiner_scheduler(resource_quota->combiner));
+ GRPC_CLOSURE_INIT(&resource_user->add_to_free_pool_closure,
&ru_add_to_free_pool, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
- grpc_closure_init(&resource_user->post_reclaimer_closure[0],
+ grpc_combiner_scheduler(resource_quota->combiner));
+ GRPC_CLOSURE_INIT(&resource_user->post_reclaimer_closure[0],
&ru_post_benign_reclaimer, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
- grpc_closure_init(&resource_user->post_reclaimer_closure[1],
+ grpc_combiner_scheduler(resource_quota->combiner));
+ GRPC_CLOSURE_INIT(&resource_user->post_reclaimer_closure[1],
&ru_post_destructive_reclaimer, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
- grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
- grpc_combiner_scheduler(resource_quota->combiner, false));
+ grpc_combiner_scheduler(resource_quota->combiner));
+ GRPC_CLOSURE_INIT(&resource_user->destroy_closure, &ru_destroy, resource_user,
+ grpc_combiner_scheduler(resource_quota->combiner));
gpr_mu_init(&resource_user->mu);
gpr_atm_rel_store(&resource_user->refs, 1);
gpr_atm_rel_store(&resource_user->shutdown, 0);
@@ -740,7 +739,7 @@ static void ru_unref_by(grpc_exec_ctx *exec_ctx,
gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
GPR_ASSERT(old >= amount);
if (old == amount) {
- grpc_closure_sched(exec_ctx, &resource_user->destroy_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->destroy_closure,
GRPC_ERROR_NONE);
}
}
@@ -757,12 +756,12 @@ void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
- grpc_closure_sched(exec_ctx,
- grpc_closure_create(
- ru_shutdown, resource_user,
- grpc_combiner_scheduler(
- resource_user->resource_quota->combiner, false)),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(
+ ru_shutdown, resource_user,
+ grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
+ GRPC_ERROR_NONE);
}
}
@@ -782,11 +781,11 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
if (!resource_user->allocating) {
resource_user->allocating = true;
- grpc_closure_sched(exec_ctx, &resource_user->allocate_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->allocate_closure,
GRPC_ERROR_NONE);
}
} else {
- grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
}
@@ -805,7 +804,7 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
if (is_bigger_than_zero && was_zero_or_negative &&
!resource_user->added_to_free_pool) {
resource_user->added_to_free_pool = true;
- grpc_closure_sched(exec_ctx, &resource_user->add_to_free_pool_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->add_to_free_pool_closure,
GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
@@ -818,7 +817,7 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_closure *closure) {
GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
resource_user->new_reclaimers[destructive] = closure;
- grpc_closure_sched(exec_ctx,
+ GRPC_CLOSURE_SCHED(exec_ctx,
&resource_user->post_reclaimer_closure[destructive],
GRPC_ERROR_NONE);
}
@@ -829,7 +828,7 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
GRPC_ERROR_NONE);
}
@@ -837,9 +836,9 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_slice_allocator_init(
grpc_resource_user_slice_allocator *slice_allocator,
grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
- grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
+ GRPC_CLOSURE_INIT(&slice_allocator->on_allocated, ru_allocated_slices,
slice_allocator, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&slice_allocator->on_done, cb, p,
+ GRPC_CLOSURE_INIT(&slice_allocator->on_done, cb, p,
grpc_schedule_on_exec_ctx);
slice_allocator->resource_user = resource_user;
}
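
The hunks above capture the two API migrations that recur throughout this patch: the lower-case closure helpers (grpc_closure_init/create/sched/run) become upper-case GRPC_CLOSURE_* macros, and grpc_combiner_create() / grpc_combiner_scheduler() lose their workqueue and covered-by-poller arguments. A plausible motivation for the macro layer is call-site capture in debug builds; a minimal sketch of what such an expansion could look like follows (the exact signature is an assumption, not quoted from this patch):

    #ifndef NDEBUG
    /* Debug builds record where each closure was initialized. */
    #define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
      grpc_closure_init(__FILE__, __LINE__, (closure), (cb), (cb_arg), (scheduler))
    #else
    #define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
      grpc_closure_init((closure), (cb), (cb_arg), (scheduler))
    #endif

Call sites stay identical in both build modes, which is why the rest of this diff is almost entirely a mechanical rename.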
diff --git a/src/core/lib/iomgr/socket_factory_posix.c b/src/core/lib/iomgr/socket_factory_posix.c
index 7d25bc1265..0f82dea570 100644
--- a/src/core/lib/iomgr/socket_factory_posix.c
+++ b/src/core/lib/iomgr/socket_factory_posix.c
@@ -20,6 +20,7 @@
#ifdef GRPC_POSIX_SOCKET
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/socket_factory_posix.h"
#include <grpc/impl/codegen/grpc_types.h>
@@ -84,12 +85,8 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_SOCKET_FACTORY;
- arg.value.pointer.vtable = &socket_factory_arg_vtable;
- arg.value.pointer.p = factory;
- return arg;
+ return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_FACTORY, factory,
+ &socket_factory_arg_vtable);
}
#endif
diff --git a/src/core/lib/iomgr/socket_mutator.c b/src/core/lib/iomgr/socket_mutator.c
index c4b9a0930b..5d6c2c400e 100644
--- a/src/core/lib/iomgr/socket_mutator.c
+++ b/src/core/lib/iomgr/socket_mutator.c
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/socket_mutator.h"
+#include "src/core/lib/channel/channel_args.h"
+
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
@@ -74,10 +76,6 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_SOCKET_MUTATOR;
- arg.value.pointer.vtable = &socket_mutator_arg_vtable;
- arg.value.pointer.p = mutator;
- return arg;
+ return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_MUTATOR, mutator,
+ &socket_mutator_arg_vtable);
}
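
Both of the preceding files replace hand-rolled grpc_arg construction with grpc_channel_arg_pointer_create() from channel_args.h, hence the new #include lines. Judging from the deleted field assignments, the helper presumably looks like the sketch below; the parameter names are illustrative:

    /* Sketch reconstructed from the deleted code above; see
       src/core/lib/channel/channel_args.c for the real definition. */
    grpc_arg grpc_channel_arg_pointer_create(
        char *name, void *value, const grpc_arg_pointer_vtable *vtable) {
      grpc_arg arg;
      arg.type = GRPC_ARG_POINTER;
      arg.key = name;
      arg.value.pointer.p = value;
      arg.value.pointer.vtable = vtable;
      return arg;
    }

A string variant, grpc_channel_arg_string_create, appears later in the credentials hunks and follows the same pattern for GRPC_ARG_STRING.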
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index f73e33db86..a0d731b942 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -116,7 +116,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
info->has_pending_iocp = 0;
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
info->closure = closure;
}
@@ -139,7 +139,7 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
GPR_ASSERT(!info->has_pending_iocp);
gpr_mu_lock(&socket->state_mu);
if (info->closure) {
- grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, info->closure, GRPC_ERROR_NONE);
info->closure = NULL;
} else {
info->has_pending_iocp = 1;
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index abad3c2f22..21e320a6e7 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -234,7 +234,7 @@ finish:
grpc_channel_args_destroy(exec_ctx, ac->channel_args);
gpr_free(ac);
}
- grpc_closure_sched(exec_ctx, closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
}
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
@@ -263,7 +263,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (error != GRPC_ERROR_NONE) {
- grpc_closure_sched(exec_ctx, closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
return;
}
if (dsmode == GRPC_DSMODE_IPV4) {
@@ -272,7 +272,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
addr = &addr4_copy;
}
if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
- grpc_closure_sched(exec_ctx, closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
return;
}
@@ -290,13 +290,13 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (err >= 0) {
*ep =
grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
- grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
goto done;
}
@@ -311,7 +311,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
addr_str = NULL;
gpr_mu_init(&ac->mu);
ac->refs = 2;
- grpc_closure_init(&ac->write_closure, on_writable, ac,
+ GRPC_CLOSURE_INIT(&ac->write_closure, on_writable, ac,
grpc_schedule_on_exec_ctx);
ac->channel_args = grpc_channel_args_copy(channel_args);
@@ -321,7 +321,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
}
gpr_mu_lock(&ac->mu);
- grpc_closure_init(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &ac->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
&ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index f4084339d6..ab6832932f 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -107,7 +107,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
if (done) {
uv_tcp_connect_cleanup(&exec_ctx, connect);
}
- grpc_closure_sched(&exec_ctx, closure, error);
+ GRPC_CLOSURE_SCHED(&exec_ctx, closure, error);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -150,7 +150,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
(const struct sockaddr *)resolved_addr->addr,
uv_tc_on_connect);
- grpc_closure_init(&connect->on_alarm, uv_tc_on_alarm, connect,
+ GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &connect->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index e0913cfaed..fc62105cc9 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -116,7 +116,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
- grpc_closure_sched(exec_ctx, on_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
}
/* Tries to issue one async connection, then schedules both an IOCP
@@ -201,9 +201,9 @@ static void tcp_client_connect_impl(
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
ac->channel_args = grpc_channel_args_copy(channel_args);
- grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm,
gpr_now(GPR_CLOCK_MONOTONIC));
grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
@@ -222,7 +222,7 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
- grpc_closure_sched(exec_ctx, on_done, final_error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, final_error);
}
// overridden by api_fuzzer.c
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 36091803bb..5de2b0f4ee 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -163,15 +163,18 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
gpr_free(tcp);
}
-/*#define GRPC_TCP_REFCOUNT_DEBUG*/
-#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define TCP_UNREF(cl, tcp, reason) \
tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val - 1);
+ }
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
@@ -179,8 +182,12 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count + 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val + 1);
+ }
gpr_ref(&tcp->refcount);
}
#else
@@ -221,7 +228,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
- grpc_closure_run(exec_ctx, cb, error);
+ GRPC_CLOSURE_RUN(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
@@ -348,7 +355,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->finished_edge = false;
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
- grpc_closure_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
}
}
@@ -465,7 +472,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
gpr_log(GPR_DEBUG, "write: %s", str);
}
- grpc_closure_run(exec_ctx, cb, error);
+ GRPC_CLOSURE_RUN(exec_ctx, cb, error);
TCP_UNREF(exec_ctx, tcp, "write");
}
}
@@ -491,7 +498,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, cb,
grpc_fd_is_shutdown(tcp->em_fd)
? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
@@ -515,7 +522,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
- grpc_closure_sched(exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
GPR_TIMER_END("tcp_write", 0);
@@ -543,26 +550,15 @@ static int tcp_get_fd(grpc_endpoint *ep) {
return tcp->fd;
}
-static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
- return grpc_fd_get_workqueue(tcp->em_fd);
-}
-
static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return tcp->resource_user;
}
-static const grpc_endpoint_vtable vtable = {tcp_read,
- tcp_write,
- tcp_get_workqueue,
- tcp_add_to_pollset,
- tcp_add_to_pollset_set,
- tcp_shutdown,
- tcp_destroy,
- tcp_get_resource_user,
- tcp_get_peer,
- tcp_get_fd};
+static const grpc_endpoint_vtable vtable = {
+ tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
+ tcp_shutdown, tcp_destroy, tcp_get_resource_user, tcp_get_peer,
+ tcp_get_fd};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
@@ -627,9 +623,9 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
- grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp,
+ GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&tcp->write_closure, tcp_handle_write, tcp,
+ GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
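
Two independent fixes land in the ref-counting hunks above. First, the logging guard moves from the ad-hoc compile-time GRPC_TCP_REFCOUNT_DEBUG define to all debug builds (#ifndef NDEBUG), gated at runtime by the grpc_tcp_trace flag. Second, the count is now read once with an atomic load and printed with PRIdPTR, instead of passing a gpr_atm to a %d format, which was mismatched on 64-bit platforms. The new shape, written as a self-contained sketch (the helper name and include set are illustrative; grpc_tracer_flag comes from the core trace header):

    #include <inttypes.h>
    #include <grpc/support/log.h>
    #include <grpc/support/sync.h>

    /* Hypothetical helper mirroring the tcp_ref() bodies above. */
    static void traced_ref(gpr_refcount *refs, grpc_tracer_flag *tracer,
                           const char *file, int line) {
      if (GRPC_TRACER_ON(*tracer)) {
        gpr_atm val = gpr_atm_no_barrier_load(&refs->count);
        gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
                "ref %" PRIdPTR " -> %" PRIdPTR, val, val + 1);
      }
      gpr_ref(refs);
    }

tcp_uv.c and tcp_windows.c below receive the identical treatment.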
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 7bb8392d4b..f304642951 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -121,7 +121,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
GPR_ASSERT(s->shutdown);
gpr_mu_unlock(&s->mu);
if (s->shutdown_complete != NULL) {
- grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
@@ -163,7 +163,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
- grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+ GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
grpc_schedule_on_exec_ctx);
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"tcp_listener_shutdown");
@@ -503,7 +503,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
"clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
- grpc_closure_init(&sp->read_closure, on_read, sp,
+ GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
@@ -513,7 +513,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- grpc_closure_init(&sp->read_closure, on_read, sp,
+ GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
@@ -540,7 +540,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
gpr_mu_unlock(&s->mu);
tcp_server_destroy(exec_ctx, s);
}
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
index 632a232861..2de0ea90e7 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -117,7 +117,7 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
GPR_ASSERT(s->shutdown);
if (s->shutdown_complete != NULL) {
- grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
while (s->head) {
@@ -171,7 +171,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_closure_list_sched(&local_exec_ctx, &s->shutdown_starting);
+ GRPC_CLOSURE_LIST_SCHED(&local_exec_ctx, &s->shutdown_starting);
if (exec_ctx == NULL) {
grpc_exec_ctx_flush(&local_exec_ctx);
tcp_server_destroy(&local_exec_ctx, s);
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index e8343a94a0..0162afc1ad 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -134,10 +134,10 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
- grpc_closure_sched(exec_ctx, grpc_closure_create(destroy_server, s,
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
@@ -176,7 +176,7 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
gpr_mu_unlock(&s->mu);
tcp_server_destroy(exec_ctx, s);
}
@@ -437,7 +437,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
sp->new_socket = INVALID_SOCKET;
sp->port = port;
sp->port_index = port_index;
- grpc_closure_init(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
GPR_ASSERT(sp->socket);
gpr_mu_unlock(&s->mu);
*listener = sp;
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 5d3d315234..7c21b44e76 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -69,16 +69,18 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
gpr_free(tcp);
}
-/*#define GRPC_TCP_REFCOUNT_DEBUG*/
-#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define TCP_UNREF(exec_ctx, tcp, reason) \
tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "TCP unref %p : %s %" PRIiPTR " -> %" PRIiPTR, tcp, reason,
- tcp->refcount.count, tcp->refcount.count - 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val - 1);
+ }
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
@@ -86,9 +88,12 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "TCP ref %p : %s %" PRIiPTR " -> %" PRIiPTR, tcp, reason,
- tcp->refcount.count, tcp->refcount.count + 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val + 1);
+ }
gpr_ref(&tcp->refcount);
}
#else
@@ -161,7 +166,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// nread < 0: Error
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
}
- grpc_closure_sched(&exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -183,7 +188,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
error =
grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status)));
- grpc_closure_sched(exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@@ -210,7 +215,7 @@ static void write_callback(uv_write_t *req, int status) {
gpr_free(tcp->write_buffers);
grpc_resource_user_free(&exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * tcp->write_slices->count);
- grpc_closure_sched(&exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -236,7 +241,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
if (tcp->shutting_down) {
- grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"TCP socket is shutting down"));
return;
}
@@ -247,7 +252,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (tcp->write_slices->count == 0) {
// No slices means we don't have to do anything,
// and libuv doesn't like empty writes
- grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
return;
}
@@ -318,15 +323,12 @@ static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
return tcp->resource_user;
}
-static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
-
static int uv_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_endpoint_vtable vtable = {
- uv_endpoint_read, uv_endpoint_write, uv_get_workqueue,
- uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown,
- uv_destroy, uv_get_resource_user, uv_get_peer,
- uv_get_fd};
+ uv_endpoint_read, uv_endpoint_write, uv_add_to_pollset,
+ uv_add_to_pollset_set, uv_endpoint_shutdown, uv_destroy,
+ uv_get_resource_user, uv_get_peer, uv_get_fd};
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index a0a2563956..6704a158ce 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -48,6 +48,8 @@
#define GRPC_FIONBIO FIONBIO
#endif
+grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false);
+
static grpc_error *set_non_block(SOCKET sock) {
int status;
uint32_t param = 1;
@@ -115,15 +117,18 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
gpr_free(tcp);
}
-/*#define GRPC_TCP_REFCOUNT_DEBUG*/
-#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define TCP_UNREF(exec_ctx, tcp, reason) \
tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val - 1);
+ }
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
@@ -131,8 +136,12 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
- reason, tcp->refcount.count, tcp->refcount.count + 1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
+ val + 1);
+ }
gpr_ref(&tcp->refcount);
}
#else
@@ -179,7 +188,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
tcp->read_cb = NULL;
TCP_UNREF(exec_ctx, tcp, "read");
- grpc_closure_sched(exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -193,7 +202,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF buffer;
if (tcp->shutting_down) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, cb,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP socket is shutting down", &tcp->shutdown_error, 1));
@@ -220,7 +229,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read;
- grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
return;
}
@@ -233,7 +242,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error;
- grpc_closure_sched(exec_ctx, &tcp->on_read,
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read,
GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
return;
}
@@ -265,7 +274,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
}
TCP_UNREF(exec_ctx, tcp, "write");
- grpc_closure_sched(exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
/* Initiates a write. */
@@ -283,7 +292,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
size_t len;
if (tcp->shutting_down) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, cb,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP socket is shutting down", &tcp->shutdown_error, 1));
@@ -317,7 +326,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *error = status == 0
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(info->wsa_error, "WSASend");
- grpc_closure_sched(exec_ctx, cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
if (allocated) gpr_free(allocated);
return;
}
@@ -335,7 +344,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
TCP_UNREF(exec_ctx, tcp, "write");
- grpc_closure_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
return;
}
}
@@ -395,8 +404,6 @@ static char *win_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
-static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; }
-
static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return tcp->resource_user;
@@ -404,16 +411,10 @@ static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
static int win_get_fd(grpc_endpoint *ep) { return -1; }
-static grpc_endpoint_vtable vtable = {win_read,
- win_write,
- win_get_workqueue,
- win_add_to_pollset,
- win_add_to_pollset_set,
- win_shutdown,
- win_destroy,
- win_get_resource_user,
- win_get_peer,
- win_get_fd};
+static grpc_endpoint_vtable vtable = {
+ win_read, win_write, win_add_to_pollset, win_add_to_pollset_set,
+ win_shutdown, win_destroy, win_get_resource_user, win_get_peer,
+ win_get_fd};
grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
grpc_channel_args *channel_args,
@@ -434,8 +435,8 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
tcp->socket = socket;
gpr_mu_init(&tcp->mu);
gpr_ref_init(&tcp->refcount, 1);
- grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
tcp->peer_string = gpr_strdup(peer_string);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 69b3cfd139..bf73d2c685 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -230,7 +230,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
if (!g_shared_mutables.initialized) {
timer->pending = false;
- grpc_closure_sched(exec_ctx, timer->closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Attempt to create timer before initialization"));
return;
@@ -240,7 +240,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->pending = true;
if (gpr_time_cmp(deadline, now) <= 0) {
timer->pending = false;
- grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu);
/* early out */
return;
@@ -310,7 +310,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
timer->pending ? "true" : "false");
}
if (timer->pending) {
- grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
@@ -400,7 +400,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
- grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
n++;
}
*new_min_deadline = compute_min_deadline(shard);
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index f814f0e474..4f204cfbf8 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -44,7 +44,7 @@ void run_expired_timer(uv_timer_t *handle) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(timer->pending);
timer->pending = 0;
- grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -57,7 +57,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) {
timer->pending = 0;
- grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
timer->pending = 1;
@@ -76,7 +76,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
if (timer->pending) {
timer->pending = 0;
- grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer);
}
}
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 200a722c7e..54e7f417a7 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -156,7 +156,7 @@ static void dummy_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
@@ -201,13 +201,13 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
- grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+ GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
grpc_schedule_on_exec_ctx);
if (!sp->orphan_notified) {
/* Call the orphan_cb to signal that the FD is about to be closed and
* should no longer be used. Because at this point, all listening ports
* have been shutdown already, no need to shutdown again.*/
- grpc_closure_init(&sp->orphan_fd_closure, dummy_cb, sp->emfd,
+ GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd,
grpc_schedule_on_exec_ctx);
GPR_ASSERT(sp->orphan_cb);
sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
@@ -240,7 +240,7 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
args->fd = sp->emfd;
args->server_mu = &s->mu;
- grpc_closure_init(&sp->orphan_fd_closure, shutdown_fd, args,
+ GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
grpc_schedule_on_exec_ctx);
sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
sp->server->user_data);
@@ -525,11 +525,11 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- grpc_closure_init(&sp->read_closure, on_read, sp,
+ GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
- grpc_closure_init(&sp->write_closure, on_write, sp,
+ GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
deleted file mode 100644
index 558d4955d3..0000000000
--- a/src/core/lib/iomgr/workqueue.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_H
-
-#include "src/core/lib/iomgr/closure.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/iomgr.h"
-#include "src/core/lib/iomgr/pollset.h"
-#include "src/core/lib/iomgr/pollset_set.h"
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GPR_WINDOWS
-#include "src/core/lib/iomgr/workqueue_windows.h"
-#endif
-
-/* grpc_workqueue is forward declared in exec_ctx.h */
-
-/* Reference counting functions. Use the macro's always
- (GRPC_WORKQUEUE_{REF,UNREF}).
-
- Pass in a descriptive reason string for reffing/unreffing as the last
- argument to each macro. When GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined, that
- string will be printed alongside the refcount. When it is not defined, the
- string will be discarded at compilation time. */
-
-/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-#define GRPC_WORKQUEUE_REF(p, r) \
- grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
- grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason);
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason);
-#else
-#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
-#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
-#endif
-
-/** Fetch the workqueue closure scheduler. Items added to a work queue will be
- started in approximately the order they were enqueued, on some thread that
- may or may not be the current thread. Successive closures enqueued onto a
- workqueue MAY be executed concurrently.
-
- It is generally more expensive to add a closure to a workqueue than to the
- execution context, both in terms of CPU work and in execution latency.
-
- Use work queues when it's important that other threads be given a chance to
- tackle some workload. */
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue);
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c
deleted file mode 100644
index 1900aa1ca6..0000000000
--- a/src/core/lib/iomgr/workqueue_uv.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-#ifdef GRPC_UV
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-// Minimal implementation of grpc_workqueue for libuv
-// Works by directly enqueuing workqueue items onto the current execution
-// context, which is at least correct, if not performant or in the spirit of
-// workqueues.
-
-void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
-#endif
-
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
- return grpc_schedule_on_exec_ctx;
-}
-
-#endif /* GPR_UV */
diff --git a/src/core/lib/iomgr/workqueue_uv.h b/src/core/lib/iomgr/workqueue_uv.h
deleted file mode 100644
index 5cac45ea2a..0000000000
--- a/src/core/lib/iomgr/workqueue_uv.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
deleted file mode 100644
index c7d650c767..0000000000
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_WINDOWS
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-// Minimal implementation of grpc_workqueue for Windows
-// Works by directly enqueuing workqueue items onto the current execution
-// context, which is at least correct, if not performant or in the spirit of
-// workqueues.
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
- int line, const char *reason) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {}
-#else
-grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
- return workqueue;
-}
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
-#endif
-
-grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
- return grpc_schedule_on_exec_ctx;
-}
-
-#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/iomgr/workqueue_windows.h b/src/core/lib/iomgr/workqueue_windows.h
deleted file mode 100644
index 47ceed1110..0000000000
--- a/src/core/lib/iomgr/workqueue_windows.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_WINDOWS_H */
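
The four deletions above remove the workqueue abstraction outright, which is what the earlier endpoint-vtable hunks were preparing for: tcp_get_workqueue, uv_get_workqueue, and win_get_workqueue disappear from the vtables, and grpc_combiner_create() no longer takes a workqueue to wrap. What remains are the closure schedulers already visible in this diff; an illustrative round-up (cb, arg, and combiner are assumed parameters, and iomgr headers are omitted for brevity):

    /* Sketch: the three scheduling policies this patch leaves in play. */
    static void init_example_closures(grpc_closure *c1, grpc_closure *c2,
                                      grpc_closure *c3, grpc_iomgr_cb_func cb,
                                      void *arg, grpc_combiner *combiner) {
      /* Run inline when the current exec_ctx flushes. */
      GRPC_CLOSURE_INIT(c1, cb, arg, grpc_schedule_on_exec_ctx);
      /* Serialize with everything else scheduled on the same combiner. */
      GRPC_CLOSURE_INIT(c2, cb, arg, grpc_combiner_scheduler(combiner));
      /* Offload to the executor's thread pool. */
      GRPC_CLOSURE_INIT(c3, cb, arg, grpc_executor_scheduler);
    }

Both deleted stubs already forwarded grpc_workqueue_scheduler() to grpc_schedule_on_exec_ctx, so on Windows and libuv this is purely dead-code removal.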
diff --git a/src/core/lib/security/context/security_context.c b/src/core/lib/security/context/security_context.c
index e528428650..dffe6d2e91 100644
--- a/src/core/lib/security/context/security_context.c
+++ b/src/core/lib/security/context/security_context.c
@@ -18,6 +18,7 @@
#include <string.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
@@ -28,6 +29,11 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_auth_context_refcount =
+ GRPC_TRACER_INITIALIZER(false);
+#endif
+
/* --- grpc_call --- */
grpc_call_error grpc_call_set_credentials(grpc_call *call,
@@ -121,14 +127,17 @@ grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
return ctx;
}
-#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx,
const char *file, int line,
const char *reason) {
if (ctx == NULL) return NULL;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "AUTH_CONTEXT:%p ref %d -> %d %s", ctx, (int)ctx->refcount.count,
- (int)ctx->refcount.count + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "AUTH_CONTEXT:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", ctx, val,
+ val + 1, reason);
+ }
#else
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
if (ctx == NULL) return NULL;
@@ -137,13 +146,16 @@ grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
return ctx;
}
-#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_auth_context_unref(grpc_auth_context *ctx, const char *file, int line,
const char *reason) {
if (ctx == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "AUTH_CONTEXT:%p unref %d -> %d %s", ctx, (int)ctx->refcount.count,
- (int)ctx->refcount.count - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "AUTH_CONTEXT:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", ctx, val,
+ val - 1, reason);
+ }
#else
void grpc_auth_context_unref(grpc_auth_context *ctx) {
if (ctx == NULL) return;
@@ -304,13 +316,8 @@ static const grpc_arg_pointer_vtable auth_context_pointer_vtable = {
auth_context_pointer_cmp};
grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
- grpc_arg arg;
- memset(&arg, 0, sizeof(grpc_arg));
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_AUTH_CONTEXT_ARG;
- arg.value.pointer.p = p;
- arg.value.pointer.vtable = &auth_context_pointer_vtable;
- return arg;
+ return grpc_channel_arg_pointer_create(GRPC_AUTH_CONTEXT_ARG, p,
+ &auth_context_pointer_vtable);
}
grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg) {
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index 102f9d6e2f..0df39257a7 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -22,6 +22,10 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/security/credentials/credentials.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_auth_context_refcount;
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -50,7 +54,7 @@ struct grpc_auth_context {
grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained);
/* Refcounting. */
-#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_AUTH_CONTEXT_REF(p, r) \
grpc_auth_context_ref((p), __FILE__, __LINE__, (r))
#define GRPC_AUTH_CONTEXT_UNREF(p, r) \
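
The auth-context refcount gets the same conversion as the TCP endpoints: the compile-time GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG switch becomes an #ifndef NDEBUG build gated by the new grpc_trace_auth_context_refcount runtime flag, with the count read via an atomic load before formatting. Assuming the flag is registered under a name like "auth_context_refcount" (the registration itself is outside these hunks), a debug binary could enable it through the public tracer API:

    #include <grpc/grpc.h>

    int main(void) {
      grpc_init();
      /* "auth_context_refcount" is an assumed registration name. */
      grpc_tracer_set_enabled("auth_context_refcount", 1);
      /* ... create secure channels; ref/unref lines are logged ... */
      grpc_shutdown();
      return 0;
    }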
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.c
index 64a7c3e728..b1f1e82076 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.c
@@ -159,12 +159,8 @@ static const grpc_arg_pointer_vtable credentials_pointer_vtable = {
grpc_arg grpc_channel_credentials_to_arg(
grpc_channel_credentials *credentials) {
- grpc_arg result;
- result.type = GRPC_ARG_POINTER;
- result.key = GRPC_ARG_CHANNEL_CREDENTIALS;
- result.value.pointer.vtable = &credentials_pointer_vtable;
- result.value.pointer.p = credentials;
- return result;
+ return grpc_channel_arg_pointer_create(
+ GRPC_ARG_CHANNEL_CREDENTIALS, credentials, &credentials_pointer_vtable);
}
grpc_channel_credentials *grpc_channel_credentials_from_arg(
@@ -260,13 +256,8 @@ static const grpc_arg_pointer_vtable cred_ptr_vtable = {
server_credentials_pointer_cmp};
grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
- grpc_arg arg;
- memset(&arg, 0, sizeof(grpc_arg));
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_SERVER_CREDENTIALS_ARG;
- arg.value.pointer.p = p;
- arg.value.pointer.vtable = &cred_ptr_vtable;
- return arg;
+ return grpc_channel_arg_pointer_create(GRPC_SERVER_CREDENTIALS_ARG, p,
+ &cred_ptr_vtable);
}
grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg) {
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index 15288e1dbe..67e74f7b92 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -78,11 +78,8 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
}
grpc_arg grpc_fake_transport_expected_targets_arg(char *expected_targets) {
- grpc_arg arg;
- arg.type = GRPC_ARG_STRING;
- arg.key = GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS;
- arg.value.string = expected_targets;
- return arg;
+ return grpc_channel_arg_string_create(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS,
+ expected_targets);
}
const char *grpc_fake_transport_get_expected_targets(
@@ -123,8 +120,8 @@ static void md_only_test_get_request_metadata(
if (c->is_async) {
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
- grpc_closure_sched(exec_ctx,
- grpc_closure_create(on_simulated_token_fetch_done,
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_CREATE(on_simulated_token_fetch_done,
cb_arg, grpc_executor_scheduler),
GRPC_ERROR_NONE);
} else {
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index aaa7d97d3f..a2a8e289ee 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -115,7 +115,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
grpc_httpcli_get(
exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
- grpc_closure_create(on_compute_engine_detection_http_response, &detector,
+ GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector,
grpc_schedule_on_exec_ctx),
&detector.response);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -140,7 +140,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
gpr_mu_unlock(g_polling_mu);
grpc_httpcli_context_destroy(exec_ctx, &context);
- grpc_closure_init(&destroy_closure, destroy_pollset,
+ GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset,
grpc_polling_entity_pollset(&detector.pollent),
grpc_schedule_on_exec_ctx);
grpc_pollset_shutdown(exec_ctx,
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index f6db670a67..8c747085bb 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -668,7 +668,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
- grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
+ GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
grpc_json_destroy(json);
@@ -771,7 +771,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss);
}
http_cb =
- grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
rsp_idx = HTTP_RESPONSE_KEYS;
} else {
req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
@@ -783,7 +783,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&req.http.path, "/%s%s", path_prefix,
GRPC_OPENID_CONFIG_URL_SUFFIX);
}
- http_cb = grpc_closure_create(on_openid_config_retrieved, ctx,
+ http_cb = GRPC_CLOSURE_CREATE(on_openid_config_retrieved, ctx,
grpc_schedule_on_exec_ctx);
rsp_idx = HTTP_RESPONSE_OPENID;
}
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index acf4c37da8..9de561b310 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -300,7 +300,7 @@ static void compute_engine_fetch_oauth2(
grpc_resource_quota_create("oauth2_credentials");
grpc_httpcli_get(
exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
- grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
+ GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -360,7 +360,7 @@ static void refresh_token_fetch_oauth2(
grpc_httpcli_post(
exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
strlen(body), deadline,
- grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
+ GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
gpr_free(body);
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.c b/src/core/lib/security/credentials/ssl/ssl_credentials.c
index b94b457d35..006db1ec76 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.c
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.c
@@ -52,11 +52,8 @@ static grpc_security_status ssl_create_security_connector(
grpc_channel_args **new_args) {
grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
grpc_security_status status = GRPC_SECURITY_OK;
- size_t i = 0;
const char *overridden_target_name = NULL;
- grpc_arg new_arg;
-
- for (i = 0; args && i < args->num_args; i++) {
+ for (size_t i = 0; args && i < args->num_args; i++) {
grpc_arg *arg = &args->args[i];
if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 &&
arg->type == GRPC_ARG_STRING) {
@@ -69,9 +66,8 @@ static grpc_security_status ssl_create_security_connector(
if (status != GRPC_SECURITY_OK) {
return status;
}
- new_arg.type = GRPC_ARG_STRING;
- new_arg.key = GRPC_ARG_HTTP2_SCHEME;
- new_arg.value.string = "https";
+ grpc_arg new_arg =
+ grpc_channel_arg_string_create(GRPC_ARG_HTTP2_SCHEME, "https");
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
return status;
}
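
Here, and again below in lb_targets_info.c and security_connector.c, field-by-field assembly of a grpc_arg is replaced by constructor helpers. These are presumably thin wrappers that package exactly the assignments they replace; the sketch below is inferred from the call sites and the public grpc_arg layout, not copied from the helpers' actual definitions, and compiles as a translation unit against the public headers:

#include <grpc/grpc.h>

grpc_arg grpc_channel_arg_string_create(char *name, char *value) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = name;
  arg.value.string = value;
  return arg;
}

grpc_arg grpc_channel_arg_pointer_create(
    char *name, void *value, const grpc_arg_pointer_vtable *vtable) {
  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = name;
  arg.value.pointer.p = value;
  arg.value.pointer.vtable = vtable;
  return arg;
}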
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index 537f6f922a..58112b04b4 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -50,7 +50,8 @@ typedef struct {
*/
grpc_polling_entity *pollent;
grpc_transport_stream_op_batch op;
- uint8_t security_context_set;
+ gpr_atm security_context_set;
+ gpr_mu security_context_mu;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
} call_data;
@@ -238,19 +239,26 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *l;
grpc_client_security_context *sec_ctx = NULL;
- if (!op->cancel_stream && calld->security_context_set == 0) {
- calld->security_context_set = 1;
- GPR_ASSERT(op->payload->context != NULL);
- if (op->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
- op->payload->context[GRPC_CONTEXT_SECURITY].value =
- grpc_client_security_context_create();
- op->payload->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_client_security_context_destroy;
+ if (!op->cancel_stream) {
+    /* double-checked locking over the security context to ensure it is set exactly once */
+ if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
+ gpr_mu_lock(&calld->security_context_mu);
+ if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
+ GPR_ASSERT(op->payload->context != NULL);
+ if (op->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ op->payload->context[GRPC_CONTEXT_SECURITY].value =
+ grpc_client_security_context_create();
+ op->payload->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_client_security_context_destroy;
+ }
+ sec_ctx = op->payload->context[GRPC_CONTEXT_SECURITY].value;
+ GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
+ sec_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
+ gpr_atm_rel_store(&calld->security_context_set, 1);
+ }
+ gpr_mu_unlock(&calld->security_context_mu);
}
- sec_ctx = op->payload->context[GRPC_CONTEXT_SECURITY].value;
- GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
- sec_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (op->send_initial_metadata) {
@@ -297,6 +305,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
+ gpr_mu_init(&calld->security_context_mu);
return GRPC_ERROR_NONE;
}
@@ -320,6 +329,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
+ gpr_mu_destroy(&calld->security_context_mu);
}
/* Constructor for channel_data */
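
The rewritten auth_start_transport_op above replaces a plain uint8_t flag with textbook double-checked locking: an acquire load guards the fast path, a mutex serializes the slow path, and a release store publishes the initialized context, so a second batch racing on the same call cannot initialize it twice. A minimal sketch of the same pattern, substituting C11 atomics and pthreads for gpr_atm and gpr_mu, with illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ctx_set; /* plays the role of security_context_set */
static pthread_mutex_t ctx_mu = PTHREAD_MUTEX_INITIALIZER;
static const char *ctx;    /* plays the role of the security context slot */

static void ensure_context(void) {
  if (atomic_load_explicit(&ctx_set, memory_order_acquire) == 0) {
    pthread_mutex_lock(&ctx_mu);
    if (atomic_load_explicit(&ctx_set, memory_order_acquire) == 0) {
      ctx = "initialized-once"; /* the expensive one-time setup */
      atomic_store_explicit(&ctx_set, 1, memory_order_release);
    }
    pthread_mutex_unlock(&ctx_mu);
  }
}

int main(void) {
  ensure_context();
  ensure_context(); /* second call takes the lock-free fast path */
  printf("%s\n", ctx);
  return 0;
}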
diff --git a/src/core/lib/security/transport/lb_targets_info.c b/src/core/lib/security/transport/lb_targets_info.c
index 45bc91b30c..5583a4e0ff 100644
--- a/src/core/lib/security/transport/lb_targets_info.c
+++ b/src/core/lib/security/transport/lb_targets_info.c
@@ -37,12 +37,9 @@ static const grpc_arg_pointer_vtable server_to_balancer_names_vtable = {
grpc_arg grpc_lb_targets_info_create_channel_arg(
grpc_slice_hash_table *targets_info) {
- grpc_arg arg;
- arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_LB_SECURE_NAMING_MAP;
- arg.value.pointer.p = targets_info;
- arg.value.pointer.vtable = &server_to_balancer_names_vtable;
- return arg;
+ return grpc_channel_arg_pointer_create(GRPC_ARG_LB_SECURE_NAMING_MAP,
+ targets_info,
+ &server_to_balancer_names_vtable);
}
grpc_slice_hash_table *grpc_lb_targets_info_find_in_args(
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 8823458da5..f4ed81db1a 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -75,18 +75,20 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
gpr_free(ep);
}
-/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/
-#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
#define SECURE_ENDPOINT_REF(ep, reason) \
secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
-static void secure_endpoint_unref(secure_endpoint *ep,
- grpc_closure_list *closure_list,
+static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
const char *reason, const char *file,
int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d",
- ep, reason, ep->ref.count, ep->ref.count - 1);
+ if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECENDP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
+ val - 1);
+ }
if (gpr_unref(&ep->ref)) {
destroy(exec_ctx, ep);
}
@@ -94,8 +96,12 @@ static void secure_endpoint_unref(secure_endpoint *ep,
static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
const char *file, int line) {
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d",
- ep, reason, ep->ref.count, ep->ref.count + 1);
+ if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECENDP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
+ val + 1);
+ }
gpr_ref(&ep->ref);
}
#else
@@ -132,7 +138,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
}
}
ep->read_buffer = NULL;
- grpc_closure_sched(exec_ctx, ep->read_cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, ep->read_cb, error);
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
@@ -317,7 +323,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, cb,
grpc_set_tsi_error_result(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result));
@@ -365,11 +371,6 @@ static int endpoint_get_fd(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_fd(ep->wrapped_ep);
}
-static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
- return grpc_endpoint_get_workqueue(ep->wrapped_ep);
-}
-
static grpc_resource_user *endpoint_get_resource_user(
grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
@@ -378,7 +379,6 @@ static grpc_resource_user *endpoint_get_resource_user(
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
- endpoint_get_workqueue,
endpoint_add_to_pollset,
endpoint_add_to_pollset_set,
endpoint_shutdown,
@@ -405,7 +405,7 @@ grpc_endpoint *grpc_secure_endpoint_create(
grpc_slice_buffer_init(&ep->output_buffer);
grpc_slice_buffer_init(&ep->source_buffer);
ep->read_buffer = NULL;
- grpc_closure_init(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
gpr_mu_init(&ep->protector_mu);
gpr_ref_init(&ep->ref, 1);
return &ep->base;
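
Both refcount hunks above follow a conversion applied throughout this commit: logging that previously required defining GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG is now compiled into every !NDEBUG build and gated at runtime on a tracer flag, and the current count is read with an atomic load instead of peeking at the refcount's plain int field. A compact sketch of that shape, using C11 atomics and a plain bool where the real code uses gpr_atm and grpc_tracer_flag:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool trace_refcount = true; /* stand-in for a runtime tracer flag */
typedef struct { atomic_intptr_t count; } refcounted;

static void obj_ref(refcounted *r, const char *reason, const char *file,
                    int line) {
  if (trace_refcount) {
    intptr_t val = atomic_load_explicit(&r->count, memory_order_relaxed);
    fprintf(stderr, "%s:%d ref %s %" PRIdPTR " -> %" PRIdPTR "\n", file, line,
            reason, val, val + 1);
  }
  atomic_fetch_add(&r->count, 1);
}

static bool obj_unref(refcounted *r, const char *reason, const char *file,
                      int line) {
  if (trace_refcount) {
    intptr_t val = atomic_load_explicit(&r->count, memory_order_relaxed);
    fprintf(stderr, "%s:%d unref %s %" PRIdPTR " -> %" PRIdPTR "\n", file,
            line, reason, val, val - 1);
  }
  return atomic_fetch_sub(&r->count, 1) == 1; /* true: last ref, destroy */
}

#define OBJ_REF(r, reason) obj_ref((r), (reason), __FILE__, __LINE__)
#define OBJ_UNREF(r, reason) obj_unref((r), (reason), __FILE__, __LINE__)

int main(void) {
  refcounted r;
  atomic_init(&r.count, 1);
  OBJ_REF(&r, "test");
  OBJ_UNREF(&r, "test");
  if (OBJ_UNREF(&r, "final")) fprintf(stderr, "destroyed\n");
  return 0;
}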
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 519e2538a1..3c0c24254b 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -43,6 +43,11 @@
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security_adapter.h"
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_security_connector_refcount =
+ GRPC_TRACER_INITIALIZER(false);
+#endif
+
/* -- Constants. -- */
#ifndef INSTALL_PREFIX
@@ -122,7 +127,7 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_auth_context **auth_context,
grpc_closure *on_peer_checked) {
if (sc == NULL) {
- grpc_closure_sched(exec_ctx, on_peer_checked,
+ GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"cannot check peer -- no security connector"));
tsi_peer_destruct(&peer);
@@ -142,14 +147,17 @@ void grpc_channel_security_connector_check_call_host(
}
}
-#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_security_connector *grpc_security_connector_ref(
grpc_security_connector *sc, const char *file, int line,
const char *reason) {
if (sc == NULL) return NULL;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SECURITY_CONNECTOR:%p ref %d -> %d %s", sc,
- (int)sc->refcount.count, (int)sc->refcount.count + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECURITY_CONNECTOR:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", sc,
+ val, val + 1, reason);
+ }
#else
grpc_security_connector *grpc_security_connector_ref(
grpc_security_connector *sc) {
@@ -159,15 +167,18 @@ grpc_security_connector *grpc_security_connector_ref(
return sc;
}
-#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
const char *file, int line,
const char *reason) {
if (sc == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SECURITY_CONNECTOR:%p unref %d -> %d %s", sc,
- (int)sc->refcount.count, (int)sc->refcount.count - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "SECURITY_CONNECTOR:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", sc,
+ val, val - 1, reason);
+ }
#else
void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
@@ -191,12 +202,8 @@ static const grpc_arg_pointer_vtable connector_pointer_vtable = {
connector_pointer_cmp};
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc) {
- grpc_arg result;
- result.type = GRPC_ARG_POINTER;
- result.key = GRPC_ARG_SECURITY_CONNECTOR;
- result.value.pointer.vtable = &connector_pointer_vtable;
- result.value.pointer.p = sc;
- return result;
+ return grpc_channel_arg_pointer_create(GRPC_ARG_SECURITY_CONNECTOR, sc,
+ &connector_pointer_vtable);
}
grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg) {
@@ -340,7 +347,7 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
*auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end:
- grpc_closure_sched(exec_ctx, on_peer_checked, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
@@ -602,7 +609,7 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
? c->overridden_target_name
: c->target_name,
&peer, auth_context);
- grpc_closure_sched(exec_ctx, on_peer_checked, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
@@ -612,7 +619,7 @@ static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
grpc_closure *on_peer_checked) {
grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
tsi_peer_destruct(&peer);
- grpc_closure_sched(exec_ctx, on_peer_checked, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
}
static void add_shallow_auth_property_to_peer(tsi_peer *peer,
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 24b1086ee3..1c0fe40045 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -29,6 +29,10 @@
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security_interface.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_security_connector_refcount;
+#endif
+
/* --- status enum. --- */
typedef enum { GRPC_SECURITY_OK = 0, GRPC_SECURITY_ERROR } grpc_security_status;
@@ -66,7 +70,7 @@ struct grpc_security_connector {
};
/* Refcounting. */
-#ifdef GRPC_SECURITY_CONNECTOR_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_SECURITY_CONNECTOR_REF(p, r) \
grpc_security_connector_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index f39d10cd81..239a211c0b 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -124,7 +124,7 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
h->shutdown = true;
}
// Invoke callback.
- grpc_closure_sched(exec_ctx, h->on_handshake_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -173,7 +173,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
grpc_channel_args_destroy(exec_ctx, tmp_args);
// Invoke callback.
- grpc_closure_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
@@ -408,13 +408,13 @@ static grpc_handshaker *security_handshaker_create(
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
- grpc_closure_init(&h->on_handshake_data_sent_to_peer,
+ GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&h->on_handshake_data_received_from_peer,
+ GRPC_CLOSURE_INIT(&h->on_handshake_data_received_from_peer,
on_handshake_data_received_from_peer, h,
grpc_schedule_on_exec_ctx);
- grpc_closure_init(&h->on_peer_checked, on_peer_checked, h,
+ GRPC_CLOSURE_INIT(&h->on_peer_checked, on_peer_checked, h,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&h->outgoing);
return &h->base;
@@ -440,7 +440,7 @@ static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_tcp_server_acceptor *acceptor,
grpc_closure *on_handshake_done,
grpc_handshaker_args *args) {
- grpc_closure_sched(exec_ctx, on_handshake_done,
+ GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Failed to create security handshaker"));
}
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index eb7b635098..4e6914be7b 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -113,7 +113,7 @@ static void on_md_processing_done(
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
- grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
} else {
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
@@ -128,7 +128,7 @@ static void on_md_processing_done(
&exec_ctx, calld->transport_op->payload->send_message.send_message);
calld->transport_op->payload->send_message.send_message = NULL;
}
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
&exec_ctx, calld->on_done_recv,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
GRPC_ERROR_INT_GRPC_STATUS, status));
@@ -151,7 +151,7 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
}
- grpc_closure_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
}
static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@@ -193,7 +193,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
memset(calld, 0, sizeof(*calld));
- grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem,
+ GRPC_CLOSURE_INIT(&calld->auth_on_recv, auth_on_recv, elem,
grpc_schedule_on_exec_ctx);
if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
diff --git a/src/core/lib/support/stack_lockfree.c b/src/core/lib/support/stack_lockfree.c
deleted file mode 100644
index 0fb64ed001..0000000000
--- a/src/core/lib/support/stack_lockfree.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/support/stack_lockfree.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-
-/* The lockfree node structure is a single architecture-level
- word that allows for an atomic CAS to set it up. */
-struct lockfree_node_contents {
- /* next thing to look at. Actual index for head, next index otherwise */
- uint16_t index;
-#ifdef GPR_ARCH_64
- uint16_t pad;
- uint32_t aba_ctr;
-#else
-#ifdef GPR_ARCH_32
- uint16_t aba_ctr;
-#else
-#error Unsupported bit width architecture
-#endif
-#endif
-};
-
-/* Use a union to make sure that these are in the same bits as an atm word */
-typedef union lockfree_node {
- gpr_atm atm;
- struct lockfree_node_contents contents;
-} lockfree_node;
-
-/* make sure that entries aligned to 8-bytes */
-#define ENTRY_ALIGNMENT_BITS 3
-/* reserve this entry as invalid */
-#define INVALID_ENTRY_INDEX ((1 << 16) - 1)
-
-struct gpr_stack_lockfree {
- lockfree_node *entries;
- lockfree_node head; /* An atomic entry describing curr head */
-};
-
-gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
- gpr_stack_lockfree *stack;
- stack = (gpr_stack_lockfree *)gpr_malloc(sizeof(*stack));
- /* Since we only allocate 16 bits to represent an entry number,
- * make sure that we are within the desired range */
- /* Reserve the highest entry number as a dummy */
- GPR_ASSERT(entries < INVALID_ENTRY_INDEX);
- stack->entries = (lockfree_node *)gpr_malloc_aligned(
- entries * sizeof(stack->entries[0]), ENTRY_ALIGNMENT_BITS);
- /* Clear out all entries */
- memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
- memset(&stack->head, 0, sizeof(stack->head));
-
- GPR_ASSERT(sizeof(stack->entries->atm) == sizeof(stack->entries->contents));
-
- /* Point the head at reserved dummy entry */
- stack->head.contents.index = INVALID_ENTRY_INDEX;
-/* Fill in the pad and aba_ctr to avoid confusing memcheck tools */
-#ifdef GPR_ARCH_64
- stack->head.contents.pad = 0;
-#endif
- stack->head.contents.aba_ctr = 0;
- return stack;
-}
-
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
- gpr_free_aligned(stack->entries);
- gpr_free(stack);
-}
-
-int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
- lockfree_node head;
- lockfree_node newhead;
- lockfree_node curent;
- lockfree_node newent;
-
- /* First fill in the entry's index and aba ctr for new head */
- newhead.contents.index = (uint16_t)entry;
-#ifdef GPR_ARCH_64
- /* Fill in the pad to avoid confusing memcheck tools */
- newhead.contents.pad = 0;
-#endif
-
- /* Also post-increment the aba_ctr */
- curent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
- newhead.contents.aba_ctr = ++curent.contents.aba_ctr;
- gpr_atm_no_barrier_store(&stack->entries[entry].atm, curent.atm);
-
- do {
- /* Atomically get the existing head value for use */
- head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
- /* Point to it */
- newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
- newent.contents.index = head.contents.index;
- gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
- } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
- /* Use rel_cas above to make sure that entry index is set properly */
- return head.contents.index == INVALID_ENTRY_INDEX;
-}
-
-int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
- lockfree_node head;
- lockfree_node newhead;
-
- do {
- head.atm = gpr_atm_acq_load(&(stack->head.atm));
- if (head.contents.index == INVALID_ENTRY_INDEX) {
- return -1;
- }
- newhead.atm =
- gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));
-
- } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
-
- return head.contents.index;
-}
diff --git a/src/core/lib/support/stack_lockfree.h b/src/core/lib/support/stack_lockfree.h
deleted file mode 100644
index 6324211b72..0000000000
--- a/src/core/lib/support/stack_lockfree.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H
-#define GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H
-
-#include <stddef.h>
-
-typedef struct gpr_stack_lockfree gpr_stack_lockfree;
-
-/* This stack must specify the maximum number of entries to track.
- The current implementation only allows up to 65534 entries */
-gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries);
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack);
-
-/* Pass in a valid entry number for the next stack entry */
-/* Returns 1 if this is the first element on the stack, 0 otherwise */
-int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry);
-
-/* Returns -1 on empty or the actual entry number */
-int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack);
-
-#endif /* GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H */
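
The deleted stack was a Treiber stack over 16-bit node indices, with a 16-bit ABA generation counter packed into the same word as the index so a single compare-and-swap covers both; the counter is bumped on every push so a stalled pop cannot mistake a re-pushed node for an unchanged head (modulo 16-bit wraparound, a risk the original accepted as well). A condensed sketch of that idea, with illustrative names and a fixed 32-bit packing rather than the original's word-size-dependent layout:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID 0xFFFFu
#define PACK(idx, gen) (((uint32_t)(gen) << 16) | (uint32_t)(idx))
#define IDX(word) ((uint16_t)((word)&0xFFFFu))
#define GEN(word) ((uint16_t)((word) >> 16))

enum { CAP = 64 };
static _Atomic uint32_t head = PACK(INVALID, 0);
static _Atomic uint32_t next_of[CAP]; /* per node: packed next index + gen */

static void push(uint16_t entry) {
  /* bump the node's generation so a re-push is distinguishable at the head */
  uint16_t gen = (uint16_t)(GEN(atomic_load(&next_of[entry])) + 1);
  uint32_t old = atomic_load(&head);
  do { /* point the node at the current head, then swing head to the node */
    atomic_store(&next_of[entry], PACK(IDX(old), gen));
  } while (!atomic_compare_exchange_weak(&head, &old, PACK(entry, gen)));
}

static int pop(void) {
  uint32_t old = atomic_load(&head);
  do {
    if (IDX(old) == INVALID) return -1; /* empty */
  } while (!atomic_compare_exchange_weak(&head, &old,
                                         atomic_load(&next_of[IDX(old)])));
  return IDX(old);
}

int main(void) {
  push(3);
  push(7);
  printf("%d %d %d\n", pop(), pop(), pop()); /* prints: 7 3 -1 */
  return 0;
}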
diff --git a/src/core/lib/support/time_precise.c b/src/core/lib/support/time_precise.c
index 1de373e002..6ce19e53cc 100644
--- a/src/core/lib/support/time_precise.c
+++ b/src/core/lib/support/time_precise.c
@@ -22,26 +22,26 @@
#ifdef GRPC_TIMERS_RDTSC
#if defined(__i386__)
-static void gpr_get_cycle_counter(long long int *clk) {
- long long int ret;
+static void gpr_get_cycle_counter(int64_t *clk) {
+  int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
*clk = ret;
}
// ----------------------------------------------------------------
#elif defined(__x86_64__) || defined(__amd64__)
-static void gpr_get_cycle_counter(long long int *clk) {
- unsigned long long low, high;
+static void gpr_get_cycle_counter(int64_t *clk) {
+ uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- *clk = (long long)(high << 32) | (long long)low;
+ *clk = (int64_t)(high << 32) | (int64_t)low;
}
#endif
static double cycles_per_second = 0;
-static long long int start_cycle;
+static int64_t start_cycle;
void gpr_precise_clock_init(void) {
time_t start;
- long long end_cycle;
+ int64_t end_cycle;
gpr_log(GPR_DEBUG, "Calibrating timers");
start = time(NULL);
while (time(NULL) == start)
@@ -55,7 +55,7 @@ void gpr_precise_clock_init(void) {
}
void gpr_precise_clock_now(gpr_timespec *clk) {
- long long int counter;
+ int64_t counter;
double secs;
gpr_get_cycle_counter(&counter);
secs = (double)(counter - start_cycle) / cycles_per_second;
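
The rdtsc path this file implements calibrates once at startup: wait for a one-second boundary, record the cycle counter, busy-wait one wall-clock second, and take the delta as cycles per second; gpr_precise_clock_now then converts cycle deltas into seconds by dividing by that rate. A self-contained x86-64 sketch of the scheme, assuming a constant-rate TSC and gcc/clang inline assembly:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t cycle_counter(void) {
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return (int64_t)((high << 32) | low);
}

static double cycles_per_second;
static int64_t start_cycle;

static void precise_clock_init(void) {
  time_t start = time(NULL);
  while (time(NULL) == start) /* align to the next second boundary */
    ;
  start_cycle = cycle_counter();
  time_t end = time(NULL) + 1; /* spin for one full wall-clock second */
  while (time(NULL) < end)
    ;
  cycles_per_second = (double)(cycle_counter() - start_cycle);
}

int main(void) {
  precise_clock_init();
  int64_t c = cycle_counter();
  printf("~%.0f cycles/sec; now = %f s since calibration\n",
         cycles_per_second, (double)(c - start_cycle) / cycles_per_second);
  return 0;
}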
diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.c
index 4cd73a3f20..ef8405cca8 100644
--- a/src/core/lib/surface/alarm.c
+++ b/src/core/lib/surface/alarm.c
@@ -50,7 +50,7 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
alarm->tag = tag;
grpc_cq_begin_op(cq, tag);
- grpc_closure_init(&alarm->on_alarm, alarm_cb, alarm,
+ GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&exec_ctx, &alarm->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index e4d27ffbf6..c769866ceb 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -457,7 +457,7 @@ void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_REASON reason
#define REF_ARG , const char *reason
#else
@@ -520,7 +520,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
}
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info,
- grpc_closure_init(&c->release_call, release_call, c,
+ GRPC_CLOSURE_INIT(&c->release_call, release_call, c,
grpc_schedule_on_exec_ctx));
GPR_TIMER_END("destroy_call", 0);
}
@@ -634,7 +634,7 @@ static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
GRPC_CALL_INTERNAL_REF(c, "termination");
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
- grpc_closure_create(done_termination, c, grpc_schedule_on_exec_ctx));
+ GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
execute_op(exec_ctx, c, op);
@@ -929,33 +929,6 @@ static grpc_compression_algorithm decode_compression(grpc_mdelem md) {
return algorithm;
}
-static void recv_common_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_metadata_batch *b) {
- if (b->idx.named.grpc_status != NULL) {
- uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
- grpc_error *error =
- status_code == GRPC_STATUS_OK
- ? GRPC_ERROR_NONE
- : grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Error received from peer"),
- GRPC_ERROR_INT_GRPC_STATUS,
- (intptr_t)status_code);
-
- if (b->idx.named.grpc_message != NULL) {
- error = grpc_error_set_str(
- error, GRPC_ERROR_STR_GRPC_MESSAGE,
- grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md)));
- grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
- } else if (error != GRPC_ERROR_NONE) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
- grpc_empty_slice());
- }
-
- set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
- grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status);
- }
-}
-
static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
int is_trailing) {
if (b->list.count == 0) return;
@@ -980,8 +953,6 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch *b) {
- recv_common_filter(exec_ctx, call, b);
-
if (b->idx.named.grpc_encoding != NULL) {
GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
set_incoming_compression_algorithm(
@@ -989,7 +960,6 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
GPR_TIMER_END("incoming_compression_algorithm", 0);
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
}
-
if (b->idx.named.grpc_accept_encoding != NULL) {
GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
set_encodings_accepted_by_peer(exec_ctx, call,
@@ -997,14 +967,33 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding);
GPR_TIMER_END("encodings_accepted_by_peer", 0);
}
-
publish_app_metadata(call, b, false);
}
static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
grpc_metadata_batch *b) {
grpc_call *call = args;
- recv_common_filter(exec_ctx, call, b);
+ if (b->idx.named.grpc_status != NULL) {
+ uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
+ grpc_error *error =
+ status_code == GRPC_STATUS_OK
+ ? GRPC_ERROR_NONE
+ : grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Error received from peer"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ (intptr_t)status_code);
+ if (b->idx.named.grpc_message != NULL) {
+ error = grpc_error_set_str(
+ error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md)));
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
+ } else if (error != GRPC_ERROR_NONE) {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_empty_slice());
+ }
+ set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status);
+ }
publish_app_metadata(call, b, true);
}
@@ -1170,7 +1159,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
- grpc_closure_run(exec_ctx, bctl->completion_data.notify_tag.tag, error);
+ GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1275,7 +1264,7 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
} else {
*call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
}
- grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, bctl,
+ GRPC_CLOSURE_INIT(&call->receiving_slice_ready, receiving_slice_ready, bctl,
grpc_schedule_on_exec_ctx);
continue_receiving_slices(exec_ctx, bctl);
}
@@ -1390,11 +1379,11 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
call->has_initial_md_been_received = true;
if (call->saved_receiving_stream_ready_bctlp != NULL) {
- grpc_closure *saved_rsr_closure = grpc_closure_create(
+ grpc_closure *saved_rsr_closure = GRPC_CLOSURE_CREATE(
receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
grpc_schedule_on_exec_ctx);
call->saved_receiving_stream_ready_bctlp = NULL;
- grpc_closure_run(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
}
finish_batch_step(exec_ctx, bctl);
@@ -1436,7 +1425,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
free_no_op_completion, NULL,
gpr_malloc(sizeof(grpc_cq_completion)));
} else {
- grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;
@@ -1453,7 +1442,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op_batch *stream_op = &bctl->op;
grpc_transport_stream_op_batch_payload *stream_op_payload =
&call->stream_op_payload;
- stream_op->covered_by_poller = true;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@@ -1642,14 +1630,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- /* IF this is a server, then GRPC_OP_RECV_INITIAL_METADATA *must* come
- from server.c. In that case, it's coming from accept_stream, and in
- that case we're not necessarily covered by a poller. */
- stream_op->covered_by_poller = call->is_client;
call->received_initial_metadata = true;
call->buffered_metadata[0] =
op->data.recv_initial_metadata.recv_initial_metadata;
- grpc_closure_init(&call->receiving_initial_metadata_ready,
+ GRPC_CLOSURE_INIT(&call->receiving_initial_metadata_ready,
receiving_initial_metadata_ready, bctl,
grpc_schedule_on_exec_ctx);
stream_op->recv_initial_metadata = true;
@@ -1673,7 +1657,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream;
- grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
+ GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
bctl, grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready;
@@ -1739,7 +1723,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
- grpc_closure_init(&bctl->finish_batch, finish_batch, bctl,
+ GRPC_CLOSURE_INIT(&bctl->finish_batch, finish_batch, bctl,
grpc_schedule_on_exec_ctx);
stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 60b661cf8c..185bfccb77 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -62,7 +62,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
const char *reason);
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 5647dff28b..5780a18ce8 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -345,7 +345,7 @@ grpc_call *grpc_channel_create_registered_call(
return call;
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define REF_REASON reason
#define REF_ARG , const char *reason
#else
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 848debc7c5..528bb868e2 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -59,7 +59,7 @@ grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
size_t grpc_channel_get_call_size_estimate(grpc_channel *channel);
void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size);
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
const char *reason);
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c
index 4de3a8af64..80eb80af78 100644
--- a/src/core/lib/surface/channel_ping.c
+++ b/src/core/lib/surface/channel_ping.c
@@ -56,7 +56,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GPR_ASSERT(reserved == NULL);
pr->tag = tag;
pr->cq = cq;
- grpc_closure_init(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
op->send_ping = &pr->closure;
op->bind_pollset = grpc_cq_pollset(cq);
grpc_cq_begin_op(cq, tag);
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 07288053c8..b04aee6c73 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -38,6 +38,7 @@
grpc_tracer_flag grpc_trace_operation_failures = GRPC_TRACER_INITIALIZER(false);
#ifndef NDEBUG
grpc_tracer_flag grpc_trace_pending_tags = GRPC_TRACER_INITIALIZER(false);
+grpc_tracer_flag grpc_trace_cq_refcount = GRPC_TRACER_INITIALIZER(false);
#endif
typedef struct {
@@ -113,7 +114,7 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
npp->root = w.next;
if (&w == npp->root) {
if (npp->shutdown) {
- grpc_closure_sched(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
}
npp->root = NULL;
}
@@ -146,7 +147,7 @@ static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(closure != NULL);
p->shutdown = closure;
if (p->root == NULL) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
non_polling_worker *w = p->root;
do {
@@ -417,7 +418,7 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
cqd->outstanding_tag_count = 0;
#endif
cq_event_queue_init(&cqd->queue);
- grpc_closure_init(&cqd->pollset_shutdown_done, on_pollset_shutdown_done, cc,
+ GRPC_CLOSURE_INIT(&cqd->pollset_shutdown_done, on_pollset_shutdown_done, cc,
grpc_schedule_on_exec_ctx);
GPR_TIMER_END("grpc_completion_queue_create_internal", 0);
@@ -437,12 +438,16 @@ int grpc_get_cq_poll_num(grpc_completion_queue *cc) {
return cur_num_polls;
}
-#ifdef GRPC_CQ_REF_COUNT_DEBUG
+#ifndef NDEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
const char *file, int line) {
cq_data *cqd = &cc->data;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p ref %d -> %d %s", cc,
- (int)cqd->owning_refs.count, (int)cqd->owning_refs.count + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&cqd->owning_refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "CQ:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", cc, val, val + 1,
+ reason);
+ }
#else
void grpc_cq_internal_ref(grpc_completion_queue *cc) {
cq_data *cqd = &cc->data;
@@ -456,12 +461,16 @@ static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cc, "pollset_destroy");
}
-#ifdef GRPC_CQ_REF_COUNT_DEBUG
-void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason,
- const char *file, int line) {
+#ifndef NDEBUG
+void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
+ const char *reason, const char *file, int line) {
cq_data *cqd = &cc->data;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p unref %d -> %d %s", cc,
- (int)cqd->owning_refs.count, (int)cqd->owning_refs.count - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&cqd->owning_refs.count);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "CQ:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", cc, val, val - 1,
+ reason);
+ }
#else
void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cc) {
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 49097bac39..97ea9cae20 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -30,8 +30,10 @@
extern grpc_tracer_flag grpc_cq_pluck_trace;
extern grpc_tracer_flag grpc_cq_event_timeout_trace;
extern grpc_tracer_flag grpc_trace_operation_failures;
+
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_pending_tags;
+extern grpc_tracer_flag grpc_trace_cq_refcount;
#endif
#ifdef __cplusplus
@@ -52,9 +54,7 @@ typedef struct grpc_cq_completion {
uintptr_t next;
} grpc_cq_completion;
-//#define GRPC_CQ_REF_COUNT_DEBUG
-
-#ifdef GRPC_CQ_REF_COUNT_DEBUG
+#ifndef NDEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
const char *file, int line);
void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 3575c5495c..14a86bfa0a 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -113,6 +113,7 @@ void grpc_init(void) {
int i;
gpr_once_init(&g_basic_init, do_basic_init);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&g_init_mu);
if (++g_initializations == 1) {
gpr_time_init();
@@ -125,22 +126,26 @@ void grpc_init(void) {
grpc_register_tracer("channel_stack_builder",
&grpc_trace_channel_stack_builder);
grpc_register_tracer("http1", &grpc_http1_trace);
- grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
+ grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace); // default on
grpc_register_tracer("combiner", &grpc_combiner_trace);
grpc_register_tracer("server_channel", &grpc_server_channel_trace);
grpc_register_tracer("bdp_estimator", &grpc_bdp_estimator_trace);
- // Default pluck trace to 1
- grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
- // Default timeout trace to 1
+ grpc_register_tracer("queue_timeout",
+ &grpc_cq_event_timeout_trace); // default on
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
grpc_register_tracer("resource_quota", &grpc_resource_quota_trace);
grpc_register_tracer("call_error", &grpc_call_error_trace);
#ifndef NDEBUG
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
+ grpc_register_tracer("queue_refcount", &grpc_trace_cq_refcount);
+ grpc_register_tracer("closure", &grpc_trace_closure);
+ grpc_register_tracer("error_refcount", &grpc_trace_error_refcount);
+ grpc_register_tracer("stream_refcount", &grpc_trace_stream_refcount);
+ grpc_register_tracer("fd_refcount", &grpc_trace_fd_refcount);
+ grpc_register_tracer("metadata", &grpc_trace_metadata);
#endif
grpc_security_pre_init();
- grpc_iomgr_init();
- grpc_executor_init();
+ grpc_iomgr_init(&exec_ctx);
gpr_timers_global_init();
grpc_handshaker_factory_registry_init();
grpc_security_init();
@@ -156,19 +161,20 @@ void grpc_init(void) {
grpc_tracer_init("GRPC_TRACE");
/* no more changes to channel init pipelines */
grpc_channel_init_finalize();
- grpc_iomgr_start();
+ grpc_iomgr_start(&exec_ctx);
}
gpr_mu_unlock(&g_init_mu);
+ grpc_exec_ctx_finish(&exec_ctx);
GRPC_API_TRACE("grpc_init(void)", 0, ());
}
void grpc_shutdown(void) {
int i;
GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
- grpc_executor_shutdown(&exec_ctx);
grpc_iomgr_shutdown(&exec_ctx);
gpr_timers_global_destroy();
grpc_tracer_shutdown();
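
The tracer names registered above become the values accepted by the GRPC_TRACE environment variable, which grpc_tracer_init("GRPC_TRACE") parses later in this same function; the refcount tracers are only registered in !NDEBUG builds. A hypothetical usage sketch (the flag names are taken from the registration calls in this hunk):

#define _POSIX_C_SOURCE 200112L /* for setenv */
#include <stdlib.h>

#include <grpc/grpc.h>

int main(void) {
  /* must be set before grpc_init() performs the tracer lookup */
  setenv("GRPC_TRACE", "metadata,closure,queue_refcount", 1);
  grpc_init();
  /* ... in a debug build, refcount and closure traces now log to stderr ... */
  grpc_shutdown();
  return 0;
}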
diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.c
index fb6635716d..7dbea581d0 100644
--- a/src/core/lib/surface/init_secure.c
+++ b/src/core/lib/surface/init_secure.c
@@ -32,9 +32,19 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/tsi/transport_security_interface.h"
+#ifndef NDEBUG
+#include "src/core/lib/security/context/security_context.h"
+#endif
+
void grpc_security_pre_init(void) {
grpc_register_tracer("secure_endpoint", &grpc_trace_secure_endpoint);
grpc_register_tracer("transport_security", &tsi_tracing_enabled);
+#ifndef NDEBUG
+ grpc_register_tracer("auth_context_refcount",
+ &grpc_trace_auth_context_refcount);
+ grpc_register_tracer("security_connector_refcount",
+ &grpc_trace_security_connector_refcount);
+#endif
}
static bool maybe_prepend_client_auth_filter(
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index c9f498ce6a..a0791080a9 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -105,17 +105,17 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->on_connectivity_state_change) {
GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
*op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
- grpc_closure_sched(exec_ctx, op->on_connectivity_state_change,
+ GRPC_CLOSURE_SCHED(exec_ctx, op->on_connectivity_state_change,
GRPC_ERROR_NONE);
}
if (op->send_ping != NULL) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx, op->send_ping,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
if (op->on_consumed != NULL) {
- grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
}
@@ -128,7 +128,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 6cccd0d634..84ddf74ab9 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -269,7 +269,7 @@ static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
bool send_goaway, grpc_error *send_disconnect) {
struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
- grpc_closure_init(&sc->closure, shutdown_cleanup, sc,
+ GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
grpc_channel_element *elem;
@@ -337,11 +337,11 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(
+ GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
}
}
@@ -432,7 +432,7 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
orphan_channel(chand);
server_ref(chand->server);
maybe_finish_shutdown(exec_ctx, chand->server);
- grpc_closure_init(&chand->finish_destroy_channel_closure,
+ GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure,
finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
if (GRPC_TRACER_ON(grpc_server_channel_trace) && error != GRPC_ERROR_NONE) {
@@ -475,6 +475,7 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
*rc->data.registered.deadline = calld->deadline;
if (rc->data.registered.optional_payload) {
*rc->data.registered.optional_payload = calld->payload;
+ calld->payload = NULL;
}
break;
default:
@@ -497,11 +498,11 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(
+ GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_REF(error));
return;
}
@@ -546,9 +547,9 @@ static void finish_start_new_rpc(
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
+ GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
return;
}
@@ -563,7 +564,7 @@ static void finish_start_new_rpc(
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.data.recv_message.recv_message = &calld->payload;
- grpc_closure_init(&calld->publish, publish_new_rpc, elem,
+ GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem,
grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
&calld->publish);
@@ -740,7 +741,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_UNREF(src_error);
}
- grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
+ GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void server_mutate_op(grpc_call_element *elem,
@@ -779,9 +780,9 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
+ GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
@@ -819,7 +820,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata.recv_initial_metadata =
&calld->initial_metadata;
- grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem,
+ GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
grpc_schedule_on_exec_ctx);
grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
&calld->got_initial_metadata);
@@ -855,7 +856,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
- grpc_closure_init(&calld->server_on_recv_initial_metadata,
+ GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem,
grpc_schedule_on_exec_ctx);
@@ -878,6 +879,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->path);
}
grpc_metadata_array_destroy(&calld->initial_metadata);
+ grpc_byte_buffer_destroy(calld->payload);
gpr_mu_destroy(&calld->mu_state);
@@ -895,7 +897,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->next = chand->prev = chand;
chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE;
- grpc_closure_init(&chand->channel_connectivity_changed,
+ GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed,
channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
@@ -1075,7 +1077,7 @@ void grpc_server_start(grpc_server *server) {
server_ref(server);
server->starting = true;
- grpc_closure_sched(&exec_ctx, grpc_closure_create(start_listeners, server,
+ GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
grpc_executor_scheduler),
GRPC_ERROR_NONE);
@@ -1255,7 +1257,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* Shutdown listeners */
for (l = server->listeners; l; l = l->next) {
- grpc_closure_init(&l->destroy_done, listener_destroy_done, server,
+ GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server,
grpc_schedule_on_exec_ctx);
l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
}
@@ -1349,11 +1351,11 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&calld->mu_state);
if (calld->state == ZOMBIED) {
gpr_mu_unlock(&calld->mu_state);
- grpc_closure_init(
+ GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
GRPC_ERROR_NONE);
} else {
GPR_ASSERT(calld->state == PENDING);
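
Taken together, the two small ownership hunks in this file plug a payload leak: publish_call now nulls calld->payload after handing it to the registered-call request, and destroy_call_elem unconditionally destroys whatever is left (grpc_byte_buffer_destroy tolerates NULL, so no guard is needed). A toy sketch of that transfer-then-destroy pattern, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; } byte_buffer;

static void byte_buffer_destroy(byte_buffer *b) { free(b); } /* free(NULL) ok */

typedef struct { byte_buffer *payload; } call_data;

static void publish(call_data *calld, byte_buffer **optional_payload) {
  if (optional_payload != NULL) {
    *optional_payload = calld->payload; /* transfer ownership to the caller */
    calld->payload = NULL;              /* so the destructor won't free it */
  }
}

static void destroy_call(call_data *calld) {
  byte_buffer_destroy(calld->payload); /* frees only if never published */
}

int main(void) {
  call_data calld = {malloc(sizeof(byte_buffer))};
  byte_buffer *out = NULL;
  publish(&calld, &out);
  destroy_call(&calld);     /* no double free: payload was transferred */
  byte_buffer_destroy(out); /* the caller owns it now */
  printf("ok\n");
  return 0;
}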
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index f1bbfc082a..6fe40af3b2 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -67,7 +67,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner");
}
- grpc_closure_sched(exec_ctx, w->notify, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, w->notify, error);
gpr_free(w);
}
GRPC_ERROR_UNREF(tracker->current_error);
@@ -125,7 +125,7 @@ bool grpc_connectivity_state_notify_on_state_change(
if (current == NULL) {
grpc_connectivity_state_watcher *w = tracker->watchers;
if (w != NULL && w->notify == notify) {
- grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
tracker->watchers = w->next;
gpr_free(w);
return false;
@@ -133,7 +133,7 @@ bool grpc_connectivity_state_notify_on_state_change(
while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) {
- grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
w->next = w->next->next;
gpr_free(rm_candidate);
return false;
@@ -144,7 +144,7 @@ bool grpc_connectivity_state_notify_on_state_change(
} else {
if (cur != *current) {
*current = cur;
- grpc_closure_sched(exec_ctx, notify,
+ GRPC_CLOSURE_SCHED(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error));
} else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
@@ -197,7 +197,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify);
}
- grpc_closure_sched(exec_ctx, w->notify,
+ GRPC_CLOSURE_SCHED(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error));
gpr_free(w);
}
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 9491730719..87a2abf344 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -47,7 +47,8 @@
* used to determine which kind of element a pointer refers to.
*/
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_metadata = GRPC_TRACER_INITIALIZER(false);
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__)
@@ -144,15 +145,17 @@ static int is_mdelem_static(grpc_mdelem e) {
static void ref_md_locked(mdtab_shard *shard,
interned_metadata *md DEBUG_ARGS) {
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
@@ -243,13 +246,16 @@ grpc_mdelem grpc_mdelem_create(
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(allocated->key);
- char *value_str = grpc_slice_to_c_string(allocated->value);
- gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%zu: '%s' = '%s'", (void *)allocated,
- gpr_atm_no_barrier_load(&allocated->refcnt), key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(allocated->key);
+ char *value_str = grpc_slice_to_c_string(allocated->value);
+ gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'",
+ (void *)allocated, gpr_atm_no_barrier_load(&allocated->refcnt),
+ key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
return GRPC_MAKE_MDELEM(allocated, GRPC_MDELEM_STORAGE_ALLOCATED);
}
@@ -294,13 +300,15 @@ grpc_mdelem grpc_mdelem_create(
md->bucket_next = shard->elems[idx];
shard->elems[idx] = md;
gpr_mu_init(&md->mu_user_data);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(GPR_DEBUG, "ELM NEW:%p:%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void *)md,
+ gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
shard->count++;
@@ -356,15 +364,17 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
@@ -376,15 +386,17 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
@@ -404,15 +416,17 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(md->key),
grpc_slice_hash(md->value));
@@ -428,15 +442,17 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM UNREF:%p:%zu->%zu: '%s' = '%s'", (void *)md,
- gpr_atm_no_barrier_load(&md->refcnt),
- gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
- gpr_free(key_str);
- gpr_free(value_str);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_metadata)) {
+ char *key_str = grpc_slice_to_c_string(md->key);
+ char *value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+ "ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
+ (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
+ gpr_free(key_str);
+ gpr_free(value_str);
+ }
#endif
const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
GPR_ASSERT(prev_refcount >= 1);
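
The metadata.c hunks above trade the compile-time GRPC_METADATA_REFCOUNT_DEBUG switch for a runtime tracer flag checked under #ifndef NDEBUG, and swap %zu for %" PRIdPTR " since gpr_atm is a pointer-sized integer, not a size_t. The gating pattern, sketched with hypothetical stand-ins for grpc_tracer_flag and GRPC_TRACER_ON:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for grpc_tracer_flag / GRPC_TRACER_ON. */
    typedef struct { bool value; } tracer_flag;
    #define TRACER_INITIALIZER(v) {(v)}
    #define TRACER_ON(f) ((f).value)

    #ifndef NDEBUG
    tracer_flag trace_metadata = TRACER_INITIALIZER(false);
    #endif

    void ref_element(void *md) {
      (void)md;
    #ifndef NDEBUG
      if (TRACER_ON(trace_metadata)) {
        /* logging is compiled into every debug build, but off until toggled */
        fprintf(stderr, "ELM REF %p\n", md);
      }
    #endif
      /* ... the actual refcount increment happens unconditionally ... */
    }
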
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 5e1afecd2e..974469e436 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_metadata;
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -132,9 +136,7 @@ void *grpc_mdelem_get_user_data(grpc_mdelem md,
void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
void *user_data);
-/* Reference counting */
-//#define GRPC_METADATA_REFCOUNT_DEBUG
-#ifdef GRPC_METADATA_REFCOUNT_DEBUG
+#ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
#define GRPC_MDELEM_UNREF(exec_ctx, s) \
grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__)
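
In the header, GRPC_MDELEM_REF / GRPC_MDELEM_UNREF now expand with __FILE__ and __LINE__ whenever NDEBUG is unset, pairing with the DEBUG_ARGS trick in metadata.c so a single function definition serves both builds. A self-contained sketch of the same technique, all names invented:

    #ifndef NDEBUG
    #define DEBUG_ARGS , const char *file, int line
    #define ELEM_REF(e) elem_ref((e), __FILE__, __LINE__)
    #else
    #define DEBUG_ARGS
    #define ELEM_REF(e) elem_ref(e)
    #endif

    typedef struct { int refs; } elem;

    /* One definition serves both builds: the parameter list only grows
       when NDEBUG is unset, and callers go through the ELEM_REF macro. */
    elem *elem_ref(elem *e DEBUG_ARGS) {
    #ifndef NDEBUG
      (void)file; (void)line; /* call site, available for leak hunting */
    #endif
      ++e->refs;
      return e;
    }
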
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index abc289ccd9..6a9eba110d 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -31,25 +31,33 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/transport_impl.h"
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_stream_refcount = GRPC_TRACER_INITIALIZER(false);
+#endif
+
+#ifndef NDEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
- gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
- refcount->object_type, refcount, refcount->destroy.cb_arg, val,
- val + 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val + 1, reason);
+ }
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
gpr_ref_non_zero(&refcount->refs);
}
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
- gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
- refcount->object_type, refcount, refcount->destroy.cb_arg, val,
- val - 1, reason);
+ if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
+ gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val - 1, reason);
+ }
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@@ -65,7 +73,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
there. */
refcount->destroy.scheduler = grpc_executor_scheduler;
}
- grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
}
@@ -74,7 +82,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
offsetof(grpc_stream_refcount, slice_refcount)))
static void slice_stream_ref(void *p) {
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p), "slice");
#else
grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p));
@@ -82,7 +90,7 @@ static void slice_stream_ref(void *p) {
}
static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p), "slice");
#else
grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p));
@@ -102,7 +110,7 @@ static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {
.eq = grpc_slice_default_eq_impl,
.hash = grpc_slice_default_hash_impl};
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg,
const char *object_type) {
@@ -112,7 +120,7 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg) {
#endif
gpr_ref_init(&refcount->refs, initial_refs);
- grpc_closure_init(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
refcount->slice_refcount.vtable = &stream_ref_slice_vtable;
refcount->slice_refcount.sub_refcount = &refcount->slice_refcount;
}
@@ -198,20 +206,25 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
return transport->vtable->get_endpoint(exec_ctx, transport);
}
+// Error contract: grpc_transport_stream_op_batch_finish_with_failure
+// must always unref cancel_error after processing the batch, in
+// addition to scheduling the batch's pending closures with the error.
+// Though it lives in lib, it operates on transport stream op batches.
+
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
grpc_error *error) {
if (op->recv_message) {
- grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready,
+ GRPC_CLOSURE_SCHED(exec_ctx, op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
}
if (op->recv_initial_metadata) {
- grpc_closure_sched(
+ GRPC_CLOSURE_SCHED(
exec_ctx,
op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
}
- grpc_closure_sched(exec_ctx, op->on_complete, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, error);
if (op->cancel_stream) {
GRPC_ERROR_UNREF(op->payload->cancel_stream.cancel_error);
}
@@ -226,13 +239,13 @@ typedef struct {
static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_op *op = arg;
- grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
made_transport_op *op = gpr_malloc(sizeof(*op));
- grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op,
+ GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op));
@@ -252,14 +265,14 @@ static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
made_transport_stream_op *op = arg;
grpc_closure *c = op->inner_on_complete;
gpr_free(op);
- grpc_closure_run(exec_ctx, c, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
}
grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
op->op.payload = &op->payload;
- grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
+ GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
op, grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
op->op.on_complete = &op->outer_on_complete;
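
grpc_make_transport_op and grpc_make_transport_stream_op (above) allocate the op together with a wrapper closure: when the transport signals completion, the outer closure frees the allocation and only then forwards the result to the caller's closure. A condensed sketch of that ownership pattern, using plain C stand-ins rather than grpc_closure/GRPC_CLOSURE_INIT:

    #include <stdlib.h>

    typedef void (*cb_fn)(void *arg, int error);
    typedef struct { cb_fn cb; void *arg; } closure; /* grpc_closure stand-in */

    typedef struct {
      closure outer;  /* runs first and frees this wrapper */
      closure *inner; /* the caller's completion callback */
      /* ... the op payload would live here ... */
    } made_op;

    void destroy_made_op(void *arg, int error) {
      made_op *op = arg;
      closure *inner = op->inner;
      free(op);                                /* release the wrapper... */
      if (inner) inner->cb(inner->arg, error); /* ...then notify the caller */
    }

    made_op *make_op(closure *on_complete) {
      made_op *op = calloc(1, sizeof(*op));
      op->outer.cb = destroy_made_op;
      op->outer.arg = op;
      op->inner = on_complete;
      return op; /* the transport later runs op->outer when the op completes */
    }
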
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 16f12c345b..84e53e683a 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -42,18 +42,20 @@ typedef struct grpc_transport grpc_transport;
for a stream. */
typedef struct grpc_stream grpc_stream;
-//#define GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
+extern grpc_tracer_flag grpc_trace_stream_refcount;
+#endif
typedef struct grpc_stream_refcount {
gpr_refcount refs;
grpc_closure destroy;
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
const char *object_type;
#endif
grpc_slice_refcount slice_refcount;
} grpc_stream_refcount;
-#ifdef GRPC_STREAM_REFCOUNT_DEBUG
+#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
grpc_iomgr_cb_func cb, void *cb_arg,
const char *object_type);
@@ -112,10 +114,6 @@ typedef struct grpc_transport_stream_op_batch {
/** Values for the stream op (fields set are determined by flags above) */
grpc_transport_stream_op_batch_payload *payload;
- /** Is the completion of this op covered by a poller (if false: the op should
- complete independently of some pollset being polled) */
- bool covered_by_poller : 1;
-
/** Send initial metadata to the peer, from the provided metadata batch. */
bool send_initial_metadata : 1;
@@ -169,6 +167,10 @@ struct grpc_transport_stream_op_batch_payload {
uint32_t *recv_flags;
/** Should be enqueued when initial metadata is ready to be processed. */
grpc_closure *recv_initial_metadata_ready;
+ // If not NULL, will be set to true if trailing metadata is
+ // immediately available. This may be a signal that we received a
+ // Trailers-Only response.
+ bool *trailing_metadata_available;
} recv_initial_metadata;
struct {
@@ -196,6 +198,8 @@ struct grpc_transport_stream_op_batch_payload {
grpc_chttp2_grpc_status_to_http2_error. Send a RST_STREAM with this
error. */
struct {
+ // Error contract: the transport that gets this op must cause cancel_error
+ // to be unref'ed after processing it
grpc_error *cancel_error;
} cancel_stream;
@@ -210,9 +214,13 @@ typedef struct grpc_transport_op {
/** connectivity monitoring - set connectivity_state to NULL to unsubscribe */
grpc_closure *on_connectivity_state_change;
grpc_connectivity_state *connectivity_state;
- /** should the transport be disconnected */
+ /** should the transport be disconnected
+ * Error contract: the transport that gets this op must cause
+ * disconnect_with_error to be unref'ed after processing it */
grpc_error *disconnect_with_error;
- /** what should the goaway contain? */
+ /** what should the goaway contain?
+ * Error contract: the transport that gets this op must cause
+ * goaway_error to be unref'ed after processing it */
grpc_error *goaway_error;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures.
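
The new comments in transport.h pin down an ownership rule: the transport that consumes a cancel_stream, disconnect_with_error, or goaway op owns one reference to the embedded grpc_error and must unref it after processing, mirroring what grpc_transport_stream_op_batch_finish_with_failure does for cancel_error. A sketch of a handler honoring that contract; the handler name and signature are invented, while the payload fields and GRPC_ERROR_UNREF are the real names from this diff:

    #include "src/core/lib/transport/transport.h"

    static void perform_stream_op(grpc_exec_ctx *exec_ctx,
                                  grpc_transport_stream_op_batch *op) {
      if (op->cancel_stream) {
        grpc_error *err = op->payload->cancel_stream.cancel_error;
        /* ... tear down the stream, fail pending closures with err ... */
        GRPC_ERROR_UNREF(err); /* contract: consume the reference we were given */
      }
      (void)exec_ctx;
      /* ... handle the remaining sub-ops ... */
    }
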
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index b47b1e8306..7b18229ba6 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -64,9 +64,6 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec b;
gpr_strvec_init(&b);
- gpr_strvec_add(
- &b, gpr_strdup(op->covered_by_poller ? "[COVERED]" : "[UNCOVERED]"));
-
if (op->send_initial_metadata) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index 1b6ace6b13..5d629f7c14 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -208,38 +208,45 @@ class TransportStreamOpBatch {
/// Represents channel data.
class ChannelData {
public:
+ ChannelData() {}
virtual ~ChannelData() {}
- /// Initializes the call data.
- virtual grpc_error *Init(grpc_exec_ctx *exec_ctx,
+ // TODO(roth): Come up with a more C++-like API for the channel element.
+
+ /// Initializes the channel data.
+ virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_channel_element_args *args) {
return GRPC_ERROR_NONE;
}
- // TODO(roth): Find a way to avoid passing elem into these methods.
+ // Called before destruction.
+ virtual void Destroy(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {}
virtual void StartTransportOp(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, TransportOp *op);
virtual void GetInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info);
-
- protected:
- ChannelData() {}
};
/// Represents call data.
class CallData {
public:
+ CallData() {}
virtual ~CallData() {}
+ // TODO(roth): Come up with a more C++-like API for the call element.
+
/// Initializes the call data.
- virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, ChannelData *channel_data,
+ virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_element_args *args) {
return GRPC_ERROR_NONE;
}
- // TODO(roth): Find a way to avoid passing elem into these methods.
+ // Called before destruction.
+ virtual void Destroy(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ const grpc_call_final_info *final_info,
+ grpc_closure *then_call_closure) {}
/// Starts a new stream operation.
virtual void StartTransportStreamOpBatch(grpc_exec_ctx *exec_ctx,
@@ -253,9 +260,6 @@ class CallData {
/// Gets the peer name.
virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
-
- protected:
- CallData() {}
};
namespace internal {
@@ -271,19 +275,24 @@ class ChannelFilter final {
static grpc_error *InitChannelElement(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
+ // Construct the object in the already-allocated memory.
ChannelDataType *channel_data = new (elem->channel_data) ChannelDataType();
- return channel_data->Init(exec_ctx, args);
+ return channel_data->Init(exec_ctx, elem, args);
}
static void DestroyChannelElement(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- reinterpret_cast<ChannelDataType *>(elem->channel_data)->~ChannelDataType();
+ ChannelDataType *channel_data =
+ reinterpret_cast<ChannelDataType *>(elem->channel_data);
+ channel_data->Destroy(exec_ctx, elem);
+ channel_data->~ChannelDataType();
}
static void StartTransportOp(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data;
+ ChannelDataType *channel_data =
+ reinterpret_cast<ChannelDataType *>(elem->channel_data);
TransportOp op_wrapper(op);
channel_data->StartTransportOp(exec_ctx, elem, &op_wrapper);
}
@@ -291,7 +300,8 @@ class ChannelFilter final {
static void GetChannelInfo(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *channel_info) {
- ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data;
+ ChannelDataType *channel_data =
+ reinterpret_cast<ChannelDataType *>(elem->channel_data);
channel_data->GetInfo(exec_ctx, elem, channel_info);
}
@@ -300,24 +310,24 @@ class ChannelFilter final {
static grpc_error *InitCallElement(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data;
// Construct the object in the already-allocated memory.
CallDataType *call_data = new (elem->call_data) CallDataType();
- return call_data->Init(exec_ctx, channel_data, args);
+ return call_data->Init(exec_ctx, elem, args);
}
static void DestroyCallElement(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_call_closure) {
- GPR_ASSERT(then_call_closure == NULL);
- reinterpret_cast<CallDataType *>(elem->call_data)->~CallDataType();
+ CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
+ call_data->Destroy(exec_ctx, elem, final_info, then_call_closure);
+ call_data->~CallDataType();
}
static void StartTransportStreamOpBatch(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- CallDataType *call_data = (CallDataType *)elem->call_data;
+ CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
TransportStreamOpBatch op_wrapper(op);
call_data->StartTransportStreamOpBatch(exec_ctx, elem, &op_wrapper);
}
@@ -325,12 +335,12 @@ class ChannelFilter final {
static void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- CallDataType *call_data = (CallDataType *)elem->call_data;
+ CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent);
}
static char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- CallDataType *call_data = (CallDataType *)elem->call_data;
+ CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
return call_data->GetPeer(exec_ctx, elem);
}
};
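
ChannelFilter<ChannelDataType, CallDataType> adapts C++ classes onto the C core's filter vtable: the channel stack pre-allocates elem->channel_data / elem->call_data, the wrapper placement-news the object into that slot, and the new virtual Destroy hook runs before the destructor. The underlying C-side shape, heavily simplified and with hypothetical field names (the real vtable is grpc_channel_filter in src/core/lib/channel/channel_stack.h):

    #include <stddef.h>
    #include <string.h>

    /* Hypothetical, simplified filter vtable: the stack owns the memory and
       asks the filter to construct/destroy an object living in that slot,
       which is exactly what ChannelFilter<> does with placement new above. */
    typedef struct {
      size_t sizeof_call_data;
      void (*init_call_elem)(void *call_data);
      void (*destroy_call_elem)(void *call_data);
    } filter_vtable;

    typedef struct { int messages_seen; } my_call_data;

    void my_init(void *p) { memset(p, 0, sizeof(my_call_data)); }
    void my_destroy(void *p) { (void)p; /* e.g. flush stats, release refs */ }

    const filter_vtable my_filter = {sizeof(my_call_data), my_init, my_destroy};
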
diff --git a/src/csharp/Grpc.Core.Tests/ServerTest.cs b/src/csharp/Grpc.Core.Tests/ServerTest.cs
index f6343f2a13..884414792d 100644
--- a/src/csharp/Grpc.Core.Tests/ServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ServerTest.cs
@@ -17,6 +17,7 @@
#endregion
using System;
+using System.IO;
using System.Linq;
using Grpc.Core;
using Grpc.Core.Internal;
@@ -66,6 +67,21 @@ namespace Grpc.Core.Tests
}
[Test]
+ public void StartThrowsWithUnboundPorts()
+ {
+ int twiceBoundPort = 9999;
+ Server server = new Server(new[] { new ChannelOption(ChannelOptions.SoReuseport, 0) })
+ {
+ Ports = {
+ new ServerPort("localhost", twiceBoundPort, ServerCredentials.Insecure),
+ new ServerPort("localhost", twiceBoundPort, ServerCredentials.Insecure)
+ }
+ };
+ Assert.Throws(typeof(IOException), () => server.Start());
+ server.ShutdownAsync().Wait();
+ }
+
+ [Test]
public void CannotModifyAfterStarted()
{
Server server = new Server
diff --git a/src/csharp/Grpc.Core/Server.cs b/src/csharp/Grpc.Core/Server.cs
index 462713e6bb..77ad876bdf 100644
--- a/src/csharp/Grpc.Core/Server.cs
+++ b/src/csharp/Grpc.Core/Server.cs
@@ -19,6 +19,7 @@
using System;
using System.Collections;
using System.Collections.Generic;
+using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Grpc.Core.Internal;
@@ -140,6 +141,7 @@ namespace Grpc.Core
/// <summary>
/// Starts the server.
+ /// Throws <c>IOException</c> if not successful.
/// </summary>
public void Start()
{
@@ -148,7 +150,8 @@ namespace Grpc.Core
GrpcPreconditions.CheckState(!startRequested);
GrpcPreconditions.CheckState(!shutdownRequested);
startRequested = true;
-
+
+ CheckPortsBoundSuccessfully();
handle.Start();
for (int i = 0; i < requestCallTokensPerCq; i++)
@@ -301,6 +304,22 @@ namespace Grpc.Core
}
}
+ /// <summary>
+ /// Checks that all ports have been bound successfully.
+ /// </summary>
+ private void CheckPortsBoundSuccessfully()
+ {
+ lock (myLock)
+ {
+ var unboundPort = ports.FirstOrDefault(port => port.BoundPort == 0);
+ if (unboundPort != null)
+ {
+ throw new IOException(
+ string.Format("Failed to bind port \"{0}:{1}\"", unboundPort.Host, unboundPort.Port));
+ }
+ }
+ }
+
private void DisposeHandle()
{
var activeCallCount = activeCallCounter.Count;
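
Server.Start()'s new CheckPortsBoundSuccessfully leans on a C-core convention: the add-port functions return the port actually bound, or 0 on failure, which surfaces in C# as ServerPort.BoundPort == 0. A minimal C sketch of the same check, assuming only the public grpc_server_add_insecure_http2_port API:

    #include <stdio.h>
    #include <grpc/grpc.h>

    int main(void) {
      grpc_init();
      grpc_server *server = grpc_server_create(NULL, NULL);
      /* returns the bound port, or 0 on failure -- the BoundPort == 0 case */
      int port = grpc_server_add_insecure_http2_port(server, "localhost:9999");
      if (port == 0) fprintf(stderr, "failed to bind localhost:9999\n");
      grpc_server_destroy(server);
      grpc_shutdown();
      return 0;
    }
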
diff --git a/src/csharp/Grpc.IntegrationTesting/Control.cs b/src/csharp/Grpc.IntegrationTesting/Control.cs
index 6c0176fb43..d62b5a1c5b 100644
--- a/src/csharp/Grpc.IntegrationTesting/Control.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Control.cs
@@ -32,7 +32,7 @@ namespace Grpc.Testing {
"U2VjdXJpdHlQYXJhbXMSEwoLdXNlX3Rlc3RfY2EYASABKAgSHAoUc2VydmVy",
"X2hvc3Rfb3ZlcnJpZGUYAiABKAkiTQoKQ2hhbm5lbEFyZxIMCgRuYW1lGAEg",
"ASgJEhMKCXN0cl92YWx1ZRgCIAEoCUgAEhMKCWludF92YWx1ZRgDIAEoBUgA",
- "QgcKBXZhbHVlIqAECgxDbGllbnRDb25maWcSFgoOc2VydmVyX3RhcmdldHMY",
+ "QgcKBXZhbHVlItUECgxDbGllbnRDb25maWcSFgoOc2VydmVyX3RhcmdldHMY",
"ASADKAkSLQoLY2xpZW50X3R5cGUYAiABKA4yGC5ncnBjLnRlc3RpbmcuQ2xp",
"ZW50VHlwZRI1Cg9zZWN1cml0eV9wYXJhbXMYAyABKAsyHC5ncnBjLnRlc3Rp",
"bmcuU2VjdXJpdHlQYXJhbXMSJAocb3V0c3RhbmRpbmdfcnBjc19wZXJfY2hh",
@@ -44,52 +44,57 @@ namespace Grpc.Testing {
"cxgMIAEoCzIdLmdycGMudGVzdGluZy5IaXN0b2dyYW1QYXJhbXMSEQoJY29y",
"ZV9saXN0GA0gAygFEhIKCmNvcmVfbGltaXQYDiABKAUSGAoQb3RoZXJfY2xp",
"ZW50X2FwaRgPIAEoCRIuCgxjaGFubmVsX2FyZ3MYECADKAsyGC5ncnBjLnRl",
- "c3RpbmcuQ2hhbm5lbEFyZyI4CgxDbGllbnRTdGF0dXMSKAoFc3RhdHMYASAB",
- "KAsyGS5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHMiFQoETWFyaxINCgVyZXNl",
- "dBgBIAEoCCJoCgpDbGllbnRBcmdzEisKBXNldHVwGAEgASgLMhouZ3JwYy50",
- "ZXN0aW5nLkNsaWVudENvbmZpZ0gAEiIKBG1hcmsYAiABKAsyEi5ncnBjLnRl",
- "c3RpbmcuTWFya0gAQgkKB2FyZ3R5cGUitAIKDFNlcnZlckNvbmZpZxItCgtz",
- "ZXJ2ZXJfdHlwZRgBIAEoDjIYLmdycGMudGVzdGluZy5TZXJ2ZXJUeXBlEjUK",
- "D3NlY3VyaXR5X3BhcmFtcxgCIAEoCzIcLmdycGMudGVzdGluZy5TZWN1cml0",
- "eVBhcmFtcxIMCgRwb3J0GAQgASgFEhwKFGFzeW5jX3NlcnZlcl90aHJlYWRz",
- "GAcgASgFEhIKCmNvcmVfbGltaXQYCCABKAUSMwoOcGF5bG9hZF9jb25maWcY",
- "CSABKAsyGy5ncnBjLnRlc3RpbmcuUGF5bG9hZENvbmZpZxIRCgljb3JlX2xp",
- "c3QYCiADKAUSGAoQb3RoZXJfc2VydmVyX2FwaRgLIAEoCRIcChNyZXNvdXJj",
- "ZV9xdW90YV9zaXplGOkHIAEoBSJoCgpTZXJ2ZXJBcmdzEisKBXNldHVwGAEg",
- "ASgLMhouZ3JwYy50ZXN0aW5nLlNlcnZlckNvbmZpZ0gAEiIKBG1hcmsYAiAB",
- "KAsyEi5ncnBjLnRlc3RpbmcuTWFya0gAQgkKB2FyZ3R5cGUiVQoMU2VydmVy",
- "U3RhdHVzEigKBXN0YXRzGAEgASgLMhkuZ3JwYy50ZXN0aW5nLlNlcnZlclN0",
- "YXRzEgwKBHBvcnQYAiABKAUSDQoFY29yZXMYAyABKAUiDQoLQ29yZVJlcXVl",
- "c3QiHQoMQ29yZVJlc3BvbnNlEg0KBWNvcmVzGAEgASgFIgYKBFZvaWQi/QEK",
- "CFNjZW5hcmlvEgwKBG5hbWUYASABKAkSMQoNY2xpZW50X2NvbmZpZxgCIAEo",
- "CzIaLmdycGMudGVzdGluZy5DbGllbnRDb25maWcSEwoLbnVtX2NsaWVudHMY",
- "AyABKAUSMQoNc2VydmVyX2NvbmZpZxgEIAEoCzIaLmdycGMudGVzdGluZy5T",
- "ZXJ2ZXJDb25maWcSEwoLbnVtX3NlcnZlcnMYBSABKAUSFgoOd2FybXVwX3Nl",
- "Y29uZHMYBiABKAUSGQoRYmVuY2htYXJrX3NlY29uZHMYByABKAUSIAoYc3Bh",
- "d25fbG9jYWxfd29ya2VyX2NvdW50GAggASgFIjYKCVNjZW5hcmlvcxIpCglz",
- "Y2VuYXJpb3MYASADKAsyFi5ncnBjLnRlc3RpbmcuU2NlbmFyaW8i+AIKFVNj",
- "ZW5hcmlvUmVzdWx0U3VtbWFyeRILCgNxcHMYASABKAESGwoTcXBzX3Blcl9z",
- "ZXJ2ZXJfY29yZRgCIAEoARIaChJzZXJ2ZXJfc3lzdGVtX3RpbWUYAyABKAES",
- "GAoQc2VydmVyX3VzZXJfdGltZRgEIAEoARIaChJjbGllbnRfc3lzdGVtX3Rp",
- "bWUYBSABKAESGAoQY2xpZW50X3VzZXJfdGltZRgGIAEoARISCgpsYXRlbmN5",
- "XzUwGAcgASgBEhIKCmxhdGVuY3lfOTAYCCABKAESEgoKbGF0ZW5jeV85NRgJ",
- "IAEoARISCgpsYXRlbmN5Xzk5GAogASgBEhMKC2xhdGVuY3lfOTk5GAsgASgB",
- "EhgKEHNlcnZlcl9jcHVfdXNhZ2UYDCABKAESJgoec3VjY2Vzc2Z1bF9yZXF1",
- "ZXN0c19wZXJfc2Vjb25kGA0gASgBEiIKGmZhaWxlZF9yZXF1ZXN0c19wZXJf",
- "c2Vjb25kGA4gASgBIoMDCg5TY2VuYXJpb1Jlc3VsdBIoCghzY2VuYXJpbxgB",
- "IAEoCzIWLmdycGMudGVzdGluZy5TY2VuYXJpbxIuCglsYXRlbmNpZXMYAiAB",
- "KAsyGy5ncnBjLnRlc3RpbmcuSGlzdG9ncmFtRGF0YRIvCgxjbGllbnRfc3Rh",
- "dHMYAyADKAsyGS5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHMSLwoMc2VydmVy",
- "X3N0YXRzGAQgAygLMhkuZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXRzEhQKDHNl",
- "cnZlcl9jb3JlcxgFIAMoBRI0CgdzdW1tYXJ5GAYgASgLMiMuZ3JwYy50ZXN0",
- "aW5nLlNjZW5hcmlvUmVzdWx0U3VtbWFyeRIWCg5jbGllbnRfc3VjY2VzcxgH",
- "IAMoCBIWCg5zZXJ2ZXJfc3VjY2VzcxgIIAMoCBI5Cg9yZXF1ZXN0X3Jlc3Vs",
- "dHMYCSADKAsyIC5ncnBjLnRlc3RpbmcuUmVxdWVzdFJlc3VsdENvdW50KkEK",
- "CkNsaWVudFR5cGUSDwoLU1lOQ19DTElFTlQQABIQCgxBU1lOQ19DTElFTlQQ",
- "ARIQCgxPVEhFUl9DTElFTlQQAipbCgpTZXJ2ZXJUeXBlEg8KC1NZTkNfU0VS",
- "VkVSEAASEAoMQVNZTkNfU0VSVkVSEAESGAoUQVNZTkNfR0VORVJJQ19TRVJW",
- "RVIQAhIQCgxPVEhFUl9TRVJWRVIQAyojCgdScGNUeXBlEgkKBVVOQVJZEAAS",
- "DQoJU1RSRUFNSU5HEAFiBnByb3RvMw=="));
+ "c3RpbmcuQ2hhbm5lbEFyZxIWCg50aHJlYWRzX3Blcl9jcRgRIAEoBRIbChNt",
+ "ZXNzYWdlc19wZXJfc3RyZWFtGBIgASgFIjgKDENsaWVudFN0YXR1cxIoCgVz",
+ "dGF0cxgBIAEoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cyIVCgRNYXJr",
+ "Eg0KBXJlc2V0GAEgASgIImgKCkNsaWVudEFyZ3MSKwoFc2V0dXAYASABKAsy",
+ "Gi5ncnBjLnRlc3RpbmcuQ2xpZW50Q29uZmlnSAASIgoEbWFyaxgCIAEoCzIS",
+ "LmdycGMudGVzdGluZy5NYXJrSABCCQoHYXJndHlwZSLMAgoMU2VydmVyQ29u",
+ "ZmlnEi0KC3NlcnZlcl90eXBlGAEgASgOMhguZ3JwYy50ZXN0aW5nLlNlcnZl",
+ "clR5cGUSNQoPc2VjdXJpdHlfcGFyYW1zGAIgASgLMhwuZ3JwYy50ZXN0aW5n",
+ "LlNlY3VyaXR5UGFyYW1zEgwKBHBvcnQYBCABKAUSHAoUYXN5bmNfc2VydmVy",
+ "X3RocmVhZHMYByABKAUSEgoKY29yZV9saW1pdBgIIAEoBRIzCg5wYXlsb2Fk",
+ "X2NvbmZpZxgJIAEoCzIbLmdycGMudGVzdGluZy5QYXlsb2FkQ29uZmlnEhEK",
+ "CWNvcmVfbGlzdBgKIAMoBRIYChBvdGhlcl9zZXJ2ZXJfYXBpGAsgASgJEhYK",
+ "DnRocmVhZHNfcGVyX2NxGAwgASgFEhwKE3Jlc291cmNlX3F1b3RhX3NpemUY",
+ "6QcgASgFImgKClNlcnZlckFyZ3MSKwoFc2V0dXAYASABKAsyGi5ncnBjLnRl",
+ "c3RpbmcuU2VydmVyQ29uZmlnSAASIgoEbWFyaxgCIAEoCzISLmdycGMudGVz",
+ "dGluZy5NYXJrSABCCQoHYXJndHlwZSJVCgxTZXJ2ZXJTdGF0dXMSKAoFc3Rh",
+ "dHMYASABKAsyGS5ncnBjLnRlc3RpbmcuU2VydmVyU3RhdHMSDAoEcG9ydBgC",
+ "IAEoBRINCgVjb3JlcxgDIAEoBSINCgtDb3JlUmVxdWVzdCIdCgxDb3JlUmVz",
+ "cG9uc2USDQoFY29yZXMYASABKAUiBgoEVm9pZCL9AQoIU2NlbmFyaW8SDAoE",
+ "bmFtZRgBIAEoCRIxCg1jbGllbnRfY29uZmlnGAIgASgLMhouZ3JwYy50ZXN0",
+ "aW5nLkNsaWVudENvbmZpZxITCgtudW1fY2xpZW50cxgDIAEoBRIxCg1zZXJ2",
+ "ZXJfY29uZmlnGAQgASgLMhouZ3JwYy50ZXN0aW5nLlNlcnZlckNvbmZpZxIT",
+ "CgtudW1fc2VydmVycxgFIAEoBRIWCg53YXJtdXBfc2Vjb25kcxgGIAEoBRIZ",
+ "ChFiZW5jaG1hcmtfc2Vjb25kcxgHIAEoBRIgChhzcGF3bl9sb2NhbF93b3Jr",
+ "ZXJfY291bnQYCCABKAUiNgoJU2NlbmFyaW9zEikKCXNjZW5hcmlvcxgBIAMo",
+ "CzIWLmdycGMudGVzdGluZy5TY2VuYXJpbyK8AwoVU2NlbmFyaW9SZXN1bHRT",
+ "dW1tYXJ5EgsKA3FwcxgBIAEoARIbChNxcHNfcGVyX3NlcnZlcl9jb3JlGAIg",
+ "ASgBEhoKEnNlcnZlcl9zeXN0ZW1fdGltZRgDIAEoARIYChBzZXJ2ZXJfdXNl",
+ "cl90aW1lGAQgASgBEhoKEmNsaWVudF9zeXN0ZW1fdGltZRgFIAEoARIYChBj",
+ "bGllbnRfdXNlcl90aW1lGAYgASgBEhIKCmxhdGVuY3lfNTAYByABKAESEgoK",
+ "bGF0ZW5jeV85MBgIIAEoARISCgpsYXRlbmN5Xzk1GAkgASgBEhIKCmxhdGVu",
+ "Y3lfOTkYCiABKAESEwoLbGF0ZW5jeV85OTkYCyABKAESGAoQc2VydmVyX2Nw",
+ "dV91c2FnZRgMIAEoARImCh5zdWNjZXNzZnVsX3JlcXVlc3RzX3Blcl9zZWNv",
+ "bmQYDSABKAESIgoaZmFpbGVkX3JlcXVlc3RzX3Blcl9zZWNvbmQYDiABKAES",
+ "IAoYY2xpZW50X3BvbGxzX3Blcl9yZXF1ZXN0GA8gASgBEiAKGHNlcnZlcl9w",
+ "b2xsc19wZXJfcmVxdWVzdBgQIAEoASKDAwoOU2NlbmFyaW9SZXN1bHQSKAoI",
+ "c2NlbmFyaW8YASABKAsyFi5ncnBjLnRlc3RpbmcuU2NlbmFyaW8SLgoJbGF0",
+ "ZW5jaWVzGAIgASgLMhsuZ3JwYy50ZXN0aW5nLkhpc3RvZ3JhbURhdGESLwoM",
+ "Y2xpZW50X3N0YXRzGAMgAygLMhkuZ3JwYy50ZXN0aW5nLkNsaWVudFN0YXRz",
+ "Ei8KDHNlcnZlcl9zdGF0cxgEIAMoCzIZLmdycGMudGVzdGluZy5TZXJ2ZXJT",
+ "dGF0cxIUCgxzZXJ2ZXJfY29yZXMYBSADKAUSNAoHc3VtbWFyeRgGIAEoCzIj",
+ "LmdycGMudGVzdGluZy5TY2VuYXJpb1Jlc3VsdFN1bW1hcnkSFgoOY2xpZW50",
+ "X3N1Y2Nlc3MYByADKAgSFgoOc2VydmVyX3N1Y2Nlc3MYCCADKAgSOQoPcmVx",
+ "dWVzdF9yZXN1bHRzGAkgAygLMiAuZ3JwYy50ZXN0aW5nLlJlcXVlc3RSZXN1",
+ "bHRDb3VudCpBCgpDbGllbnRUeXBlEg8KC1NZTkNfQ0xJRU5UEAASEAoMQVNZ",
+ "TkNfQ0xJRU5UEAESEAoMT1RIRVJfQ0xJRU5UEAIqWwoKU2VydmVyVHlwZRIP",
+ "CgtTWU5DX1NFUlZFUhAAEhAKDEFTWU5DX1NFUlZFUhABEhgKFEFTWU5DX0dF",
+ "TkVSSUNfU0VSVkVSEAISEAoMT1RIRVJfU0VSVkVSEAMqcgoHUnBjVHlwZRIJ",
+ "CgVVTkFSWRAAEg0KCVNUUkVBTUlORxABEhkKFVNUUkVBTUlOR19GUk9NX0NM",
+ "SUVOVBACEhkKFVNUUkVBTUlOR19GUk9NX1NFUlZFUhADEhcKE1NUUkVBTUlO",
+ "R19CT1RIX1dBWVMQBGIGcHJvdG8z"));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { global::Grpc.Testing.PayloadsReflection.Descriptor, global::Grpc.Testing.StatsReflection.Descriptor, },
new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Grpc.Testing.ClientType), typeof(global::Grpc.Testing.ServerType), typeof(global::Grpc.Testing.RpcType), }, new pbr::GeneratedClrTypeInfo[] {
@@ -98,11 +103,11 @@ namespace Grpc.Testing {
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.LoadParams), global::Grpc.Testing.LoadParams.Parser, new[]{ "ClosedLoop", "Poisson" }, new[]{ "Load" }, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.SecurityParams), global::Grpc.Testing.SecurityParams.Parser, new[]{ "UseTestCa", "ServerHostOverride" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ChannelArg), global::Grpc.Testing.ChannelArg.Parser, new[]{ "Name", "StrValue", "IntValue" }, new[]{ "Value" }, null, null),
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientConfig), global::Grpc.Testing.ClientConfig.Parser, new[]{ "ServerTargets", "ClientType", "SecurityParams", "OutstandingRpcsPerChannel", "ClientChannels", "AsyncClientThreads", "RpcType", "LoadParams", "PayloadConfig", "HistogramParams", "CoreList", "CoreLimit", "OtherClientApi", "ChannelArgs" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientConfig), global::Grpc.Testing.ClientConfig.Parser, new[]{ "ServerTargets", "ClientType", "SecurityParams", "OutstandingRpcsPerChannel", "ClientChannels", "AsyncClientThreads", "RpcType", "LoadParams", "PayloadConfig", "HistogramParams", "CoreList", "CoreLimit", "OtherClientApi", "ChannelArgs", "ThreadsPerCq", "MessagesPerStream" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStatus), global::Grpc.Testing.ClientStatus.Parser, new[]{ "Stats" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Mark), global::Grpc.Testing.Mark.Parser, new[]{ "Reset" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientArgs), global::Grpc.Testing.ClientArgs.Parser, new[]{ "Setup", "Mark" }, new[]{ "Argtype" }, null, null),
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerConfig), global::Grpc.Testing.ServerConfig.Parser, new[]{ "ServerType", "SecurityParams", "Port", "AsyncServerThreads", "CoreLimit", "PayloadConfig", "CoreList", "OtherServerApi", "ResourceQuotaSize" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerConfig), global::Grpc.Testing.ServerConfig.Parser, new[]{ "ServerType", "SecurityParams", "Port", "AsyncServerThreads", "CoreLimit", "PayloadConfig", "CoreList", "OtherServerApi", "ThreadsPerCq", "ResourceQuotaSize" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerArgs), global::Grpc.Testing.ServerArgs.Parser, new[]{ "Setup", "Mark" }, new[]{ "Argtype" }, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStatus), global::Grpc.Testing.ServerStatus.Parser, new[]{ "Stats", "Port", "Cores" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.CoreRequest), global::Grpc.Testing.CoreRequest.Parser, null, null, null, null),
@@ -110,7 +115,7 @@ namespace Grpc.Testing {
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Void), global::Grpc.Testing.Void.Parser, null, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Scenario), global::Grpc.Testing.Scenario.Parser, new[]{ "Name", "ClientConfig", "NumClients", "ServerConfig", "NumServers", "WarmupSeconds", "BenchmarkSeconds", "SpawnLocalWorkerCount" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Scenarios), global::Grpc.Testing.Scenarios.Parser, new[]{ "Scenarios_" }, null, null, null),
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ScenarioResultSummary), global::Grpc.Testing.ScenarioResultSummary.Parser, new[]{ "Qps", "QpsPerServerCore", "ServerSystemTime", "ServerUserTime", "ClientSystemTime", "ClientUserTime", "Latency50", "Latency90", "Latency95", "Latency99", "Latency999", "ServerCpuUsage", "SuccessfulRequestsPerSecond", "FailedRequestsPerSecond" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ScenarioResultSummary), global::Grpc.Testing.ScenarioResultSummary.Parser, new[]{ "Qps", "QpsPerServerCore", "ServerSystemTime", "ServerUserTime", "ClientSystemTime", "ClientUserTime", "Latency50", "Latency90", "Latency95", "Latency99", "Latency999", "ServerCpuUsage", "SuccessfulRequestsPerSecond", "FailedRequestsPerSecond", "ClientPollsPerRequest", "ServerPollsPerRequest" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ScenarioResult), global::Grpc.Testing.ScenarioResult.Parser, new[]{ "Scenario", "Latencies", "ClientStats", "ServerStats", "ServerCores", "Summary", "ClientSuccess", "ServerSuccess", "RequestResults" }, null, null, null)
}));
}
@@ -144,6 +149,9 @@ namespace Grpc.Testing {
public enum RpcType {
[pbr::OriginalName("UNARY")] Unary = 0,
[pbr::OriginalName("STREAMING")] Streaming = 1,
+ [pbr::OriginalName("STREAMING_FROM_CLIENT")] StreamingFromClient = 2,
+ [pbr::OriginalName("STREAMING_FROM_SERVER")] StreamingFromServer = 3,
+ [pbr::OriginalName("STREAMING_BOTH_WAYS")] StreamingBothWays = 4,
}
#endregion
@@ -942,6 +950,8 @@ namespace Grpc.Testing {
coreLimit_ = other.coreLimit_;
otherClientApi_ = other.otherClientApi_;
channelArgs_ = other.channelArgs_.Clone();
+ threadsPerCq_ = other.threadsPerCq_;
+ messagesPerStream_ = other.messagesPerStream_;
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1123,6 +1133,34 @@ namespace Grpc.Testing {
get { return channelArgs_; }
}
+ /// <summary>Field number for the "threads_per_cq" field.</summary>
+ public const int ThreadsPerCqFieldNumber = 17;
+ private int threadsPerCq_;
+ /// <summary>
+ /// Number of threads that share each completion queue
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int ThreadsPerCq {
+ get { return threadsPerCq_; }
+ set {
+ threadsPerCq_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "messages_per_stream" field.</summary>
+ public const int MessagesPerStreamFieldNumber = 18;
+ private int messagesPerStream_;
+ /// <summary>
+ /// Number of messages on a stream before it gets finished/restarted
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int MessagesPerStream {
+ get { return messagesPerStream_; }
+ set {
+ messagesPerStream_ = value;
+ }
+ }
+
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as ClientConfig);
@@ -1150,6 +1188,8 @@ namespace Grpc.Testing {
if (CoreLimit != other.CoreLimit) return false;
if (OtherClientApi != other.OtherClientApi) return false;
if(!channelArgs_.Equals(other.channelArgs_)) return false;
+ if (ThreadsPerCq != other.ThreadsPerCq) return false;
+ if (MessagesPerStream != other.MessagesPerStream) return false;
return true;
}
@@ -1170,6 +1210,8 @@ namespace Grpc.Testing {
if (CoreLimit != 0) hash ^= CoreLimit.GetHashCode();
if (OtherClientApi.Length != 0) hash ^= OtherClientApi.GetHashCode();
hash ^= channelArgs_.GetHashCode();
+ if (ThreadsPerCq != 0) hash ^= ThreadsPerCq.GetHashCode();
+ if (MessagesPerStream != 0) hash ^= MessagesPerStream.GetHashCode();
return hash;
}
@@ -1227,6 +1269,14 @@ namespace Grpc.Testing {
output.WriteString(OtherClientApi);
}
channelArgs_.WriteTo(output, _repeated_channelArgs_codec);
+ if (ThreadsPerCq != 0) {
+ output.WriteRawTag(136, 1);
+ output.WriteInt32(ThreadsPerCq);
+ }
+ if (MessagesPerStream != 0) {
+ output.WriteRawTag(144, 1);
+ output.WriteInt32(MessagesPerStream);
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1268,6 +1318,12 @@ namespace Grpc.Testing {
size += 1 + pb::CodedOutputStream.ComputeStringSize(OtherClientApi);
}
size += channelArgs_.CalculateSize(_repeated_channelArgs_codec);
+ if (ThreadsPerCq != 0) {
+ size += 2 + pb::CodedOutputStream.ComputeInt32Size(ThreadsPerCq);
+ }
+ if (MessagesPerStream != 0) {
+ size += 2 + pb::CodedOutputStream.ComputeInt32Size(MessagesPerStream);
+ }
return size;
}
@@ -1324,6 +1380,12 @@ namespace Grpc.Testing {
OtherClientApi = other.OtherClientApi;
}
channelArgs_.Add(other.channelArgs_);
+ if (other.ThreadsPerCq != 0) {
+ ThreadsPerCq = other.ThreadsPerCq;
+ }
+ if (other.MessagesPerStream != 0) {
+ MessagesPerStream = other.MessagesPerStream;
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1403,6 +1465,14 @@ namespace Grpc.Testing {
channelArgs_.AddEntriesFrom(input, _repeated_channelArgs_codec);
break;
}
+ case 136: {
+ ThreadsPerCq = input.ReadInt32();
+ break;
+ }
+ case 144: {
+ MessagesPerStream = input.ReadInt32();
+ break;
+ }
}
}
}
@@ -1873,6 +1943,7 @@ namespace Grpc.Testing {
PayloadConfig = other.payloadConfig_ != null ? other.PayloadConfig.Clone() : null;
coreList_ = other.coreList_.Clone();
otherServerApi_ = other.otherServerApi_;
+ threadsPerCq_ = other.threadsPerCq_;
resourceQuotaSize_ = other.resourceQuotaSize_;
}
@@ -1989,6 +2060,20 @@ namespace Grpc.Testing {
}
}
+ /// <summary>Field number for the "threads_per_cq" field.</summary>
+ public const int ThreadsPerCqFieldNumber = 12;
+ private int threadsPerCq_;
+ /// <summary>
+ /// Number of threads that share each completion queue
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int ThreadsPerCq {
+ get { return threadsPerCq_; }
+ set {
+ threadsPerCq_ = value;
+ }
+ }
+
/// <summary>Field number for the "resource_quota_size" field.</summary>
public const int ResourceQuotaSizeFieldNumber = 1001;
private int resourceQuotaSize_;
@@ -2024,6 +2109,7 @@ namespace Grpc.Testing {
if (!object.Equals(PayloadConfig, other.PayloadConfig)) return false;
if(!coreList_.Equals(other.coreList_)) return false;
if (OtherServerApi != other.OtherServerApi) return false;
+ if (ThreadsPerCq != other.ThreadsPerCq) return false;
if (ResourceQuotaSize != other.ResourceQuotaSize) return false;
return true;
}
@@ -2039,6 +2125,7 @@ namespace Grpc.Testing {
if (payloadConfig_ != null) hash ^= PayloadConfig.GetHashCode();
hash ^= coreList_.GetHashCode();
if (OtherServerApi.Length != 0) hash ^= OtherServerApi.GetHashCode();
+ if (ThreadsPerCq != 0) hash ^= ThreadsPerCq.GetHashCode();
if (ResourceQuotaSize != 0) hash ^= ResourceQuotaSize.GetHashCode();
return hash;
}
@@ -2079,6 +2166,10 @@ namespace Grpc.Testing {
output.WriteRawTag(90);
output.WriteString(OtherServerApi);
}
+ if (ThreadsPerCq != 0) {
+ output.WriteRawTag(96);
+ output.WriteInt32(ThreadsPerCq);
+ }
if (ResourceQuotaSize != 0) {
output.WriteRawTag(200, 62);
output.WriteInt32(ResourceQuotaSize);
@@ -2110,6 +2201,9 @@ namespace Grpc.Testing {
if (OtherServerApi.Length != 0) {
size += 1 + pb::CodedOutputStream.ComputeStringSize(OtherServerApi);
}
+ if (ThreadsPerCq != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(ThreadsPerCq);
+ }
if (ResourceQuotaSize != 0) {
size += 2 + pb::CodedOutputStream.ComputeInt32Size(ResourceQuotaSize);
}
@@ -2149,6 +2243,9 @@ namespace Grpc.Testing {
if (other.OtherServerApi.Length != 0) {
OtherServerApi = other.OtherServerApi;
}
+ if (other.ThreadsPerCq != 0) {
+ ThreadsPerCq = other.ThreadsPerCq;
+ }
if (other.ResourceQuotaSize != 0) {
ResourceQuotaSize = other.ResourceQuotaSize;
}
@@ -2201,6 +2298,10 @@ namespace Grpc.Testing {
OtherServerApi = input.ReadString();
break;
}
+ case 96: {
+ ThreadsPerCq = input.ReadInt32();
+ break;
+ }
case 8008: {
ResourceQuotaSize = input.ReadInt32();
break;
@@ -3386,6 +3487,8 @@ namespace Grpc.Testing {
serverCpuUsage_ = other.serverCpuUsage_;
successfulRequestsPerSecond_ = other.successfulRequestsPerSecond_;
failedRequestsPerSecond_ = other.failedRequestsPerSecond_;
+ clientPollsPerRequest_ = other.clientPollsPerRequest_;
+ serverPollsPerRequest_ = other.serverPollsPerRequest_;
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -3574,6 +3677,31 @@ namespace Grpc.Testing {
}
}
+ /// <summary>Field number for the "client_polls_per_request" field.</summary>
+ public const int ClientPollsPerRequestFieldNumber = 15;
+ private double clientPollsPerRequest_;
+ /// <summary>
+ /// Number of polls called inside completion queue per request
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public double ClientPollsPerRequest {
+ get { return clientPollsPerRequest_; }
+ set {
+ clientPollsPerRequest_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "server_polls_per_request" field.</summary>
+ public const int ServerPollsPerRequestFieldNumber = 16;
+ private double serverPollsPerRequest_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public double ServerPollsPerRequest {
+ get { return serverPollsPerRequest_; }
+ set {
+ serverPollsPerRequest_ = value;
+ }
+ }
+
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as ScenarioResultSummary);
@@ -3601,6 +3729,8 @@ namespace Grpc.Testing {
if (ServerCpuUsage != other.ServerCpuUsage) return false;
if (SuccessfulRequestsPerSecond != other.SuccessfulRequestsPerSecond) return false;
if (FailedRequestsPerSecond != other.FailedRequestsPerSecond) return false;
+ if (ClientPollsPerRequest != other.ClientPollsPerRequest) return false;
+ if (ServerPollsPerRequest != other.ServerPollsPerRequest) return false;
return true;
}
@@ -3621,6 +3751,8 @@ namespace Grpc.Testing {
if (ServerCpuUsage != 0D) hash ^= ServerCpuUsage.GetHashCode();
if (SuccessfulRequestsPerSecond != 0D) hash ^= SuccessfulRequestsPerSecond.GetHashCode();
if (FailedRequestsPerSecond != 0D) hash ^= FailedRequestsPerSecond.GetHashCode();
+ if (ClientPollsPerRequest != 0D) hash ^= ClientPollsPerRequest.GetHashCode();
+ if (ServerPollsPerRequest != 0D) hash ^= ServerPollsPerRequest.GetHashCode();
return hash;
}
@@ -3687,6 +3819,14 @@ namespace Grpc.Testing {
output.WriteRawTag(113);
output.WriteDouble(FailedRequestsPerSecond);
}
+ if (ClientPollsPerRequest != 0D) {
+ output.WriteRawTag(121);
+ output.WriteDouble(ClientPollsPerRequest);
+ }
+ if (ServerPollsPerRequest != 0D) {
+ output.WriteRawTag(129, 1);
+ output.WriteDouble(ServerPollsPerRequest);
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -3734,6 +3874,12 @@ namespace Grpc.Testing {
if (FailedRequestsPerSecond != 0D) {
size += 1 + 8;
}
+ if (ClientPollsPerRequest != 0D) {
+ size += 1 + 8;
+ }
+ if (ServerPollsPerRequest != 0D) {
+ size += 2 + 8;
+ }
return size;
}
@@ -3784,6 +3930,12 @@ namespace Grpc.Testing {
if (other.FailedRequestsPerSecond != 0D) {
FailedRequestsPerSecond = other.FailedRequestsPerSecond;
}
+ if (other.ClientPollsPerRequest != 0D) {
+ ClientPollsPerRequest = other.ClientPollsPerRequest;
+ }
+ if (other.ServerPollsPerRequest != 0D) {
+ ServerPollsPerRequest = other.ServerPollsPerRequest;
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -3850,6 +4002,14 @@ namespace Grpc.Testing {
FailedRequestsPerSecond = input.ReadDouble();
break;
}
+ case 121: {
+ ClientPollsPerRequest = input.ReadDouble();
+ break;
+ }
+ case 129: {
+ ServerPollsPerRequest = input.ReadDouble();
+ break;
+ }
}
}
}
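
The raw tag bytes in the regenerated writers follow protobuf's key encoding: key = (field_number << 3) | wire_type, emitted as a varint. threads_per_cq is field 17 with varint wire type 0, so (17 << 3) | 0 = 136, which takes two varint bytes 0x88 0x01 -- hence WriteRawTag(136, 1); field 18 gives 144, 1, while the double-typed fields 15 and 16 (wire type 1) give the single byte 121 and the pair 129, 1. A tiny C check of that arithmetic:

    #include <stdio.h>

    /* Compute a protobuf field key and print its varint bytes. */
    void print_tag(int field_number, int wire_type) {
      unsigned key = ((unsigned)field_number << 3) | (unsigned)wire_type;
      printf("field %d/wt%d -> key %u -> bytes:", field_number, wire_type, key);
      do {
        unsigned byte = key & 0x7f;
        key >>= 7;
        if (key) byte |= 0x80; /* continuation bit */
        printf(" %u", byte);
      } while (key);
      printf("\n");
    }

    int main(void) {
      print_tag(17, 0); /* threads_per_cq: 136, 1 */
      print_tag(18, 0); /* messages_per_stream: 144, 1 */
      print_tag(15, 1); /* client_polls_per_request: 121 */
      print_tag(16, 1); /* server_polls_per_request: 129, 1 */
      return 0;
    }
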
diff --git a/src/csharp/Grpc.IntegrationTesting/CustomErrorDetailsTest.cs b/src/csharp/Grpc.IntegrationTesting/CustomErrorDetailsTest.cs
new file mode 100644
index 0000000000..be996f91e0
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/CustomErrorDetailsTest.cs
@@ -0,0 +1,112 @@
+#region Copyright notice and license
+
+// Copyright 2015-2016 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using Google.Protobuf;
+using Grpc.Core;
+using Grpc.Core.Utils;
+using Grpc.Testing;
+using NUnit.Framework;
+
+namespace Grpc.IntegrationTesting
+{
+ /// <summary>
+ /// Shows how to attach custom error details as a binary trailer.
+ /// </summary>
+ public class CustomErrorDetailsTest
+ {
+ const string DebugInfoTrailerName = "debug-info-bin";
+ const string ExceptionDetail = "Exception thrown on purpose.";
+ const string Host = "localhost";
+ Server server;
+ Channel channel;
+ TestService.TestServiceClient client;
+
+ [TestFixtureSetUp]
+ public void Init()
+ {
+ // Disable SO_REUSEPORT to prevent https://github.com/grpc/grpc/issues/10755
+ server = new Server(new[] { new ChannelOption(ChannelOptions.SoReuseport, 0) })
+ {
+ Services = { TestService.BindService(new CustomErrorDetailsTestServiceImpl()) },
+ Ports = { { Host, ServerPort.PickUnused, ServerCredentials.Insecure } }
+ };
+ server.Start();
+
+ channel = new Channel(Host, server.Ports.Single().BoundPort, ChannelCredentials.Insecure);
+ client = new TestService.TestServiceClient(channel);
+ }
+
+ [TestFixtureTearDown]
+ public void Cleanup()
+ {
+ channel.ShutdownAsync().Wait();
+ server.ShutdownAsync().Wait();
+ }
+
+ [Test]
+ public async Task UnaryCall()
+ {
+ var call = client.UnaryCallAsync(new SimpleRequest { ResponseSize = 10 });
+
+ try
+ {
+ await call.ResponseAsync;
+ Assert.Fail();
+ }
+ catch (RpcException e)
+ {
+ Assert.AreEqual(StatusCode.Unknown, e.Status.StatusCode);
+ var debugInfo = GetDebugInfo(call.GetTrailers());
+ Assert.AreEqual(debugInfo.Detail, ExceptionDetail);
+ Assert.IsNotEmpty(debugInfo.StackEntries);
+ }
+ }
+
+ private DebugInfo GetDebugInfo(Metadata trailers)
+ {
+ var entry = trailers.First((e) => e.Key == DebugInfoTrailerName);
+ return DebugInfo.Parser.ParseFrom(entry.ValueBytes);
+ }
+
+ private class CustomErrorDetailsTestServiceImpl : TestService.TestServiceBase
+ {
+ public override async Task<SimpleResponse> UnaryCall(SimpleRequest request, ServerCallContext context)
+ {
+ try
+ {
+ throw new ArgumentException(ExceptionDetail);
+ }
+ catch (Exception e)
+ {
+ // Fill debug info with some structured details about the failure.
+ var debugInfo = new DebugInfo();
+ debugInfo.Detail = e.Message;
+ debugInfo.StackEntries.AddRange(e.StackTrace.Split(new[] { Environment.NewLine }, StringSplitOptions.None));
+ context.ResponseTrailers.Add(DebugInfoTrailerName, debugInfo.ToByteArray());
+ throw new RpcException(new Status(StatusCode.Unknown, "The handler threw an exception."));
+ }
+ }
+ }
+ }
+}
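
The new test ships a serialized DebugInfo proto in a trailer whose key ends in -bin; gRPC treats such metadata as binary-valued and base64-encodes it on the HTTP/2 wire, which is what lets ToByteArray()/ParseFrom round-trip here. Assuming grpc_is_binary_header is available from the public C API, as it is in this era of the tree, a minimal check of the convention:

    #include <stdio.h>
    #include <grpc/grpc.h>
    #include <grpc/slice.h>

    int main(void) {
      grpc_init();
      grpc_slice key = grpc_slice_from_static_string("debug-info-bin");
      printf("binary header? %d\n", grpc_is_binary_header(key)); /* prints 1 */
      grpc_slice_unref(key);
      grpc_shutdown();
      return 0;
    }
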
diff --git a/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs b/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
new file mode 100644
index 0000000000..b2fe73acdf
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
@@ -0,0 +1,1354 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: src/proto/grpc/testing/echo_messages.proto
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Grpc.Testing {
+
+ /// <summary>Holder for reflection information generated from src/proto/grpc/testing/echo_messages.proto</summary>
+ public static partial class EchoMessagesReflection {
+
+ #region Descriptor
+ /// <summary>File descriptor for src/proto/grpc/testing/echo_messages.proto</summary>
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static EchoMessagesReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CipzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL2VjaG9fbWVzc2FnZXMucHJvdG8S",
+ "DGdycGMudGVzdGluZyIyCglEZWJ1Z0luZm8SFQoNc3RhY2tfZW50cmllcxgB",
+ "IAMoCRIOCgZkZXRhaWwYAiABKAkiUAoLRXJyb3JTdGF0dXMSDAoEY29kZRgB",
+ "IAEoBRIVCg1lcnJvcl9tZXNzYWdlGAIgASgJEhwKFGJpbmFyeV9lcnJvcl9k",
+ "ZXRhaWxzGAMgASgJIskDCg1SZXF1ZXN0UGFyYW1zEhUKDWVjaG9fZGVhZGxp",
+ "bmUYASABKAgSHgoWY2xpZW50X2NhbmNlbF9hZnRlcl91cxgCIAEoBRIeChZz",
+ "ZXJ2ZXJfY2FuY2VsX2FmdGVyX3VzGAMgASgFEhUKDWVjaG9fbWV0YWRhdGEY",
+ "BCABKAgSGgoSY2hlY2tfYXV0aF9jb250ZXh0GAUgASgIEh8KF3Jlc3BvbnNl",
+ "X21lc3NhZ2VfbGVuZ3RoGAYgASgFEhEKCWVjaG9fcGVlchgHIAEoCBIgChhl",
+ "eHBlY3RlZF9jbGllbnRfaWRlbnRpdHkYCCABKAkSHAoUc2tpcF9jYW5jZWxs",
+ "ZWRfY2hlY2sYCSABKAgSKAogZXhwZWN0ZWRfdHJhbnNwb3J0X3NlY3VyaXR5",
+ "X3R5cGUYCiABKAkSKwoKZGVidWdfaW5mbxgLIAEoCzIXLmdycGMudGVzdGlu",
+ "Zy5EZWJ1Z0luZm8SEgoKc2VydmVyX2RpZRgMIAEoCBIcChRiaW5hcnlfZXJy",
+ "b3JfZGV0YWlscxgNIAEoCRIxCg5leHBlY3RlZF9lcnJvchgOIAEoCzIZLmdy",
+ "cGMudGVzdGluZy5FcnJvclN0YXR1cyJKCgtFY2hvUmVxdWVzdBIPCgdtZXNz",
+ "YWdlGAEgASgJEioKBXBhcmFtGAIgASgLMhsuZ3JwYy50ZXN0aW5nLlJlcXVl",
+ "c3RQYXJhbXMiRgoOUmVzcG9uc2VQYXJhbXMSGAoQcmVxdWVzdF9kZWFkbGlu",
+ "ZRgBIAEoAxIMCgRob3N0GAIgASgJEgwKBHBlZXIYAyABKAkiTAoMRWNob1Jl",
+ "c3BvbnNlEg8KB21lc3NhZ2UYASABKAkSKwoFcGFyYW0YAiABKAsyHC5ncnBj",
+ "LnRlc3RpbmcuUmVzcG9uc2VQYXJhbXNiBnByb3RvMw=="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.DebugInfo), global::Grpc.Testing.DebugInfo.Parser, new[]{ "StackEntries", "Detail" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ErrorStatus), global::Grpc.Testing.ErrorStatus.Parser, new[]{ "Code", "ErrorMessage", "BinaryErrorDetails" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestParams), global::Grpc.Testing.RequestParams.Parser, new[]{ "EchoDeadline", "ClientCancelAfterUs", "ServerCancelAfterUs", "EchoMetadata", "CheckAuthContext", "ResponseMessageLength", "EchoPeer", "ExpectedClientIdentity", "SkipCancelledCheck", "ExpectedTransportSecurityType", "DebugInfo", "ServerDie", "BinaryErrorDetails", "ExpectedError" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoRequest), global::Grpc.Testing.EchoRequest.Parser, new[]{ "Message", "Param" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ResponseParams), global::Grpc.Testing.ResponseParams.Parser, new[]{ "RequestDeadline", "Host", "Peer" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoResponse), global::Grpc.Testing.EchoResponse.Parser, new[]{ "Message", "Param" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+ /// <summary>
+ /// Message to be echoed back serialized in trailer.
+ /// </summary>
+ public sealed partial class DebugInfo : pb::IMessage<DebugInfo> {
+ private static readonly pb::MessageParser<DebugInfo> _parser = new pb::MessageParser<DebugInfo>(() => new DebugInfo());
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<DebugInfo> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Grpc.Testing.EchoMessagesReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public DebugInfo() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public DebugInfo(DebugInfo other) : this() {
+ stackEntries_ = other.stackEntries_.Clone();
+ detail_ = other.detail_;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public DebugInfo Clone() {
+ return new DebugInfo(this);
+ }
+
+ /// <summary>Field number for the "stack_entries" field.</summary>
+ public const int StackEntriesFieldNumber = 1;
+ private static readonly pb::FieldCodec<string> _repeated_stackEntries_codec
+ = pb::FieldCodec.ForString(10);
+ private readonly pbc::RepeatedField<string> stackEntries_ = new pbc::RepeatedField<string>();
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<string> StackEntries {
+ get { return stackEntries_; }
+ }
+
+ /// <summary>Field number for the "detail" field.</summary>
+ public const int DetailFieldNumber = 2;
+ private string detail_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string Detail {
+ get { return detail_; }
+ set {
+ detail_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as DebugInfo);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(DebugInfo other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if(!stackEntries_.Equals(other.stackEntries_)) return false;
+ if (Detail != other.Detail) return false;
+ return true;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ hash ^= stackEntries_.GetHashCode();
+ if (Detail.Length != 0) hash ^= Detail.GetHashCode();
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ stackEntries_.WriteTo(output, _repeated_stackEntries_codec);
+ if (Detail.Length != 0) {
+ output.WriteRawTag(18);
+ output.WriteString(Detail);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ size += stackEntries_.CalculateSize(_repeated_stackEntries_codec);
+ if (Detail.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(Detail);
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(DebugInfo other) {
+ if (other == null) {
+ return;
+ }
+ stackEntries_.Add(other.stackEntries_);
+ if (other.Detail.Length != 0) {
+ Detail = other.Detail;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ input.SkipLastField();
+ break;
+ case 10: {
+ stackEntries_.AddEntriesFrom(input, _repeated_stackEntries_codec);
+ break;
+ }
+ case 18: {
+ Detail = input.ReadString();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ /// <summary>
+ /// Error status client expects to see.
+ /// </summary>
+ public sealed partial class ErrorStatus : pb::IMessage<ErrorStatus> {
+ private static readonly pb::MessageParser<ErrorStatus> _parser = new pb::MessageParser<ErrorStatus>(() => new ErrorStatus());
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<ErrorStatus> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Grpc.Testing.EchoMessagesReflection.Descriptor.MessageTypes[1]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ErrorStatus() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ErrorStatus(ErrorStatus other) : this() {
+ code_ = other.code_;
+ errorMessage_ = other.errorMessage_;
+ binaryErrorDetails_ = other.binaryErrorDetails_;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ErrorStatus Clone() {
+ return new ErrorStatus(this);
+ }
+
+ /// <summary>Field number for the "code" field.</summary>
+ public const int CodeFieldNumber = 1;
+ private int code_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int Code {
+ get { return code_; }
+ set {
+ code_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "error_message" field.</summary>
+ public const int ErrorMessageFieldNumber = 2;
+ private string errorMessage_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string ErrorMessage {
+ get { return errorMessage_; }
+ set {
+ errorMessage_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "binary_error_details" field.</summary>
+ public const int BinaryErrorDetailsFieldNumber = 3;
+ private string binaryErrorDetails_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string BinaryErrorDetails {
+ get { return binaryErrorDetails_; }
+ set {
+ binaryErrorDetails_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as ErrorStatus);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(ErrorStatus other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (Code != other.Code) return false;
+ if (ErrorMessage != other.ErrorMessage) return false;
+ if (BinaryErrorDetails != other.BinaryErrorDetails) return false;
+ return true;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (Code != 0) hash ^= Code.GetHashCode();
+ if (ErrorMessage.Length != 0) hash ^= ErrorMessage.GetHashCode();
+ if (BinaryErrorDetails.Length != 0) hash ^= BinaryErrorDetails.GetHashCode();
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (Code != 0) {
+ output.WriteRawTag(8);
+ output.WriteInt32(Code);
+ }
+ if (ErrorMessage.Length != 0) {
+ output.WriteRawTag(18);
+ output.WriteString(ErrorMessage);
+ }
+ if (BinaryErrorDetails.Length != 0) {
+ output.WriteRawTag(26);
+ output.WriteString(BinaryErrorDetails);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (Code != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(Code);
+ }
+ if (ErrorMessage.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(ErrorMessage);
+ }
+ if (BinaryErrorDetails.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(BinaryErrorDetails);
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(ErrorStatus other) {
+ if (other == null) {
+ return;
+ }
+ if (other.Code != 0) {
+ Code = other.Code;
+ }
+ if (other.ErrorMessage.Length != 0) {
+ ErrorMessage = other.ErrorMessage;
+ }
+ if (other.BinaryErrorDetails.Length != 0) {
+ BinaryErrorDetails = other.BinaryErrorDetails;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ input.SkipLastField();
+ break;
+ case 8: {
+ Code = input.ReadInt32();
+ break;
+ }
+ case 18: {
+ ErrorMessage = input.ReadString();
+ break;
+ }
+ case 26: {
+ BinaryErrorDetails = input.ReadString();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ public sealed partial class RequestParams : pb::IMessage<RequestParams> {
+ private static readonly pb::MessageParser<RequestParams> _parser = new pb::MessageParser<RequestParams>(() => new RequestParams());
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<RequestParams> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Grpc.Testing.EchoMessagesReflection.Descriptor.MessageTypes[2]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public RequestParams() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public RequestParams(RequestParams other) : this() {
+ echoDeadline_ = other.echoDeadline_;
+ clientCancelAfterUs_ = other.clientCancelAfterUs_;
+ serverCancelAfterUs_ = other.serverCancelAfterUs_;
+ echoMetadata_ = other.echoMetadata_;
+ checkAuthContext_ = other.checkAuthContext_;
+ responseMessageLength_ = other.responseMessageLength_;
+ echoPeer_ = other.echoPeer_;
+ expectedClientIdentity_ = other.expectedClientIdentity_;
+ skipCancelledCheck_ = other.skipCancelledCheck_;
+ expectedTransportSecurityType_ = other.expectedTransportSecurityType_;
+ DebugInfo = other.debugInfo_ != null ? other.DebugInfo.Clone() : null;
+ serverDie_ = other.serverDie_;
+ binaryErrorDetails_ = other.binaryErrorDetails_;
+ ExpectedError = other.expectedError_ != null ? other.ExpectedError.Clone() : null;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public RequestParams Clone() {
+ return new RequestParams(this);
+ }
+
+ /// <summary>Field number for the "echo_deadline" field.</summary>
+ public const int EchoDeadlineFieldNumber = 1;
+ private bool echoDeadline_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool EchoDeadline {
+ get { return echoDeadline_; }
+ set {
+ echoDeadline_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "client_cancel_after_us" field.</summary>
+ public const int ClientCancelAfterUsFieldNumber = 2;
+ private int clientCancelAfterUs_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int ClientCancelAfterUs {
+ get { return clientCancelAfterUs_; }
+ set {
+ clientCancelAfterUs_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "server_cancel_after_us" field.</summary>
+ public const int ServerCancelAfterUsFieldNumber = 3;
+ private int serverCancelAfterUs_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int ServerCancelAfterUs {
+ get { return serverCancelAfterUs_; }
+ set {
+ serverCancelAfterUs_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "echo_metadata" field.</summary>
+ public const int EchoMetadataFieldNumber = 4;
+ private bool echoMetadata_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool EchoMetadata {
+ get { return echoMetadata_; }
+ set {
+ echoMetadata_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "check_auth_context" field.</summary>
+ public const int CheckAuthContextFieldNumber = 5;
+ private bool checkAuthContext_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool CheckAuthContext {
+ get { return checkAuthContext_; }
+ set {
+ checkAuthContext_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "response_message_length" field.</summary>
+ public const int ResponseMessageLengthFieldNumber = 6;
+ private int responseMessageLength_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int ResponseMessageLength {
+ get { return responseMessageLength_; }
+ set {
+ responseMessageLength_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "echo_peer" field.</summary>
+ public const int EchoPeerFieldNumber = 7;
+ private bool echoPeer_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool EchoPeer {
+ get { return echoPeer_; }
+ set {
+ echoPeer_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "expected_client_identity" field.</summary>
+ public const int ExpectedClientIdentityFieldNumber = 8;
+ private string expectedClientIdentity_ = "";
+ /// <summary>
+ /// Setting this field forces check_auth_context.
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string ExpectedClientIdentity {
+ get { return expectedClientIdentity_; }
+ set {
+ expectedClientIdentity_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "skip_cancelled_check" field.</summary>
+ public const int SkipCancelledCheckFieldNumber = 9;
+ private bool skipCancelledCheck_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool SkipCancelledCheck {
+ get { return skipCancelledCheck_; }
+ set {
+ skipCancelledCheck_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "expected_transport_security_type" field.</summary>
+ public const int ExpectedTransportSecurityTypeFieldNumber = 10;
+ private string expectedTransportSecurityType_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string ExpectedTransportSecurityType {
+ get { return expectedTransportSecurityType_; }
+ set {
+ expectedTransportSecurityType_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "debug_info" field.</summary>
+ public const int DebugInfoFieldNumber = 11;
+ private global::Grpc.Testing.DebugInfo debugInfo_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Grpc.Testing.DebugInfo DebugInfo {
+ get { return debugInfo_; }
+ set {
+ debugInfo_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "server_die" field.</summary>
+ public const int ServerDieFieldNumber = 12;
+ private bool serverDie_;
+ /// <summary>
+ /// Server should not see a request with this set.
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool ServerDie {
+ get { return serverDie_; }
+ set {
+ serverDie_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "binary_error_details" field.</summary>
+ public const int BinaryErrorDetailsFieldNumber = 13;
+ private string binaryErrorDetails_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string BinaryErrorDetails {
+ get { return binaryErrorDetails_; }
+ set {
+ binaryErrorDetails_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "expected_error" field.</summary>
+ public const int ExpectedErrorFieldNumber = 14;
+ private global::Grpc.Testing.ErrorStatus expectedError_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Grpc.Testing.ErrorStatus ExpectedError {
+ get { return expectedError_; }
+ set {
+ expectedError_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as RequestParams);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(RequestParams other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (EchoDeadline != other.EchoDeadline) return false;
+ if (ClientCancelAfterUs != other.ClientCancelAfterUs) return false;
+ if (ServerCancelAfterUs != other.ServerCancelAfterUs) return false;
+ if (EchoMetadata != other.EchoMetadata) return false;
+ if (CheckAuthContext != other.CheckAuthContext) return false;
+ if (ResponseMessageLength != other.ResponseMessageLength) return false;
+ if (EchoPeer != other.EchoPeer) return false;
+ if (ExpectedClientIdentity != other.ExpectedClientIdentity) return false;
+ if (SkipCancelledCheck != other.SkipCancelledCheck) return false;
+ if (ExpectedTransportSecurityType != other.ExpectedTransportSecurityType) return false;
+ if (!object.Equals(DebugInfo, other.DebugInfo)) return false;
+ if (ServerDie != other.ServerDie) return false;
+ if (BinaryErrorDetails != other.BinaryErrorDetails) return false;
+ if (!object.Equals(ExpectedError, other.ExpectedError)) return false;
+ return true;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (EchoDeadline != false) hash ^= EchoDeadline.GetHashCode();
+ if (ClientCancelAfterUs != 0) hash ^= ClientCancelAfterUs.GetHashCode();
+ if (ServerCancelAfterUs != 0) hash ^= ServerCancelAfterUs.GetHashCode();
+ if (EchoMetadata != false) hash ^= EchoMetadata.GetHashCode();
+ if (CheckAuthContext != false) hash ^= CheckAuthContext.GetHashCode();
+ if (ResponseMessageLength != 0) hash ^= ResponseMessageLength.GetHashCode();
+ if (EchoPeer != false) hash ^= EchoPeer.GetHashCode();
+ if (ExpectedClientIdentity.Length != 0) hash ^= ExpectedClientIdentity.GetHashCode();
+ if (SkipCancelledCheck != false) hash ^= SkipCancelledCheck.GetHashCode();
+ if (ExpectedTransportSecurityType.Length != 0) hash ^= ExpectedTransportSecurityType.GetHashCode();
+ if (debugInfo_ != null) hash ^= DebugInfo.GetHashCode();
+ if (ServerDie != false) hash ^= ServerDie.GetHashCode();
+ if (BinaryErrorDetails.Length != 0) hash ^= BinaryErrorDetails.GetHashCode();
+ if (expectedError_ != null) hash ^= ExpectedError.GetHashCode();
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (EchoDeadline != false) {
+ output.WriteRawTag(8);
+ output.WriteBool(EchoDeadline);
+ }
+ if (ClientCancelAfterUs != 0) {
+ output.WriteRawTag(16);
+ output.WriteInt32(ClientCancelAfterUs);
+ }
+ if (ServerCancelAfterUs != 0) {
+ output.WriteRawTag(24);
+ output.WriteInt32(ServerCancelAfterUs);
+ }
+ if (EchoMetadata != false) {
+ output.WriteRawTag(32);
+ output.WriteBool(EchoMetadata);
+ }
+ if (CheckAuthContext != false) {
+ output.WriteRawTag(40);
+ output.WriteBool(CheckAuthContext);
+ }
+ if (ResponseMessageLength != 0) {
+ output.WriteRawTag(48);
+ output.WriteInt32(ResponseMessageLength);
+ }
+ if (EchoPeer != false) {
+ output.WriteRawTag(56);
+ output.WriteBool(EchoPeer);
+ }
+ if (ExpectedClientIdentity.Length != 0) {
+ output.WriteRawTag(66);
+ output.WriteString(ExpectedClientIdentity);
+ }
+ if (SkipCancelledCheck != false) {
+ output.WriteRawTag(72);
+ output.WriteBool(SkipCancelledCheck);
+ }
+ if (ExpectedTransportSecurityType.Length != 0) {
+ output.WriteRawTag(82);
+ output.WriteString(ExpectedTransportSecurityType);
+ }
+ if (debugInfo_ != null) {
+ output.WriteRawTag(90);
+ output.WriteMessage(DebugInfo);
+ }
+ if (ServerDie != false) {
+ output.WriteRawTag(96);
+ output.WriteBool(ServerDie);
+ }
+ if (BinaryErrorDetails.Length != 0) {
+ output.WriteRawTag(106);
+ output.WriteString(BinaryErrorDetails);
+ }
+ if (expectedError_ != null) {
+ output.WriteRawTag(114);
+ output.WriteMessage(ExpectedError);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (EchoDeadline != false) {
+ size += 1 + 1;
+ }
+ if (ClientCancelAfterUs != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(ClientCancelAfterUs);
+ }
+ if (ServerCancelAfterUs != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(ServerCancelAfterUs);
+ }
+ if (EchoMetadata != false) {
+ size += 1 + 1;
+ }
+ if (CheckAuthContext != false) {
+ size += 1 + 1;
+ }
+ if (ResponseMessageLength != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(ResponseMessageLength);
+ }
+ if (EchoPeer != false) {
+ size += 1 + 1;
+ }
+ if (ExpectedClientIdentity.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(ExpectedClientIdentity);
+ }
+ if (SkipCancelledCheck != false) {
+ size += 1 + 1;
+ }
+ if (ExpectedTransportSecurityType.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(ExpectedTransportSecurityType);
+ }
+ if (debugInfo_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(DebugInfo);
+ }
+ if (ServerDie != false) {
+ size += 1 + 1;
+ }
+ if (BinaryErrorDetails.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(BinaryErrorDetails);
+ }
+ if (expectedError_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(ExpectedError);
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(RequestParams other) {
+ if (other == null) {
+ return;
+ }
+ if (other.EchoDeadline != false) {
+ EchoDeadline = other.EchoDeadline;
+ }
+ if (other.ClientCancelAfterUs != 0) {
+ ClientCancelAfterUs = other.ClientCancelAfterUs;
+ }
+ if (other.ServerCancelAfterUs != 0) {
+ ServerCancelAfterUs = other.ServerCancelAfterUs;
+ }
+ if (other.EchoMetadata != false) {
+ EchoMetadata = other.EchoMetadata;
+ }
+ if (other.CheckAuthContext != false) {
+ CheckAuthContext = other.CheckAuthContext;
+ }
+ if (other.ResponseMessageLength != 0) {
+ ResponseMessageLength = other.ResponseMessageLength;
+ }
+ if (other.EchoPeer != false) {
+ EchoPeer = other.EchoPeer;
+ }
+ if (other.ExpectedClientIdentity.Length != 0) {
+ ExpectedClientIdentity = other.ExpectedClientIdentity;
+ }
+ if (other.SkipCancelledCheck != false) {
+ SkipCancelledCheck = other.SkipCancelledCheck;
+ }
+ if (other.ExpectedTransportSecurityType.Length != 0) {
+ ExpectedTransportSecurityType = other.ExpectedTransportSecurityType;
+ }
+ if (other.debugInfo_ != null) {
+ if (debugInfo_ == null) {
+ debugInfo_ = new global::Grpc.Testing.DebugInfo();
+ }
+ DebugInfo.MergeFrom(other.DebugInfo);
+ }
+ if (other.ServerDie != false) {
+ ServerDie = other.ServerDie;
+ }
+ if (other.BinaryErrorDetails.Length != 0) {
+ BinaryErrorDetails = other.BinaryErrorDetails;
+ }
+ if (other.expectedError_ != null) {
+ if (expectedError_ == null) {
+ expectedError_ = new global::Grpc.Testing.ErrorStatus();
+ }
+ ExpectedError.MergeFrom(other.ExpectedError);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ input.SkipLastField();
+ break;
+ case 8: {
+ EchoDeadline = input.ReadBool();
+ break;
+ }
+ case 16: {
+ ClientCancelAfterUs = input.ReadInt32();
+ break;
+ }
+ case 24: {
+ ServerCancelAfterUs = input.ReadInt32();
+ break;
+ }
+ case 32: {
+ EchoMetadata = input.ReadBool();
+ break;
+ }
+ case 40: {
+ CheckAuthContext = input.ReadBool();
+ break;
+ }
+ case 48: {
+ ResponseMessageLength = input.ReadInt32();
+ break;
+ }
+ case 56: {
+ EchoPeer = input.ReadBool();
+ break;
+ }
+ case 66: {
+ ExpectedClientIdentity = input.ReadString();
+ break;
+ }
+ case 72: {
+ SkipCancelledCheck = input.ReadBool();
+ break;
+ }
+ case 82: {
+ ExpectedTransportSecurityType = input.ReadString();
+ break;
+ }
+ case 90: {
+ if (debugInfo_ == null) {
+ debugInfo_ = new global::Grpc.Testing.DebugInfo();
+ }
+ input.ReadMessage(debugInfo_);
+ break;
+ }
+ case 96: {
+ ServerDie = input.ReadBool();
+ break;
+ }
+ case 106: {
+ BinaryErrorDetails = input.ReadString();
+ break;
+ }
+ case 114: {
+ if (expectedError_ == null) {
+ expectedError_ = new global::Grpc.Testing.ErrorStatus();
+ }
+ input.ReadMessage(expectedError_);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ public sealed partial class EchoRequest : pb::IMessage<EchoRequest> {
+ private static readonly pb::MessageParser<EchoRequest> _parser = new pb::MessageParser<EchoRequest>(() => new EchoRequest());
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<EchoRequest> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Grpc.Testing.EchoMessagesReflection.Descriptor.MessageTypes[3]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EchoRequest() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EchoRequest(EchoRequest other) : this() {
+ message_ = other.message_;
+ Param = other.param_ != null ? other.Param.Clone() : null;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EchoRequest Clone() {
+ return new EchoRequest(this);
+ }
+
+ /// <summary>Field number for the "message" field.</summary>
+ public const int MessageFieldNumber = 1;
+ private string message_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string Message {
+ get { return message_; }
+ set {
+ message_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "param" field.</summary>
+ public const int ParamFieldNumber = 2;
+ private global::Grpc.Testing.RequestParams param_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Grpc.Testing.RequestParams Param {
+ get { return param_; }
+ set {
+ param_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as EchoRequest);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(EchoRequest other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (Message != other.Message) return false;
+ if (!object.Equals(Param, other.Param)) return false;
+ return true;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (Message.Length != 0) hash ^= Message.GetHashCode();
+ if (param_ != null) hash ^= Param.GetHashCode();
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (Message.Length != 0) {
+ output.WriteRawTag(10);
+ output.WriteString(Message);
+ }
+ if (param_ != null) {
+ output.WriteRawTag(18);
+ output.WriteMessage(Param);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (Message.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(Message);
+ }
+ if (param_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(Param);
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(EchoRequest other) {
+ if (other == null) {
+ return;
+ }
+ if (other.Message.Length != 0) {
+ Message = other.Message;
+ }
+ if (other.param_ != null) {
+ if (param_ == null) {
+ param_ = new global::Grpc.Testing.RequestParams();
+ }
+ Param.MergeFrom(other.Param);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ input.SkipLastField();
+ break;
+ case 10: {
+ Message = input.ReadString();
+ break;
+ }
+ case 18: {
+ if (param_ == null) {
+ param_ = new global::Grpc.Testing.RequestParams();
+ }
+ input.ReadMessage(param_);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ public sealed partial class ResponseParams : pb::IMessage<ResponseParams> {
+ private static readonly pb::MessageParser<ResponseParams> _parser = new pb::MessageParser<ResponseParams>(() => new ResponseParams());
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<ResponseParams> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Grpc.Testing.EchoMessagesReflection.Descriptor.MessageTypes[4]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ResponseParams() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ResponseParams(ResponseParams other) : this() {
+ requestDeadline_ = other.requestDeadline_;
+ host_ = other.host_;
+ peer_ = other.peer_;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ResponseParams Clone() {
+ return new ResponseParams(this);
+ }
+
+ /// <summary>Field number for the "request_deadline" field.</summary>
+ public const int RequestDeadlineFieldNumber = 1;
+ private long requestDeadline_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public long RequestDeadline {
+ get { return requestDeadline_; }
+ set {
+ requestDeadline_ = value;
+ }
+ }
+
+ /// <summary>Field number for the "host" field.</summary>
+ public const int HostFieldNumber = 2;
+ private string host_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string Host {
+ get { return host_; }
+ set {
+ host_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "peer" field.</summary>
+ public const int PeerFieldNumber = 3;
+ private string peer_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string Peer {
+ get { return peer_; }
+ set {
+ peer_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as ResponseParams);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(ResponseParams other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (RequestDeadline != other.RequestDeadline) return false;
+ if (Host != other.Host) return false;
+ if (Peer != other.Peer) return false;
+ return true;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (RequestDeadline != 0L) hash ^= RequestDeadline.GetHashCode();
+ if (Host.Length != 0) hash ^= Host.GetHashCode();
+ if (Peer.Length != 0) hash ^= Peer.GetHashCode();
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (RequestDeadline != 0L) {
+ output.WriteRawTag(8);
+ output.WriteInt64(RequestDeadline);
+ }
+ if (Host.Length != 0) {
+ output.WriteRawTag(18);
+ output.WriteString(Host);
+ }
+ if (Peer.Length != 0) {
+ output.WriteRawTag(26);
+ output.WriteString(Peer);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (RequestDeadline != 0L) {
+ size += 1 + pb::CodedOutputStream.ComputeInt64Size(RequestDeadline);
+ }
+ if (Host.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(Host);
+ }
+ if (Peer.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(Peer);
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(ResponseParams other) {
+ if (other == null) {
+ return;
+ }
+ if (other.RequestDeadline != 0L) {
+ RequestDeadline = other.RequestDeadline;
+ }
+ if (other.Host.Length != 0) {
+ Host = other.Host;
+ }
+ if (other.Peer.Length != 0) {
+ Peer = other.Peer;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ input.SkipLastField();
+ break;
+ case 8: {
+ RequestDeadline = input.ReadInt64();
+ break;
+ }
+ case 18: {
+ Host = input.ReadString();
+ break;
+ }
+ case 26: {
+ Peer = input.ReadString();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ public sealed partial class EchoResponse : pb::IMessage<EchoResponse> {
+ private static readonly pb::MessageParser<EchoResponse> _parser = new pb::MessageParser<EchoResponse>(() => new EchoResponse());
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<EchoResponse> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Grpc.Testing.EchoMessagesReflection.Descriptor.MessageTypes[5]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EchoResponse() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EchoResponse(EchoResponse other) : this() {
+ message_ = other.message_;
+ Param = other.param_ != null ? other.Param.Clone() : null;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EchoResponse Clone() {
+ return new EchoResponse(this);
+ }
+
+ /// <summary>Field number for the "message" field.</summary>
+ public const int MessageFieldNumber = 1;
+ private string message_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string Message {
+ get { return message_; }
+ set {
+ message_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// <summary>Field number for the "param" field.</summary>
+ public const int ParamFieldNumber = 2;
+ private global::Grpc.Testing.ResponseParams param_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Grpc.Testing.ResponseParams Param {
+ get { return param_; }
+ set {
+ param_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as EchoResponse);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(EchoResponse other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (Message != other.Message) return false;
+ if (!object.Equals(Param, other.Param)) return false;
+ return true;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (Message.Length != 0) hash ^= Message.GetHashCode();
+ if (param_ != null) hash ^= Param.GetHashCode();
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (Message.Length != 0) {
+ output.WriteRawTag(10);
+ output.WriteString(Message);
+ }
+ if (param_ != null) {
+ output.WriteRawTag(18);
+ output.WriteMessage(Param);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (Message.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(Message);
+ }
+ if (param_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(Param);
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(EchoResponse other) {
+ if (other == null) {
+ return;
+ }
+ if (other.Message.Length != 0) {
+ Message = other.Message;
+ }
+ if (other.param_ != null) {
+ if (param_ == null) {
+ param_ = new global::Grpc.Testing.ResponseParams();
+ }
+ Param.MergeFrom(other.Param);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ input.SkipLastField();
+ break;
+ case 10: {
+ Message = input.ReadString();
+ break;
+ }
+ case 18: {
+ if (param_ == null) {
+ param_ = new global::Grpc.Testing.ResponseParams();
+ }
+ input.ReadMessage(param_);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
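
The regenerated Grpc.Testing message classes above expose the standard Google.Protobuf C# surface: a static Parser, value-based Equals/GetHashCode, WriteTo/CalculateSize for serialization, and MergeFrom for both message and stream merging. A minimal, hypothetical usage sketch follows (assuming only the Google.Protobuf runtime; the sketch itself is not part of this commit):

    using Google.Protobuf;
    using Grpc.Testing;

    static class EchoMessagesSketch
    {
        public static void Main()
        {
            // Build a request with a nested RequestParams, mirroring the generated fields above.
            var request = new EchoRequest
            {
                Message = "hello",
                Param = new RequestParams
                {
                    EchoDeadline = true,
                    ExpectedError = new ErrorStatus { Code = 13, ErrorMessage = "injected" }
                }
            };

            // Round-trip through the proto wire format via the generated parser.
            byte[] bytes = request.ToByteArray();
            EchoRequest parsed = EchoRequest.Parser.ParseFrom(bytes);

            // Generated equality is value-based and recurses into submessages.
            System.Diagnostics.Debug.Assert(request.Equals(parsed));
        }
    }
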
diff --git a/src/csharp/Grpc.IntegrationTesting/Services.cs b/src/csharp/Grpc.IntegrationTesting/Services.cs
index bf36a0253b..7a0845dffb 100644
--- a/src/csharp/Grpc.IntegrationTesting/Services.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Services.cs
@@ -24,20 +24,28 @@ namespace Grpc.Testing {
string.Concat(
"CiVzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3NlcnZpY2VzLnByb3RvEgxncnBj",
"LnRlc3RpbmcaJXNyYy9wcm90by9ncnBjL3Rlc3RpbmcvbWVzc2FnZXMucHJv",
- "dG8aJHNyYy9wcm90by9ncnBjL3Rlc3RpbmcvY29udHJvbC5wcm90bzKqAQoQ",
- "QmVuY2htYXJrU2VydmljZRJGCglVbmFyeUNhbGwSGy5ncnBjLnRlc3Rpbmcu",
- "U2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGluZy5TaW1wbGVSZXNwb25zZRJO",
- "Cg1TdHJlYW1pbmdDYWxsEhsuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3Qa",
- "HC5ncnBjLnRlc3RpbmcuU2ltcGxlUmVzcG9uc2UoATABMpcCCg1Xb3JrZXJT",
- "ZXJ2aWNlEkUKCVJ1blNlcnZlchIYLmdycGMudGVzdGluZy5TZXJ2ZXJBcmdz",
- "GhouZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXR1cygBMAESRQoJUnVuQ2xpZW50",
- "EhguZ3JwYy50ZXN0aW5nLkNsaWVudEFyZ3MaGi5ncnBjLnRlc3RpbmcuQ2xp",
- "ZW50U3RhdHVzKAEwARJCCglDb3JlQ291bnQSGS5ncnBjLnRlc3RpbmcuQ29y",
- "ZVJlcXVlc3QaGi5ncnBjLnRlc3RpbmcuQ29yZVJlc3BvbnNlEjQKClF1aXRX",
- "b3JrZXISEi5ncnBjLnRlc3RpbmcuVm9pZBoSLmdycGMudGVzdGluZy5Wb2lk",
- "YgZwcm90bzM="));
+ "dG8aJHNyYy9wcm90by9ncnBjL3Rlc3RpbmcvY29udHJvbC5wcm90bxoic3Jj",
+ "L3Byb3RvL2dycGMvdGVzdGluZy9zdGF0cy5wcm90bzKmAwoQQmVuY2htYXJr",
+ "U2VydmljZRJGCglVbmFyeUNhbGwSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVx",
+ "dWVzdBocLmdycGMudGVzdGluZy5TaW1wbGVSZXNwb25zZRJOCg1TdHJlYW1p",
+ "bmdDYWxsEhsuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRl",
+ "c3RpbmcuU2ltcGxlUmVzcG9uc2UoATABElIKE1N0cmVhbWluZ0Zyb21DbGll",
+ "bnQSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGlu",
+ "Zy5TaW1wbGVSZXNwb25zZSgBElIKE1N0cmVhbWluZ0Zyb21TZXJ2ZXISGy5n",
+ "cnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGluZy5TaW1w",
+ "bGVSZXNwb25zZTABElIKEVN0cmVhbWluZ0JvdGhXYXlzEhsuZ3JwYy50ZXN0",
+ "aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRlc3RpbmcuU2ltcGxlUmVzcG9u",
+ "c2UoATABMpcCCg1Xb3JrZXJTZXJ2aWNlEkUKCVJ1blNlcnZlchIYLmdycGMu",
+ "dGVzdGluZy5TZXJ2ZXJBcmdzGhouZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXR1",
+ "cygBMAESRQoJUnVuQ2xpZW50EhguZ3JwYy50ZXN0aW5nLkNsaWVudEFyZ3Ma",
+ "Gi5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHVzKAEwARJCCglDb3JlQ291bnQS",
+ "GS5ncnBjLnRlc3RpbmcuQ29yZVJlcXVlc3QaGi5ncnBjLnRlc3RpbmcuQ29y",
+ "ZVJlc3BvbnNlEjQKClF1aXRXb3JrZXISEi5ncnBjLnRlc3RpbmcuVm9pZBoS",
+ "LmdycGMudGVzdGluZy5Wb2lkMl4KGFJlcG9ydFFwc1NjZW5hcmlvU2Vydmlj",
+ "ZRJCCg5SZXBvcnRTY2VuYXJpbxIcLmdycGMudGVzdGluZy5TY2VuYXJpb1Jl",
+ "c3VsdBoSLmdycGMudGVzdGluZy5Wb2lkYgZwcm90bzM="));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
- new pbr::FileDescriptor[] { global::Grpc.Testing.MessagesReflection.Descriptor, global::Grpc.Testing.ControlReflection.Descriptor, },
+ new pbr::FileDescriptor[] { global::Grpc.Testing.MessagesReflection.Descriptor, global::Grpc.Testing.ControlReflection.Descriptor, global::Grpc.Testing.StatsReflection.Descriptor, },
new pbr::GeneratedClrTypeInfo(null, null));
}
#endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index 143c9ac9fc..bd5971e296 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -46,6 +46,27 @@ namespace Grpc.Testing {
__Marshaller_SimpleRequest,
__Marshaller_SimpleResponse);
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_StreamingFromClient = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.ClientStreaming,
+ __ServiceName,
+ "StreamingFromClient",
+ __Marshaller_SimpleRequest,
+ __Marshaller_SimpleResponse);
+
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_StreamingFromServer = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.ServerStreaming,
+ __ServiceName,
+ "StreamingFromServer",
+ __Marshaller_SimpleRequest,
+ __Marshaller_SimpleResponse);
+
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_StreamingBothWays = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.DuplexStreaming,
+ __ServiceName,
+ "StreamingBothWays",
+ __Marshaller_SimpleRequest,
+ __Marshaller_SimpleResponse);
+
/// <summary>Service descriptor</summary>
public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor
{
@@ -68,8 +89,9 @@ namespace Grpc.Testing {
}
/// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
+ /// Repeated sequence of one request followed by one response,
+ /// sometimes called streaming ping-pong.
+ /// The server returns the client payload as-is on each response.
/// </summary>
/// <param name="requestStream">Used for reading requests from the client.</param>
/// <param name="responseStream">Used for sending responses back to the client.</param>
@@ -80,6 +102,44 @@ namespace Grpc.Testing {
throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
+ /// <summary>
+ /// Single-sided unbounded streaming from client to server
+ /// The server returns the client payload as-is once the client invokes WritesDone
+ /// </summary>
+ /// <param name="requestStream">Used for reading requests from the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>The response to send back to the client (wrapped by a task).</returns>
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> StreamingFromClient(grpc::IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, grpc::ServerCallContext context)
+ {
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
+ }
+
+ /// <summary>
+ /// Single-sided unbounded streaming from server to client
+ /// The server repeatedly returns the client payload as-is
+ /// </summary>
+ /// <param name="request">The request received from the client.</param>
+ /// <param name="responseStream">Used for sending responses back to the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>A task indicating completion of the handler.</returns>
+ public virtual global::System.Threading.Tasks.Task StreamingFromServer(global::Grpc.Testing.SimpleRequest request, grpc::IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, grpc::ServerCallContext context)
+ {
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
+ }
+
+ /// <summary>
+ /// Two-sided unbounded streaming between client and server
+ /// Both sides send the content of their own choosing to the other
+ /// </summary>
+ /// <param name="requestStream">Used for reading requests from the client.</param>
+ /// <param name="responseStream">Used for sending responses back to the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>A task indicating completion of the handler.</returns>
+ public virtual global::System.Threading.Tasks.Task StreamingBothWays(grpc::IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, grpc::IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, grpc::ServerCallContext context)
+ {
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
+ }
+
}
/// <summary>Client for BenchmarkService</summary>
@@ -154,8 +214,9 @@ namespace Grpc.Testing {
return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request);
}
/// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
+ /// Repeated sequence of one request followed by one response,
+ /// sometimes called streaming ping-pong.
+ /// The server returns the client payload as-is on each response.
/// </summary>
/// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
@@ -166,8 +227,9 @@ namespace Grpc.Testing {
return StreamingCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
+ /// Repeated sequence of one request followed by one response,
+ /// sometimes called streaming ping-pong.
+ /// The server returns the client payload as-is on each response.
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
@@ -175,6 +237,74 @@ namespace Grpc.Testing {
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_StreamingCall, null, options);
}
+ /// <summary>
+ /// Single-sided unbounded streaming from client to server
+ /// The server returns the client payload as-is once the client invokes WritesDone
+ /// </summary>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingFromClient(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return StreamingFromClient(new grpc::CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// Single-sided unbounded streaming from client to server
+ /// The server returns the client payload as-is once the client invokes WritesDone
+ /// </summary>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingFromClient(grpc::CallOptions options)
+ {
+ return CallInvoker.AsyncClientStreamingCall(__Method_StreamingFromClient, null, options);
+ }
+ /// <summary>
+ /// Single-sided unbounded streaming from server to client
+ /// The server repeatedly returns the client payload as-is
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.SimpleResponse> StreamingFromServer(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return StreamingFromServer(request, new grpc::CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// Single-sided unbounded streaming from server to client
+ /// The server repeatedly returns the client payload as-is
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.SimpleResponse> StreamingFromServer(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
+ {
+ return CallInvoker.AsyncServerStreamingCall(__Method_StreamingFromServer, null, options, request);
+ }
+ /// <summary>
+ /// Two-sided unbounded streaming between client and server
+ /// Both sides send the content of their own choosing to the other
+ /// </summary>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingBothWays(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return StreamingBothWays(new grpc::CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// Two-sided unbounded streaming between client and server
+ /// Both sides send the content of their own choosing to the other
+ /// </summary>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingBothWays(grpc::CallOptions options)
+ {
+ return CallInvoker.AsyncDuplexStreamingCall(__Method_StreamingBothWays, null, options);
+ }
/// <summary>Creates a new instance of client from given <c>ClientBaseConfiguration</c>.</summary>
protected override BenchmarkServiceClient NewInstance(ClientBaseConfiguration configuration)
{
@@ -188,7 +318,10 @@ namespace Grpc.Testing {
{
return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
- .AddMethod(__Method_StreamingCall, serviceImpl.StreamingCall).Build();
+ .AddMethod(__Method_StreamingCall, serviceImpl.StreamingCall)
+ .AddMethod(__Method_StreamingFromClient, serviceImpl.StreamingFromClient)
+ .AddMethod(__Method_StreamingFromServer, serviceImpl.StreamingFromServer)
+ .AddMethod(__Method_StreamingBothWays, serviceImpl.StreamingBothWays).Build();
}
}
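
The three new BenchmarkService methods map onto the three gRPC streaming arities: StreamingFromClient is client-streaming (AsyncClientStreamingCall), StreamingFromServer is server-streaming (AsyncServerStreamingCall), and StreamingBothWays is duplex (AsyncDuplexStreamingCall). A minimal, hypothetical client-side sketch for the client-streaming case, assuming a reachable server that implements the method (not part of this commit):

    using System.Threading.Tasks;
    using Grpc.Core;
    using Grpc.Testing;

    static class BenchmarkStreamingSketch
    {
        // Client-streaming: write a few requests, half-close, then await the
        // single response (CompleteAsync is the C# analogue of WritesDone).
        public static async Task RunAsync(BenchmarkService.BenchmarkServiceClient client)
        {
            using (var call = client.StreamingFromClient())
            {
                for (int i = 0; i < 3; i++)
                {
                    await call.RequestStream.WriteAsync(new SimpleRequest { ResponseSize = 64 });
                }
                await call.RequestStream.CompleteAsync();
                SimpleResponse response = await call.ResponseAsync;
            }
        }
    }
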
@@ -489,5 +622,124 @@ namespace Grpc.Testing {
}
}
+ public static partial class ReportQpsScenarioService
+ {
+ static readonly string __ServiceName = "grpc.testing.ReportQpsScenarioService";
+
+ static readonly grpc::Marshaller<global::Grpc.Testing.ScenarioResult> __Marshaller_ScenarioResult = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ScenarioResult.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.Void> __Marshaller_Void = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Void.Parser.ParseFrom);
+
+ static readonly grpc::Method<global::Grpc.Testing.ScenarioResult, global::Grpc.Testing.Void> __Method_ReportScenario = new grpc::Method<global::Grpc.Testing.ScenarioResult, global::Grpc.Testing.Void>(
+ grpc::MethodType.Unary,
+ __ServiceName,
+ "ReportScenario",
+ __Marshaller_ScenarioResult,
+ __Marshaller_Void);
+
+ /// <summary>Service descriptor</summary>
+ public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor
+ {
+ get { return global::Grpc.Testing.ServicesReflection.Descriptor.Services[2]; }
+ }
+
+ /// <summary>Base class for server-side implementations of ReportQpsScenarioService</summary>
+ public abstract partial class ReportQpsScenarioServiceBase
+ {
+ /// <summary>
+ /// Report results of a QPS test benchmark scenario.
+ /// </summary>
+ /// <param name="request">The request received from the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>The response to send back to the client (wrapped by a task).</returns>
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Void> ReportScenario(global::Grpc.Testing.ScenarioResult request, grpc::ServerCallContext context)
+ {
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
+ }
+
+ }
+
+ /// <summary>Client for ReportQpsScenarioService</summary>
+ public partial class ReportQpsScenarioServiceClient : grpc::ClientBase<ReportQpsScenarioServiceClient>
+ {
+ /// <summary>Creates a new client for ReportQpsScenarioService</summary>
+ /// <param name="channel">The channel to use to make remote calls.</param>
+ public ReportQpsScenarioServiceClient(grpc::Channel channel) : base(channel)
+ {
+ }
+ /// <summary>Creates a new client for ReportQpsScenarioService that uses a custom <c>CallInvoker</c>.</summary>
+ /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
+ public ReportQpsScenarioServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
+ {
+ }
+ /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+ protected ReportQpsScenarioServiceClient() : base()
+ {
+ }
+ /// <summary>Protected constructor to allow creation of configured clients.</summary>
+ /// <param name="configuration">The client configuration.</param>
+ protected ReportQpsScenarioServiceClient(ClientBaseConfiguration configuration) : base(configuration)
+ {
+ }
+
+ /// <summary>
+ /// Report results of a QPS test benchmark scenario.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The response received from the server.</returns>
+ public virtual global::Grpc.Testing.Void ReportScenario(global::Grpc.Testing.ScenarioResult request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return ReportScenario(request, new grpc::CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// Report results of a QPS test benchmark scenario.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The response received from the server.</returns>
+ public virtual global::Grpc.Testing.Void ReportScenario(global::Grpc.Testing.ScenarioResult request, grpc::CallOptions options)
+ {
+ return CallInvoker.BlockingUnaryCall(__Method_ReportScenario, null, options, request);
+ }
+ /// <summary>
+ /// Report results of a QPS test benchmark scenario.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> ReportScenarioAsync(global::Grpc.Testing.ScenarioResult request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return ReportScenarioAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// Report results of a QPS test benchmark scenario.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> ReportScenarioAsync(global::Grpc.Testing.ScenarioResult request, grpc::CallOptions options)
+ {
+ return CallInvoker.AsyncUnaryCall(__Method_ReportScenario, null, options, request);
+ }
+ /// <summary>Creates a new instance of client from given <c>ClientBaseConfiguration</c>.</summary>
+ protected override ReportQpsScenarioServiceClient NewInstance(ClientBaseConfiguration configuration)
+ {
+ return new ReportQpsScenarioServiceClient(configuration);
+ }
+ }
+
+ /// <summary>Creates service definition that can be registered with a server</summary>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static grpc::ServerServiceDefinition BindService(ReportQpsScenarioServiceBase serviceImpl)
+ {
+ return grpc::ServerServiceDefinition.CreateBuilder()
+ .AddMethod(__Method_ReportScenario, serviceImpl.ReportScenario).Build();
+ }
+
+ }
}
#endregion
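
ReportQpsScenarioService is a plain unary service: a server implements it by deriving from the generated base class and registering the result of BindService. A minimal, hypothetical implementation sketch (assuming Grpc.Core; not part of this commit):

    using System.Threading.Tasks;
    using Grpc.Core;
    using Grpc.Testing;

    class ReportQpsScenarioServiceImpl : ReportQpsScenarioService.ReportQpsScenarioServiceBase
    {
        public override Task<Void> ReportScenario(ScenarioResult request, ServerCallContext context)
        {
            // A real implementation would record the benchmark result; this stub just acks.
            return Task.FromResult(new Void());
        }
    }

    // Registration against a Grpc.Core server (hypothetical):
    //   server.Services.Add(ReportQpsScenarioService.BindService(new ReportQpsScenarioServiceImpl()));
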
diff --git a/src/csharp/Grpc.IntegrationTesting/Stats.cs b/src/csharp/Grpc.IntegrationTesting/Stats.cs
index 79ff220436..23b56df6bd 100644
--- a/src/csharp/Grpc.IntegrationTesting/Stats.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Stats.cs
@@ -23,27 +23,28 @@ namespace Grpc.Testing {
byte[] descriptorData = global::System.Convert.FromBase64String(
string.Concat(
"CiJzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3N0YXRzLnByb3RvEgxncnBjLnRl",
- "c3RpbmciegoLU2VydmVyU3RhdHMSFAoMdGltZV9lbGFwc2VkGAEgASgBEhEK",
- "CXRpbWVfdXNlchgCIAEoARITCgt0aW1lX3N5c3RlbRgDIAEoARIWCg50b3Rh",
- "bF9jcHVfdGltZRgEIAEoBBIVCg1pZGxlX2NwdV90aW1lGAUgASgEIjsKD0hp",
- "c3RvZ3JhbVBhcmFtcxISCgpyZXNvbHV0aW9uGAEgASgBEhQKDG1heF9wb3Nz",
- "aWJsZRgCIAEoASJ3Cg1IaXN0b2dyYW1EYXRhEg4KBmJ1Y2tldBgBIAMoDRIQ",
- "CghtaW5fc2VlbhgCIAEoARIQCghtYXhfc2VlbhgDIAEoARILCgNzdW0YBCAB",
- "KAESFgoOc3VtX29mX3NxdWFyZXMYBSABKAESDQoFY291bnQYBiABKAEiOAoS",
- "UmVxdWVzdFJlc3VsdENvdW50EhMKC3N0YXR1c19jb2RlGAEgASgFEg0KBWNv",
- "dW50GAIgASgDIrYBCgtDbGllbnRTdGF0cxIuCglsYXRlbmNpZXMYASABKAsy",
- "Gy5ncnBjLnRlc3RpbmcuSGlzdG9ncmFtRGF0YRIUCgx0aW1lX2VsYXBzZWQY",
- "AiABKAESEQoJdGltZV91c2VyGAMgASgBEhMKC3RpbWVfc3lzdGVtGAQgASgB",
- "EjkKD3JlcXVlc3RfcmVzdWx0cxgFIAMoCzIgLmdycGMudGVzdGluZy5SZXF1",
- "ZXN0UmVzdWx0Q291bnRiBnByb3RvMw=="));
+ "c3RpbmcikQEKC1NlcnZlclN0YXRzEhQKDHRpbWVfZWxhcHNlZBgBIAEoARIR",
+ "Cgl0aW1lX3VzZXIYAiABKAESEwoLdGltZV9zeXN0ZW0YAyABKAESFgoOdG90",
+ "YWxfY3B1X3RpbWUYBCABKAQSFQoNaWRsZV9jcHVfdGltZRgFIAEoBBIVCg1j",
+ "cV9wb2xsX2NvdW50GAYgASgEIjsKD0hpc3RvZ3JhbVBhcmFtcxISCgpyZXNv",
+ "bHV0aW9uGAEgASgBEhQKDG1heF9wb3NzaWJsZRgCIAEoASJ3Cg1IaXN0b2dy",
+ "YW1EYXRhEg4KBmJ1Y2tldBgBIAMoDRIQCghtaW5fc2VlbhgCIAEoARIQCght",
+ "YXhfc2VlbhgDIAEoARILCgNzdW0YBCABKAESFgoOc3VtX29mX3NxdWFyZXMY",
+ "BSABKAESDQoFY291bnQYBiABKAEiOAoSUmVxdWVzdFJlc3VsdENvdW50EhMK",
+ "C3N0YXR1c19jb2RlGAEgASgFEg0KBWNvdW50GAIgASgDIs0BCgtDbGllbnRT",
+ "dGF0cxIuCglsYXRlbmNpZXMYASABKAsyGy5ncnBjLnRlc3RpbmcuSGlzdG9n",
+ "cmFtRGF0YRIUCgx0aW1lX2VsYXBzZWQYAiABKAESEQoJdGltZV91c2VyGAMg",
+ "ASgBEhMKC3RpbWVfc3lzdGVtGAQgASgBEjkKD3JlcXVlc3RfcmVzdWx0cxgF",
+ "IAMoCzIgLmdycGMudGVzdGluZy5SZXF1ZXN0UmVzdWx0Q291bnQSFQoNY3Ff",
+ "cG9sbF9jb3VudBgGIAEoBGIGcHJvdG8z"));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { },
new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStats), global::Grpc.Testing.ServerStats.Parser, new[]{ "TimeElapsed", "TimeUser", "TimeSystem", "TotalCpuTime", "IdleCpuTime" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStats), global::Grpc.Testing.ServerStats.Parser, new[]{ "TimeElapsed", "TimeUser", "TimeSystem", "TotalCpuTime", "IdleCpuTime", "CqPollCount" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.HistogramParams), global::Grpc.Testing.HistogramParams.Parser, new[]{ "Resolution", "MaxPossible" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.HistogramData), global::Grpc.Testing.HistogramData.Parser, new[]{ "Bucket", "MinSeen", "MaxSeen", "Sum", "SumOfSquares", "Count" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestResultCount), global::Grpc.Testing.RequestResultCount.Parser, new[]{ "StatusCode", "Count" }, null, null, null),
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStats), global::Grpc.Testing.ClientStats.Parser, new[]{ "Latencies", "TimeElapsed", "TimeUser", "TimeSystem", "RequestResults" }, null, null, null)
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStats), global::Grpc.Testing.ClientStats.Parser, new[]{ "Latencies", "TimeElapsed", "TimeUser", "TimeSystem", "RequestResults", "CqPollCount" }, null, null, null)
}));
}
#endregion
@@ -79,6 +80,7 @@ namespace Grpc.Testing {
timeSystem_ = other.timeSystem_;
totalCpuTime_ = other.totalCpuTime_;
idleCpuTime_ = other.idleCpuTime_;
+ cqPollCount_ = other.cqPollCount_;
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -157,6 +159,20 @@ namespace Grpc.Testing {
}
}
+ /// <summary>Field number for the "cq_poll_count" field.</summary>
+ public const int CqPollCountFieldNumber = 6;
+ private ulong cqPollCount_;
+ /// <summary>
+ /// Number of polls called inside completion queue
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ulong CqPollCount {
+ get { return cqPollCount_; }
+ set {
+ cqPollCount_ = value;
+ }
+ }
+
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as ServerStats);
@@ -175,6 +191,7 @@ namespace Grpc.Testing {
if (TimeSystem != other.TimeSystem) return false;
if (TotalCpuTime != other.TotalCpuTime) return false;
if (IdleCpuTime != other.IdleCpuTime) return false;
+ if (CqPollCount != other.CqPollCount) return false;
return true;
}
@@ -186,6 +203,7 @@ namespace Grpc.Testing {
if (TimeSystem != 0D) hash ^= TimeSystem.GetHashCode();
if (TotalCpuTime != 0UL) hash ^= TotalCpuTime.GetHashCode();
if (IdleCpuTime != 0UL) hash ^= IdleCpuTime.GetHashCode();
+ if (CqPollCount != 0UL) hash ^= CqPollCount.GetHashCode();
return hash;
}
@@ -216,6 +234,10 @@ namespace Grpc.Testing {
output.WriteRawTag(40);
output.WriteUInt64(IdleCpuTime);
}
+ if (CqPollCount != 0UL) {
+ output.WriteRawTag(48);
+ output.WriteUInt64(CqPollCount);
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -236,6 +258,9 @@ namespace Grpc.Testing {
if (IdleCpuTime != 0UL) {
size += 1 + pb::CodedOutputStream.ComputeUInt64Size(IdleCpuTime);
}
+ if (CqPollCount != 0UL) {
+ size += 1 + pb::CodedOutputStream.ComputeUInt64Size(CqPollCount);
+ }
return size;
}
@@ -259,6 +284,9 @@ namespace Grpc.Testing {
if (other.IdleCpuTime != 0UL) {
IdleCpuTime = other.IdleCpuTime;
}
+ if (other.CqPollCount != 0UL) {
+ CqPollCount = other.CqPollCount;
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -289,6 +317,10 @@ namespace Grpc.Testing {
IdleCpuTime = input.ReadUInt64();
break;
}
+ case 48: {
+ CqPollCount = input.ReadUInt64();
+ break;
+ }
}
}
}
@@ -876,6 +908,7 @@ namespace Grpc.Testing {
timeUser_ = other.timeUser_;
timeSystem_ = other.timeSystem_;
requestResults_ = other.requestResults_.Clone();
+ cqPollCount_ = other.cqPollCount_;
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -946,6 +979,20 @@ namespace Grpc.Testing {
get { return requestResults_; }
}
+ /// <summary>Field number for the "cq_poll_count" field.</summary>
+ public const int CqPollCountFieldNumber = 6;
+ private ulong cqPollCount_;
+ /// <summary>
+ /// Number of polls called inside completion queue
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ulong CqPollCount {
+ get { return cqPollCount_; }
+ set {
+ cqPollCount_ = value;
+ }
+ }
+
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as ClientStats);
@@ -964,6 +1011,7 @@ namespace Grpc.Testing {
if (TimeUser != other.TimeUser) return false;
if (TimeSystem != other.TimeSystem) return false;
if(!requestResults_.Equals(other.requestResults_)) return false;
+ if (CqPollCount != other.CqPollCount) return false;
return true;
}
@@ -975,6 +1023,7 @@ namespace Grpc.Testing {
if (TimeUser != 0D) hash ^= TimeUser.GetHashCode();
if (TimeSystem != 0D) hash ^= TimeSystem.GetHashCode();
hash ^= requestResults_.GetHashCode();
+ if (CqPollCount != 0UL) hash ^= CqPollCount.GetHashCode();
return hash;
}
@@ -1002,6 +1051,10 @@ namespace Grpc.Testing {
output.WriteDouble(TimeSystem);
}
requestResults_.WriteTo(output, _repeated_requestResults_codec);
+ if (CqPollCount != 0UL) {
+ output.WriteRawTag(48);
+ output.WriteUInt64(CqPollCount);
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1020,6 +1073,9 @@ namespace Grpc.Testing {
size += 1 + 8;
}
size += requestResults_.CalculateSize(_repeated_requestResults_codec);
+ if (CqPollCount != 0UL) {
+ size += 1 + pb::CodedOutputStream.ComputeUInt64Size(CqPollCount);
+ }
return size;
}
@@ -1044,6 +1100,9 @@ namespace Grpc.Testing {
TimeSystem = other.TimeSystem;
}
requestResults_.Add(other.requestResults_);
+ if (other.CqPollCount != 0UL) {
+ CqPollCount = other.CqPollCount;
+ }
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1077,6 +1136,10 @@ namespace Grpc.Testing {
requestResults_.AddEntriesFrom(input, _repeated_requestResults_codec);
break;
}
+ case 48: {
+ CqPollCount = input.ReadUInt64();
+ break;
+ }
}
}
}
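
For reference, cq_poll_count is field number 6 with varint wire type 0, so its wire-format key is (6 << 3) | 0 = 48; that is why the regenerated serializers above emit output.WriteRawTag(48) and size the field as one key byte plus the varint payload. A minimal C sketch of the key computation (make_tag is a hypothetical helper, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Protobuf wire format: key = (field_number << 3) | wire_type. */
    enum { WIRETYPE_VARINT = 0 };

    static uint32_t make_tag(uint32_t field_number, uint32_t wire_type) {
      return (field_number << 3) | wire_type;
    }

    int main(void) {
      /* cq_poll_count is field 6, encoded as a varint uint64. */
      printf("%u\n", make_tag(6, WIRETYPE_VARINT)); /* prints 48 */
      return 0;
    }
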
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index f5c0030309..aebce364c5 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -398,8 +398,14 @@ GPR_EXPORT grpc_call *GPR_CALLTYPE grpcsharp_channel_create_call(
host_slice = grpc_slice_from_copied_string(host);
host_slice_ptr = &host_slice;
}
- return grpc_channel_create_call(channel, parent_call, propagation_mask, cq,
- method_slice, host_slice_ptr, deadline, NULL);
+ grpc_call *ret =
+ grpc_channel_create_call(channel, parent_call, propagation_mask, cq,
+ method_slice, host_slice_ptr, deadline, NULL);
+ grpc_slice_unref(method_slice);
+ if (host != NULL) {
+ grpc_slice_unref(host_slice);
+ }
+ return ret;
}
GPR_EXPORT grpc_connectivity_state GPR_CALLTYPE
@@ -790,7 +796,9 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
ops[nops].reserved = NULL;
nops++;
}
- return grpcsharp_call_start_batch(call, ops, nops, ctx, NULL);
+ grpc_call_error ret = grpcsharp_call_start_batch(call, ops, nops, ctx, NULL);
+ grpc_slice_unref(status_details_slice);
+ return ret;
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
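
The two hunks above fix slice leaks in the C# native extension: grpc_slice_from_copied_string returns an owned reference, and grpc_channel_create_call (likewise the batch path for status_details_slice) takes its own internal references, so the caller must release its reference once the call or batch is set up. A minimal sketch of the pattern, assuming channel and cq already exist and with an illustrative method name:

    #include <grpc/grpc.h>
    #include <grpc/slice.h>
    #include <grpc/support/time.h>

    /* Sketch only: channel and cq are assumed to be created elsewhere. */
    static grpc_call *create_call_without_leaking(grpc_channel *channel,
                                                  grpc_completion_queue *cq) {
      grpc_slice method_slice = grpc_slice_from_copied_string("/foo/Bar");
      grpc_call *call = grpc_channel_create_call(
          channel, NULL /* parent call */, GRPC_PROPAGATE_DEFAULTS, cq,
          method_slice, NULL /* host: keep the channel's default authority */,
          gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);
      /* The call now holds its own reference to the slice; drop ours. */
      grpc_slice_unref(method_slice);
      return call;
    }
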
diff --git a/src/csharp/generate_proto_csharp.sh b/src/csharp/generate_proto_csharp.sh
index 8caaaabe0f..1a1adbbae5 100755
--- a/src/csharp/generate_proto_csharp.sh
+++ b/src/csharp/generate_proto_csharp.sh
@@ -37,4 +37,4 @@ $PROTOC --plugin=$PLUGIN --csharp_out=$REFLECTION_DIR --grpc_out=$REFLECTION_DIR
# don't match the package names. Setting -I to the correct value src/proto
# breaks the code generation.
$PROTOC --plugin=$PLUGIN --csharp_out=$TESTING_DIR --grpc_out=$TESTING_DIR \
- -I . src/proto/grpc/testing/{control,empty,messages,metrics,payloads,services,stats,test}.proto
+ -I . src/proto/grpc/testing/{control,echo_messages,empty,messages,metrics,payloads,services,stats,test}.proto
diff --git a/src/csharp/tests.json b/src/csharp/tests.json
index 707d140f62..bc6adbbfe8 100644
--- a/src/csharp/tests.json
+++ b/src/csharp/tests.json
@@ -42,6 +42,7 @@
"Grpc.HealthCheck.Tests.HealthServiceImplTest"
],
"Grpc.IntegrationTesting": [
+ "Grpc.IntegrationTesting.CustomErrorDetailsTest",
"Grpc.IntegrationTesting.GeneratedClientTest",
"Grpc.IntegrationTesting.GeneratedServiceBaseTest",
"Grpc.IntegrationTesting.HistogramTest",
diff --git a/src/node/src/grpc_extension.js b/src/node/src/grpc_extension.js
index c13bf819de..af43eacad2 100644
--- a/src/node/src/grpc_extension.js
+++ b/src/node/src/grpc_extension.js
@@ -16,6 +16,13 @@
*
*/
+/**
+ * @module
+ * @private
+ */
+
+'use strict';
+
var binary = require('node-pre-gyp/lib/pre-binding');
var path = require('path');
var binding_path =
diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js
index 8c750ea484..d58d18057e 100644
--- a/src/node/test/surface_test.js
+++ b/src/node/test/surface_test.js
@@ -1363,3 +1363,50 @@ describe('Cancelling surface client', function() {
call.cancel();
});
});
+describe('Client reconnect', function() {
+ var server;
+ var Client;
+ var client;
+ var port;
+ beforeEach(function() {
+ var test_proto = ProtoBuf.loadProtoFile(__dirname + '/echo_service.proto');
+ var echo_service = test_proto.lookup('EchoService');
+ Client = grpc.loadObject(echo_service);
+ server = new grpc.Server();
+ server.addService(Client.service, {
+ echo: function(call, callback) {
+ callback(null, call.request);
+ }
+ });
+ port = server.bind('localhost:0', server_insecure_creds);
+ client = new Client('localhost:' + port, grpc.credentials.createInsecure());
+ server.start();
+ });
+ afterEach(function() {
+ server.forceShutdown();
+ });
+ it('should reconnect after server restart', function(done) {
+ client.echo({value: 'test value', value2: 3}, function(error, response) {
+ assert.ifError(error);
+ assert.deepEqual(response, {value: 'test value', value2: 3});
+ server.tryShutdown(function() {
+ server = new grpc.Server();
+ server.addService(Client.service, {
+ echo: function(call, callback) {
+ callback(null, call.request);
+ }
+ });
+ server.bind('localhost:' + port, server_insecure_creds);
+ server.start();
+ client.echo(undefined, function(error, response) {
+ if (error) {
+ console.log(error);
+ }
+ assert.ifError(error);
+ assert.deepEqual(response, {value: '', value2: 0});
+ done();
+ });
+ });
+ });
+ });
+});
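
Two details worth noting in this test: the restarted server is bound to 'localhost:' + port, the concrete port returned by the first bind, rather than port 0 again, so the existing client channel can transparently reconnect to the same address; and the second echo sends undefined as the request, which serializes as an empty proto3 message, so the echoed response carries the field defaults asserted here ({value: '', value2: 0}).
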
diff --git a/src/objective-c/GRPCClient/GRPCCall.h b/src/objective-c/GRPCClient/GRPCCall.h
index 8c5bcf1c8b..178a446c8b 100644
--- a/src/objective-c/GRPCClient/GRPCCall.h
+++ b/src/objective-c/GRPCClient/GRPCCall.h
@@ -164,6 +164,12 @@ extern id const kGRPCTrailersKey;
@interface GRPCCall : GRXWriter
/**
+ * The authority for the RPC. If nil, the default authority will be used. This property must be nil
+ * when Cronet transport is enabled.
+ */
+@property (atomic, readwrite) NSString *serverName;
+
+/**
* The container of the request headers of an RPC conforms to this protocol, which is a subset of
* NSMutableDictionary's interface. It will become a NSMutableDictionary later on.
* The keys of this container are the header names, which per the HTTP standard are case-
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index 6ba401def4..872362419e 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -425,7 +425,7 @@ static NSMutableDictionary *callFlags;
_responseWriteable = [[GRXConcurrentWriteable alloc] initWithWriteable:writeable
dispatchQueue:_responseQueue];
- _wrappedCall = [[GRPCWrappedCall alloc] initWithHost:_host path:_path];
+ _wrappedCall = [[GRPCWrappedCall alloc] initWithHost:_host serverName:_serverName path:_path];
NSAssert(_wrappedCall, @"Error allocating RPC objects. Low memory?");
[self sendHeaders:_requestHeaders];
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.h b/src/objective-c/GRPCClient/private/GRPCChannel.h
index e4dfbca38d..e2aa5bd036 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.h
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.h
@@ -62,5 +62,6 @@ struct grpc_channel_credentials;
channelArgs:(nullable NSDictionary *)channelArgs;
- (nullable grpc_call *)unmanagedCallWithPath:(nonnull NSString *)path
+ serverName:(nonnull NSString *)serverName
completionQueue:(nonnull GRPCCompletionQueue *)queue;
@end
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.m b/src/objective-c/GRPCClient/private/GRPCChannel.m
index 79fe7c6e05..52dbc70b99 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.m
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.m
@@ -181,13 +181,24 @@ static grpc_channel_args *BuildChannelArgs(NSDictionary *dictionary) {
}
- (grpc_call *)unmanagedCallWithPath:(NSString *)path
+ serverName:(NSString *)serverName
completionQueue:(GRPCCompletionQueue *)queue {
- return grpc_channel_create_call(_unmanagedChannel,
- NULL, GRPC_PROPAGATE_DEFAULTS,
- queue.unmanagedQueue,
- grpc_slice_from_copied_string(path.UTF8String),
- NULL, // Passing NULL for host
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ grpc_slice host_slice;
+ if (serverName) {
+ host_slice = grpc_slice_from_copied_string(serverName.UTF8String);
+ }
+ grpc_slice path_slice = grpc_slice_from_copied_string(path.UTF8String);
+ grpc_call *call = grpc_channel_create_call(_unmanagedChannel,
+ NULL, GRPC_PROPAGATE_DEFAULTS,
+ queue.unmanagedQueue,
+ path_slice,
+ serverName ? &host_slice : NULL,
+ gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ if (serverName) {
+ grpc_slice_unref(host_slice);
+ }
+ grpc_slice_unref(path_slice);
+ return call;
}
@end
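
Passing a non-NULL host slice into grpc_channel_create_call overrides the call's authority, which is how the new serverName property takes effect; with NULL, the channel's default authority is used. The same create-then-unref discipline as in the grpc_csharp_ext.c fix above applies, since the core call retains the slices it needs. A short variant of the earlier sketch with an explicit authority (both strings are illustrative):

    grpc_slice method_slice = grpc_slice_from_copied_string("/foo/Bar");
    grpc_slice host_slice = grpc_slice_from_copied_string("test.example.com");
    grpc_call *call = grpc_channel_create_call(
        channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, method_slice,
        &host_slice /* overrides the default authority */,
        gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    grpc_slice_unref(method_slice);
    grpc_slice_unref(host_slice);
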
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.h b/src/objective-c/GRPCClient/private/GRPCHost.h
index 4b1f780dd2..0c1d715240 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.h
+++ b/src/objective-c/GRPCClient/private/GRPCHost.h
@@ -54,6 +54,7 @@ struct grpc_channel_credentials;
/** Create a grpc_call object to the provided path on this host. */
- (nullable struct grpc_call *)unmanagedCallWithPath:(NSString *)path
+ serverName:(NSString *)serverName
completionQueue:(GRPCCompletionQueue *)queue;
// TODO: There's a race when a new RPC is coming through just as an existing one is getting
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index 5b4d647a1a..23794c1fed 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -120,6 +120,7 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
}
- (nullable grpc_call *)unmanagedCallWithPath:(NSString *)path
+ serverName:(NSString *)serverName
completionQueue:(GRPCCompletionQueue *)queue {
GRPCChannel *channel;
// This is racing -[GRPCHost disconnect].
@@ -129,7 +130,7 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
}
channel = _channel;
}
- return [channel unmanagedCallWithPath:path completionQueue:queue];
+ return [channel unmanagedCallWithPath:path serverName:serverName completionQueue:queue];
}
- (BOOL)setTLSPEMRootCerts:(nullable NSString *)pemRootCerts
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.h b/src/objective-c/GRPCClient/private/GRPCWrappedCall.h
index ed245ff7ed..64075591a3 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.h
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.h
@@ -75,6 +75,7 @@
@interface GRPCWrappedCall : NSObject
- (instancetype)initWithHost:(NSString *)host
+ serverName:(NSString *)serverName
path:(NSString *)path NS_DESIGNATED_INITIALIZER;
- (void)startBatchWithOperations:(NSArray *)ops errorHandler:(void(^)())errorHandler;
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
index 8c8b0b2570..9802465001 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
@@ -75,6 +75,10 @@
}
- (void)dealloc {
+ for (int i = 0; i < _op.data.send_initial_metadata.count; i++) {
+ grpc_slice_unref(_op.data.send_initial_metadata.metadata[i].key);
+ grpc_slice_unref(_op.data.send_initial_metadata.metadata[i].value);
+ }
gpr_free(_op.data.send_initial_metadata.metadata);
}
@@ -232,10 +236,11 @@
}
- (instancetype)init {
- return [self initWithHost:nil path:nil];
+ return [self initWithHost:nil serverName:nil path:nil];
}
- (instancetype)initWithHost:(NSString *)host
+ serverName:(NSString *)serverName
path:(NSString *)path {
if (!path || !host) {
[NSException raise:NSInvalidArgumentException
@@ -248,7 +253,7 @@
// queue. Currently we use a singleton queue.
_queue = [GRPCCompletionQueue completionQueue];
- _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path completionQueue:_queue];
+ _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path serverName:serverName completionQueue:_queue];
if (_call == NULL) {
return nil;
}
diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
index aa52239f8f..453b0752c3 100644
--- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
+++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
@@ -246,6 +246,10 @@ static char *roots_filename;
[self testIndividualCase:"cancel_after_invoke"];
}
+- (void)testCancelAfterRoundTrip {
+ [self testIndividualCase:"cancel_after_round_trip"];
+}
+
- (void)testCancelBeforeInvoke {
[self testIndividualCase:"cancel_before_invoke"];
}
diff --git a/src/objective-c/tests/PluginTest/imported-with-dash.proto b/src/objective-c/tests/PluginTest/imported-with-dash.proto
new file mode 100644
index 0000000000..c01bbecc07
--- /dev/null
+++ b/src/objective-c/tests/PluginTest/imported-with-dash.proto
@@ -0,0 +1,22 @@
+// Copyright 2017 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+syntax = "proto3";
+
+package grpc.testing;
+
+option objc_class_prefix = "RMT";
+
+message TestMessageImported {
+ int32 dummy = 1;
+}
diff --git a/src/objective-c/tests/PluginTest/test-dash-filename.proto b/src/objective-c/tests/PluginTest/test-dash-filename.proto
new file mode 100644
index 0000000000..afbb6035df
--- /dev/null
+++ b/src/objective-c/tests/PluginTest/test-dash-filename.proto
@@ -0,0 +1,27 @@
+// Copyright 2017 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+syntax = "proto3";
+
+package grpc.testing;
+
+option objc_class_prefix = "RMT";
+
+import "imported-with-dash.proto";
+
+message TestMessage {
+ int32 dummy = 1;
+}
+
+service DummyService {
+}
diff --git a/src/objective-c/tests/build_tests.sh b/src/objective-c/tests/build_tests.sh
index 711a9cc8e4..340044f322 100755
--- a/src/objective-c/tests/build_tests.sh
+++ b/src/objective-c/tests/build_tests.sh
@@ -37,3 +37,4 @@ rm -f RemoteTestClient/*.{h,m}
echo "TIME: $(date)"
pod install
+
diff --git a/src/objective-c/tests/run_tests.sh b/src/objective-c/tests/run_tests.sh
index 6f27d1c462..8fa9439284 100755
--- a/src/objective-c/tests/run_tests.sh
+++ b/src/objective-c/tests/run_tests.sh
@@ -23,6 +23,38 @@ cd $(dirname $0)
# Run the tests server.
BINDIR=../../../bins/$CONFIG
+PROTOC=$BINDIR/protobuf/protoc
+PLUGIN=$BINDIR/grpc_objective_c_plugin
+
+rm -rf PluginTest/*pb*
+
+# Verify the output proto filename
+eval $PROTOC \
+ --plugin=protoc-gen-grpc=$PLUGIN \
+ --objc_out=PluginTest \
+ --grpc_out=PluginTest \
+ -I PluginTest \
+ -I ../../../third_party/protobuf/src \
+ PluginTest/*.proto
+
+[ -e ./PluginTest/TestDashFilename.pbrpc.h ] || {
+ echo >&2 "protoc outputs wrong filename."
+ exit 1
+}
+
+# Verify names of the imported protos in generated code
+[ "`cat PluginTest/TestDashFilename.pbrpc.h |
+ egrep '#import ".*\.pb(objc|rpc)\.h"$' |
+ egrep '-'`" ] && {
+ echo >&2 "protoc generated import with wrong filename."
+ exit 1
+}
+[ "`cat PluginTest/TestDashFilename.pbrpc.m |
+ egrep '#import ".*\.pb(objc|rpc)\.m"$' |
+ egrep '-'`" ] && {
+ echo >&2 "protoc generated import with wrong filename."
+ exit 1
+}
[ -f $BINDIR/interop_server ] || {
echo >&2 "Can't find the test server. Make sure run_tests.py is making" \
diff --git a/src/php/ext/grpc/version.h b/src/php/ext/grpc/version.h
index 2e8f0f2b64..744028b2ca 100644
--- a/src/php/ext/grpc/version.h
+++ b/src/php/ext/grpc/version.h
@@ -20,6 +20,6 @@
#ifndef VERSION_H
#define VERSION_H
-#define PHP_GRPC_VERSION "1.5.0"
+#define PHP_GRPC_VERSION "1.5.0dev"
#endif /* VERSION_H */
diff --git a/src/proto/grpc/testing/control.proto b/src/proto/grpc/testing/control.proto
index c356fc5efa..4252a6f090 100644
--- a/src/proto/grpc/testing/control.proto
+++ b/src/proto/grpc/testing/control.proto
@@ -102,6 +102,9 @@ message ClientConfig {
repeated ChannelArg channel_args = 16;
+ // Number of threads that share each completion queue
+ int32 threads_per_cq = 17;
+
// Number of messages on a stream before it gets finished/restarted
int32 messages_per_stream = 18;
}
@@ -142,6 +145,9 @@ message ServerConfig {
// If we use an OTHER_SERVER client_type, this string gives more detail
string other_server_api = 11;
+ // Number of threads that share each completion queue
+ int32 threads_per_cq = 12;
+
// c++-only options (for now) --------------------------------
// Buffer pool size (no buffer pool specified if unset)
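
threads_per_cq lets the benchmark share one completion queue among several polling threads; gRPC core permits multiple threads to call grpc_completion_queue_next concurrently on the same queue. A minimal sketch of such a shared polling loop (the dispatch step is elided):

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    /* Each of the threads_per_cq worker threads can run this same loop. */
    static void poll_shared_cq(grpc_completion_queue *cq) {
      for (;;) {
        grpc_event ev = grpc_completion_queue_next(
            cq, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL /* reserved */);
        if (ev.type == GRPC_QUEUE_SHUTDOWN) break; /* queue fully drained */
        if (ev.type == GRPC_OP_COMPLETE) {
          /* ...dispatch on ev.tag and ev.success here... */
        }
      }
    }
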
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index 1562c3e24d..cf4ce0941b 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -849,7 +849,10 @@ def _poll_connectivity(state, channel, initial_try_to_connect):
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
if not state.delivering:
- callbacks = _deliveries(state)
+ # NOTE(nathaniel): The field is only ever used as a
+ # sequence so it's fine that both lists and tuples are
+ # assigned to it.
+ callbacks = _deliveries(state) # pylint: disable=redefined-variable-type
if callbacks:
_spawn_delivery(state, callbacks)
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index 9e2d40b67d..cd59b07c04 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -16,10 +16,11 @@
import collections
import enum
import logging
-import six
import threading
import time
+import six
+
import grpc
from grpc import _common
from grpc._cython import cygrpc
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 1a5c537d1f..5819a624f7 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -39,7 +39,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/support/log_windows.c',
'src/core/lib/support/mpscq.c',
'src/core/lib/support/murmur_hash.c',
- 'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
'src/core/lib/support/string_util_windows.c',
@@ -143,8 +142,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
- 'src/core/lib/iomgr/workqueue_uv.c',
- 'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
@@ -287,6 +284,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c',
+ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c',
'src/core/ext/filters/load_reporting/load_reporting.c',
@@ -313,6 +311,7 @@ CORE_SOURCE_FILES = [
'src/core/plugin_registry/grpc_plugin_registry.c',
'src/boringssl/err_data.c',
'third_party/boringssl/crypto/aes/aes.c',
+ 'third_party/boringssl/crypto/aes/key_wrap.c',
'third_party/boringssl/crypto/aes/mode_wrappers.c',
'third_party/boringssl/crypto/asn1/a_bitstr.c',
'third_party/boringssl/crypto/asn1/a_bool.c',
@@ -344,12 +343,12 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/crypto/asn1/tasn_new.c',
'third_party/boringssl/crypto/asn1/tasn_typ.c',
'third_party/boringssl/crypto/asn1/tasn_utl.c',
+ 'third_party/boringssl/crypto/asn1/time_support.c',
'third_party/boringssl/crypto/asn1/x_bignum.c',
'third_party/boringssl/crypto/asn1/x_long.c',
'third_party/boringssl/crypto/base64/base64.c',
'third_party/boringssl/crypto/bio/bio.c',
'third_party/boringssl/crypto/bio/bio_mem.c',
- 'third_party/boringssl/crypto/bio/buffer.c',
'third_party/boringssl/crypto/bio/connect.c',
'third_party/boringssl/crypto/bio/fd.c',
'third_party/boringssl/crypto/bio/file.c',
@@ -456,12 +455,7 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/crypto/modes/ctr.c',
'third_party/boringssl/crypto/modes/gcm.c',
'third_party/boringssl/crypto/modes/ofb.c',
- 'third_party/boringssl/crypto/newhope/error_correction.c',
- 'third_party/boringssl/crypto/newhope/newhope.c',
- 'third_party/boringssl/crypto/newhope/ntt.c',
- 'third_party/boringssl/crypto/newhope/poly.c',
- 'third_party/boringssl/crypto/newhope/precomp.c',
- 'third_party/boringssl/crypto/newhope/reduce.c',
+ 'third_party/boringssl/crypto/modes/polyval.c',
'third_party/boringssl/crypto/obj/obj.c',
'third_party/boringssl/crypto/obj/obj_xref.c',
'third_party/boringssl/crypto/pem/pem_all.c',
@@ -472,14 +466,15 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/crypto/pem/pem_pkey.c',
'third_party/boringssl/crypto/pem/pem_x509.c',
'third_party/boringssl/crypto/pem/pem_xaux.c',
- 'third_party/boringssl/crypto/pkcs8/p5_pbe.c',
'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
'third_party/boringssl/crypto/pkcs8/pkcs8.c',
'third_party/boringssl/crypto/poly1305/poly1305.c',
'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
+ 'third_party/boringssl/crypto/pool/pool.c',
'third_party/boringssl/crypto/rand/deterministic.c',
+ 'third_party/boringssl/crypto/rand/fuchsia.c',
'third_party/boringssl/crypto/rand/rand.c',
'third_party/boringssl/crypto/rand/urandom.c',
'third_party/boringssl/crypto/rand/windows.c',
@@ -491,6 +486,7 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/crypto/rsa/rsa.c',
'third_party/boringssl/crypto/rsa/rsa_asn1.c',
'third_party/boringssl/crypto/rsa/rsa_impl.c',
+ 'third_party/boringssl/crypto/sha/sha1-altivec.c',
'third_party/boringssl/crypto/sha/sha1.c',
'third_party/boringssl/crypto/sha/sha256.c',
'third_party/boringssl/crypto/sha/sha512.c',
@@ -499,7 +495,6 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/crypto/thread_none.c',
'third_party/boringssl/crypto/thread_pthread.c',
'third_party/boringssl/crypto/thread_win.c',
- 'third_party/boringssl/crypto/time_support.c',
'third_party/boringssl/crypto/x509/a_digest.c',
'third_party/boringssl/crypto/x509/a_sign.c',
'third_party/boringssl/crypto/x509/a_strex.c',
@@ -583,6 +578,7 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/crypto/x509v3/v3_skey.c',
'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl/crypto/x509v3/v3_utl.c',
+ 'third_party/boringssl/ssl/bio_ssl.c',
'third_party/boringssl/ssl/custom_extensions.c',
'third_party/boringssl/ssl/d1_both.c',
'third_party/boringssl/ssl/d1_lib.c',
@@ -593,7 +589,6 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/ssl/handshake_client.c',
'third_party/boringssl/ssl/handshake_server.c',
'third_party/boringssl/ssl/s3_both.c',
- 'third_party/boringssl/ssl/s3_enc.c',
'third_party/boringssl/ssl/s3_lib.c',
'third_party/boringssl/ssl/s3_pkt.c',
'third_party/boringssl/ssl/ssl_aead_ctx.c',
@@ -604,9 +599,12 @@ CORE_SOURCE_FILES = [
'third_party/boringssl/ssl/ssl_ecdh.c',
'third_party/boringssl/ssl/ssl_file.c',
'third_party/boringssl/ssl/ssl_lib.c',
- 'third_party/boringssl/ssl/ssl_rsa.c',
+ 'third_party/boringssl/ssl/ssl_privkey.c',
+ 'third_party/boringssl/ssl/ssl_privkey_cc.cc',
'third_party/boringssl/ssl/ssl_session.c',
'third_party/boringssl/ssl/ssl_stat.c',
+ 'third_party/boringssl/ssl/ssl_transcript.c',
+ 'third_party/boringssl/ssl/ssl_x509.c',
'third_party/boringssl/ssl/t1_enc.c',
'third_party/boringssl/ssl/t1_lib.c',
'third_party/boringssl/ssl/tls13_both.c',
diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
index e4536db38b..299ce75e79 100644
--- a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
+++ b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
@@ -25,8 +25,6 @@ from google.protobuf import descriptor_pool
from google.protobuf import descriptor_pb2
from src.proto.grpc.testing import empty_pb2
-#empty2_pb2 is imported for import-consequent side-effects.
-from src.proto.grpc.testing.proto2 import empty2_pb2 # pylint: disable=unused-import
from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
from tests.unit.framework.common import test_constants
@@ -48,12 +46,10 @@ def _file_descriptor_to_proto(descriptor):
class ReflectionServicerTest(unittest.TestCase):
def setUp(self):
- servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES)
server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(server_pool)
+ reflection.enable_server_reflection(_SERVICE_NAMES, self._server)
port = self._server.add_insecure_port('[::]:0')
- reflection_pb2_grpc.add_ServerReflectionServicer_to_server(servicer,
- self._server)
self._server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
diff --git a/src/ruby/lib/grpc/errors.rb b/src/ruby/lib/grpc/errors.rb
index 9071293a24..8f6aea30ad 100644
--- a/src/ruby/lib/grpc/errors.rb
+++ b/src/ruby/lib/grpc/errors.rb
@@ -50,7 +50,8 @@ module GRPC
Struct::Status.new(code, details, @metadata)
end
- def self.new_status_exception(code, details = 'unkown cause', metadata = {})
+ def self.new_status_exception(code, details = 'unknown cause',
+ metadata = {})
codes = {}
codes[OK] = Ok
codes[CANCELLED] = Cancelled
@@ -59,16 +60,16 @@ module GRPC
codes[DEADLINE_EXCEEDED] = DeadlineExceeded
codes[NOT_FOUND] = NotFound
codes[ALREADY_EXISTS] = AlreadyExists
- codes[PERMISSION_DENIED] = PermissionDenied
+ codes[PERMISSION_DENIED] = PermissionDenied
codes[UNAUTHENTICATED] = Unauthenticated
codes[RESOURCE_EXHAUSTED] = ResourceExhausted
codes[FAILED_PRECONDITION] = FailedPrecondition
codes[ABORTED] = Aborted
codes[OUT_OF_RANGE] = OutOfRange
- codes[UNIMPLEMENTED] = Unimplemented
+ codes[UNIMPLEMENTED] = Unimplemented
codes[INTERNAL] = Internal
- codes[UNIMPLEMENTED] = Unimplemented
- codes[UNAVAILABLE] = Unavailable
+ codes[UNIMPLEMENTED] = Unimplemented
+ codes[UNAVAILABLE] = Unavailable
codes[DATA_LOSS] = DataLoss
if codes[code].nil?
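
Incidentally, codes[UNIMPLEMENTED] is assigned twice in this table (once before codes[INTERNAL] and again after it); both assignments point at the same exception class, so the duplicate is harmless, but the indentation cleanup in this hunk leaves that pre-existing redundancy in place.
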
diff --git a/src/ruby/spec/channel_credentials_spec.rb b/src/ruby/spec/channel_credentials_spec.rb
index 058168a473..e53f316208 100644
--- a/src/ruby/spec/channel_credentials_spec.rb
+++ b/src/ruby/spec/channel_credentials_spec.rb
@@ -20,7 +20,7 @@ describe GRPC::Core::ChannelCredentials do
def load_test_certs
test_root = File.join(File.dirname(__FILE__), 'testdata')
- files = ['ca.pem', 'server1.pem', 'server1.key']
+ files = ['ca.pem', 'server1.key', 'server1.pem']
files.map { |f| File.open(File.join(test_root, f)).read }
end